method_name (string, lengths 1–78) | method_body (string, lengths 3–9.66k) | full_code (string, lengths 31–10.7k) | docstring (string, lengths 4–4.74k, nullable ⌀) |
---|---|---|---|
_load_qa_with_sources_chain
|
if 'combine_documents_chain' in config:
combine_documents_chain_config = config.pop('combine_documents_chain')
combine_documents_chain = load_chain_from_config(
combine_documents_chain_config)
elif 'combine_documents_chain_path' in config:
combine_documents_chain = load_chain(config.pop(
'combine_documents_chain_path'))
else:
raise ValueError(
'One of `combine_documents_chain` or `combine_documents_chain_path` must be present.'
)
return QAWithSourcesChain(combine_documents_chain=combine_documents_chain,
**config)
|
def _load_qa_with_sources_chain(config: dict, **kwargs: Any
) ->QAWithSourcesChain:
if 'combine_documents_chain' in config:
combine_documents_chain_config = config.pop('combine_documents_chain')
combine_documents_chain = load_chain_from_config(
combine_documents_chain_config)
elif 'combine_documents_chain_path' in config:
combine_documents_chain = load_chain(config.pop(
'combine_documents_chain_path'))
else:
raise ValueError(
'One of `combine_documents_chain` or `combine_documents_chain_path` must be present.'
)
return QAWithSourcesChain(combine_documents_chain=
combine_documents_chain, **config)
| null |
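A minimal sketch of the two serialized config shapes this loader accepts; the nested chain config and the file path below are hypothetical values, and exactly one of the two keys may be present, otherwise a ValueError is raised:

# Hypothetical configs for _load_qa_with_sources_chain; pass exactly one form.
config_inline = {'combine_documents_chain': {'_type': 'stuff_documents_chain'}}
config_by_path = {'combine_documents_chain_path': 'chains/combine_documents_chain.json'}
chain = _load_qa_with_sources_chain(config_inline)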
test_pwd_command_persistent
|
"""Test correct functionality when the bash process is persistent."""
session = BashProcess(persistent=True, strip_newlines=True)
commands = ['pwd']
output = session.run(commands)
assert subprocess.check_output('pwd', shell=True).decode().strip() in output
session.run(['cd ..'])
new_output = session.run(['pwd'])
assert Path(output).parent == Path(new_output)
|
@pytest.mark.skip(reason='flaky on GHA, TODO to fix')
@pytest.mark.skipif(sys.platform.startswith('win'), reason=
'Test not supported on Windows')
def test_pwd_command_persistent() ->None:
"""Test correct functionality when the bash process is persistent."""
session = BashProcess(persistent=True, strip_newlines=True)
commands = ['pwd']
output = session.run(commands)
    assert subprocess.check_output('pwd', shell=True).decode().strip() in output
session.run(['cd ..'])
new_output = session.run(['pwd'])
assert Path(output).parent == Path(new_output)
|
Test correct functionality when the bash process is persistent.
|
test_sql_chain_with_memory
|
valid_prompt_with_history = """
Only use the following tables:
{table_info}
Question: {input}
Given an input question, first create a syntactically correct
{dialect} query to run.
Always limit your query to at most {top_k} results.
Relevant pieces of previous conversation:
{history}
(You do not need to use these pieces of information if not relevant)
"""
prompt = PromptTemplate(input_variables=['input', 'table_info', 'dialect',
'top_k', 'history'], template=valid_prompt_with_history)
queries = {'foo': 'SELECT baaz from foo', 'foo2': 'SELECT baaz from foo'}
llm = FakeLLM(queries=queries, sequential_responses=True)
memory = ConversationBufferMemory()
db_chain = SQLDatabaseChain.from_llm(llm, db, memory=memory, prompt=prompt,
verbose=True)
assert db_chain.run('hello') == 'SELECT baaz from foo'
|
def test_sql_chain_with_memory() ->None:
valid_prompt_with_history = """
Only use the following tables:
{table_info}
Question: {input}
Given an input question, first create a syntactically correct
{dialect} query to run.
Always limit your query to at most {top_k} results.
Relevant pieces of previous conversation:
{history}
(You do not need to use these pieces of information if not relevant)
"""
prompt = PromptTemplate(input_variables=['input', 'table_info',
'dialect', 'top_k', 'history'], template=valid_prompt_with_history)
queries = {'foo': 'SELECT baaz from foo', 'foo2': 'SELECT baaz from foo'}
llm = FakeLLM(queries=queries, sequential_responses=True)
memory = ConversationBufferMemory()
db_chain = SQLDatabaseChain.from_llm(llm, db, memory=memory, prompt=
prompt, verbose=True)
assert db_chain.run('hello') == 'SELECT baaz from foo'
| null |
add_texts
|
"""Run more texts through the embeddings and add to the vectorstore.
Args:
texts: Iterable of strings to add to the vectorstore.
metadatas: Optional list of metadatas associated with the texts.
kwargs: vectorstore specific parameters.
Returns:
List of ids from adding the texts into the vectorstore.
"""
texts = list(texts)
if metadatas is not None and len(texts) != len(metadatas):
raise ValueError(
f'texts and metadatas do not have the same length. Received {len(texts)} texts and {len(metadatas)} metadatas.'
)
logger.debug('Embedding documents.')
embeddings = self.embedding.embed_documents(texts)
jsons = []
ids = []
for idx, (embedding, text) in enumerate(zip(embeddings, texts)):
id = str(uuid.uuid4())
ids.append(id)
json_: dict = {'id': id, 'embedding': embedding}
if metadatas is not None:
json_['metadata'] = metadatas[idx]
jsons.append(json_)
self._upload_to_gcs(text, f'documents/{id}')
logger.debug(f'Uploaded {len(ids)} documents to GCS.')
result_str = '\n'.join([json.dumps(x) for x in jsons])
filename_prefix = f'indexes/{uuid.uuid4()}'
filename = f'{filename_prefix}/{time.time()}.json'
self._upload_to_gcs(result_str, filename)
logger.debug(
f'Uploaded updated json with embeddings to {self.gcs_bucket_name}/{filename}.'
)
self.index = self.index.update_embeddings(contents_delta_uri=
f'gs://{self.gcs_bucket_name}/{filename_prefix}/')
logger.debug('Updated index with new configuration.')
return ids
|
def add_texts(self, texts: Iterable[str], metadatas: Optional[List[dict]]=
None, **kwargs: Any) ->List[str]:
"""Run more texts through the embeddings and add to the vectorstore.
Args:
texts: Iterable of strings to add to the vectorstore.
metadatas: Optional list of metadatas associated with the texts.
kwargs: vectorstore specific parameters.
Returns:
List of ids from adding the texts into the vectorstore.
"""
texts = list(texts)
if metadatas is not None and len(texts) != len(metadatas):
raise ValueError(
f'texts and metadatas do not have the same length. Received {len(texts)} texts and {len(metadatas)} metadatas.'
)
logger.debug('Embedding documents.')
embeddings = self.embedding.embed_documents(texts)
jsons = []
ids = []
for idx, (embedding, text) in enumerate(zip(embeddings, texts)):
id = str(uuid.uuid4())
ids.append(id)
json_: dict = {'id': id, 'embedding': embedding}
if metadatas is not None:
json_['metadata'] = metadatas[idx]
jsons.append(json_)
self._upload_to_gcs(text, f'documents/{id}')
logger.debug(f'Uploaded {len(ids)} documents to GCS.')
result_str = '\n'.join([json.dumps(x) for x in jsons])
filename_prefix = f'indexes/{uuid.uuid4()}'
filename = f'{filename_prefix}/{time.time()}.json'
self._upload_to_gcs(result_str, filename)
logger.debug(
f'Uploaded updated json with embeddings to {self.gcs_bucket_name}/{filename}.'
)
self.index = self.index.update_embeddings(contents_delta_uri=
f'gs://{self.gcs_bucket_name}/{filename_prefix}/')
logger.debug('Updated index with new configuration.')
return ids
|
Run more texts through the embeddings and add to the vectorstore.
Args:
texts: Iterable of strings to add to the vectorstore.
metadatas: Optional list of metadatas associated with the texts.
kwargs: vectorstore specific parameters.
Returns:
List of ids from adding the texts into the vectorstore.
|
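A short usage sketch of add_texts, assuming `store` is an already-constructed instance of this vector store; the texts and metadata below are hypothetical:

texts = ['first document', 'second document']
metadatas = [{'source': 'a.txt'}, {'source': 'b.txt'}]  # must match len(texts)
ids = store.add_texts(texts, metadatas=metadatas)
# `ids` is a list of generated UUID strings, one per input text.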
load_json_to_dict
|
"""Load json file to a dictionary.
Parameters:
json_path (str): The path to the json file.
Returns:
(dict): The dictionary representation of the json file.
"""
with open(json_path, 'r') as f:
data = json.load(f)
return data
|
def load_json_to_dict(json_path: Union[str, Path]) ->dict:
"""Load json file to a dictionary.
Parameters:
json_path (str): The path to the json file.
Returns:
(dict): The dictionary representation of the json file.
"""
with open(json_path, 'r') as f:
data = json.load(f)
return data
|
Load json file to a dictionary.
Parameters:
json_path (str): The path to the json file.
Returns:
(dict): The dictionary representation of the json file.
|
test_openai_functions_router
|
revise = mocker.Mock(side_effect=lambda kw:
f"Revised draft: no more {kw['notes']}!")
accept = mocker.Mock(side_effect=lambda kw: f"Accepted draft: {kw['draft']}!")
router = OpenAIFunctionsRouter({'revise': revise, 'accept': accept},
functions=[{'name': 'revise', 'description':
'Sends the draft for revision.', 'parameters': {'type': 'object',
'properties': {'notes': {'type': 'string', 'description':
"The editor's notes to guide the revision."}}}}, {'name': 'accept',
'description': 'Accepts the draft.', 'parameters': {'type': 'object',
'properties': {'draft': {'type': 'string', 'description':
'The draft to accept.'}}}}])
model = FakeChatOpenAI()
chain = model.bind(functions=router.functions) | router
assert router.functions == snapshot
assert chain.invoke('Something about turtles?') == 'Accepted draft: turtles!'
revise.assert_not_called()
accept.assert_called_once_with({'draft': 'turtles'})
|
def test_openai_functions_router(snapshot: SnapshotAssertion, mocker:
MockerFixture) ->None:
revise = mocker.Mock(side_effect=lambda kw:
f"Revised draft: no more {kw['notes']}!")
accept = mocker.Mock(side_effect=lambda kw:
f"Accepted draft: {kw['draft']}!")
router = OpenAIFunctionsRouter({'revise': revise, 'accept': accept},
functions=[{'name': 'revise', 'description':
'Sends the draft for revision.', 'parameters': {'type': 'object',
'properties': {'notes': {'type': 'string', 'description':
"The editor's notes to guide the revision."}}}}, {'name': 'accept',
'description': 'Accepts the draft.', 'parameters': {'type':
'object', 'properties': {'draft': {'type': 'string', 'description':
'The draft to accept.'}}}}])
model = FakeChatOpenAI()
chain = model.bind(functions=router.functions) | router
assert router.functions == snapshot
    assert chain.invoke('Something about turtles?') == 'Accepted draft: turtles!'
revise.assert_not_called()
accept.assert_called_once_with({'draft': 'turtles'})
| null |
__deepcopy__
|
"""Deepcopy the tracer."""
return self
|
def __deepcopy__(self, memo: dict) ->BaseTracer:
"""Deepcopy the tracer."""
return self
|
Deepcopy the tracer.
|
test_neo4jvector_hybrid_retrieval_query2
|
"""Test custom retrieval_query with hybrid search."""
text_embeddings = FakeEmbeddingsWithOsDimension().embed_documents(texts)
text_embedding_pairs = list(zip(texts, text_embeddings))
docsearch = Neo4jVector.from_embeddings(text_embeddings=
text_embedding_pairs, embedding=FakeEmbeddingsWithOsDimension(), url=
url, username=username, password=password, pre_delete_collection=True,
search_type=SearchType.HYBRID, retrieval_query=
"RETURN node.text AS text, score, {test: 'test'} AS metadata")
output = docsearch.similarity_search('foo', k=1)
assert output == [Document(page_content='foo', metadata={'test': 'test'})]
drop_vector_indexes(docsearch)
|
def test_neo4jvector_hybrid_retrieval_query2() ->None:
"""Test custom retrieval_query with hybrid search."""
text_embeddings = FakeEmbeddingsWithOsDimension().embed_documents(texts)
text_embedding_pairs = list(zip(texts, text_embeddings))
docsearch = Neo4jVector.from_embeddings(text_embeddings=
text_embedding_pairs, embedding=FakeEmbeddingsWithOsDimension(),
url=url, username=username, password=password,
pre_delete_collection=True, search_type=SearchType.HYBRID,
retrieval_query=
"RETURN node.text AS text, score, {test: 'test'} AS metadata")
output = docsearch.similarity_search('foo', k=1)
assert output == [Document(page_content='foo', metadata={'test': 'test'})]
drop_vector_indexes(docsearch)
|
Test custom retrieval_query with hybrid search.
|
__init__
|
super().__init__(**kwargs)
self.run_map: Dict[str, Run] = {}
|
def __init__(self, **kwargs: Any) ->None:
super().__init__(**kwargs)
self.run_map: Dict[str, Run] = {}
| null |
test_visit_comparison
|
comp = Comparison(comparator=Comparator.EQ, attribute='foo', value='1')
expected = {'term': {'metadata.foo.keyword': '1'}}
actual = DEFAULT_TRANSLATOR.visit_comparison(comp)
assert expected == actual
|
def test_visit_comparison() ->None:
comp = Comparison(comparator=Comparator.EQ, attribute='foo', value='1')
expected = {'term': {'metadata.foo.keyword': '1'}}
actual = DEFAULT_TRANSLATOR.visit_comparison(comp)
assert expected == actual
| null |
toy_dir
|
"""Yield a pre-populated directory to test the blob loader."""
with tempfile.TemporaryDirectory() as temp_dir:
with open(os.path.join(temp_dir, 'test.txt'), 'w') as test_txt:
test_txt.write('This is a test.txt file.')
with open(os.path.join(temp_dir, 'test.html'), 'w') as test_html:
test_html.write(
'<html><body><h1>This is a test.html file.</h1></body></html>')
with open(os.path.join(temp_dir, '.hidden_file'), 'w') as hidden_file:
hidden_file.write('This is a hidden file.')
some_dir = os.path.join(temp_dir, 'some_dir')
os.makedirs(some_dir)
with open(os.path.join(some_dir, 'nested_file.txt'), 'w') as nested_file:
nested_file.write('This is a nested_file.txt file.')
other_dir = os.path.join(some_dir, 'other_dir')
os.makedirs(other_dir)
with open(os.path.join(other_dir, 'more_nested.txt'), 'w') as nested_file:
nested_file.write('This is a more_nested.txt file.')
yield Path(temp_dir)
|
@pytest.fixture
def toy_dir() ->Generator[Path, None, None]:
"""Yield a pre-populated directory to test the blob loader."""
with tempfile.TemporaryDirectory() as temp_dir:
with open(os.path.join(temp_dir, 'test.txt'), 'w') as test_txt:
test_txt.write('This is a test.txt file.')
with open(os.path.join(temp_dir, 'test.html'), 'w') as test_html:
test_html.write(
'<html><body><h1>This is a test.html file.</h1></body></html>')
with open(os.path.join(temp_dir, '.hidden_file'), 'w') as hidden_file:
hidden_file.write('This is a hidden file.')
some_dir = os.path.join(temp_dir, 'some_dir')
os.makedirs(some_dir)
with open(os.path.join(some_dir, 'nested_file.txt'), 'w'
) as nested_file:
nested_file.write('This is a nested_file.txt file.')
other_dir = os.path.join(some_dir, 'other_dir')
os.makedirs(other_dir)
with open(os.path.join(other_dir, 'more_nested.txt'), 'w'
) as nested_file:
nested_file.write('This is a more_nested.txt file.')
yield Path(temp_dir)
|
Yield a pre-populated directory to test the blob loader.
|
to_messages
|
"""Return prompt as messages."""
return [HumanMessage(content=self.text)]
|
def to_messages(self) ->List[BaseMessage]:
"""Return prompt as messages."""
return [HumanMessage(content=self.text)]
|
Return prompt as messages.
|
test_sql_database_run_update
|
"""Test that update commands run successfully and returned in correct format."""
engine = create_engine('sqlite:///:memory:')
metadata_obj.create_all(engine)
stmt = insert(user).values(user_id=13, user_name='Harrison', user_company='Foo'
)
with engine.connect() as conn:
conn.execute(stmt)
db = SQLDatabase(engine)
db_chain = SQLDatabaseChain.from_llm(OpenAI(temperature=0), db)
output = db_chain.run("Update Harrison's workplace to Bar")
expected_output = " Harrison's workplace has been updated to Bar."
assert output == expected_output
output = db_chain.run('What company does Harrison work at?')
expected_output = ' Harrison works at Bar.'
assert output == expected_output
|
def test_sql_database_run_update() ->None:
"""Test that update commands run successfully and returned in correct format."""
engine = create_engine('sqlite:///:memory:')
metadata_obj.create_all(engine)
stmt = insert(user).values(user_id=13, user_name='Harrison',
user_company='Foo')
with engine.connect() as conn:
conn.execute(stmt)
db = SQLDatabase(engine)
db_chain = SQLDatabaseChain.from_llm(OpenAI(temperature=0), db)
output = db_chain.run("Update Harrison's workplace to Bar")
expected_output = " Harrison's workplace has been updated to Bar."
assert output == expected_output
output = db_chain.run('What company does Harrison work at?')
expected_output = ' Harrison works at Bar.'
assert output == expected_output
|
Test that update commands run successfully and their results are returned in the correct format.
|
test_similarity_search
|
"""Test similarity search"""
from bagel.config import Settings
setting = Settings(bagel_api_impl='rest', bagel_server_host='api.bageldb.ai')
bagel = Bagel(client_settings=setting)
bagel.add_texts(texts=['hello bagel', 'hello langchain'])
result = bagel.similarity_search(query='bagel', k=1)
assert result == [Document(page_content='hello bagel')]
bagel.delete_cluster()
|
def test_similarity_search() ->None:
"""Test similarity search"""
from bagel.config import Settings
setting = Settings(bagel_api_impl='rest', bagel_server_host=
'api.bageldb.ai')
bagel = Bagel(client_settings=setting)
bagel.add_texts(texts=['hello bagel', 'hello langchain'])
result = bagel.similarity_search(query='bagel', k=1)
assert result == [Document(page_content='hello bagel')]
bagel.delete_cluster()
|
Test similarity search
|
_response_to_generation
|
"""Converts a stream response to a generation chunk."""
try:
generation_info = {'is_blocked': response.is_blocked,
'safety_attributes': response.safety_attributes}
except Exception:
generation_info = None
return GenerationChunk(text=response.text, generation_info=generation_info)
|
def _response_to_generation(self, response: TextGenerationResponse
) ->GenerationChunk:
"""Converts a stream response to a generation chunk."""
try:
generation_info = {'is_blocked': response.is_blocked,
'safety_attributes': response.safety_attributes}
except Exception:
generation_info = None
return GenerationChunk(text=response.text, generation_info=generation_info)
|
Converts a stream response to a generation chunk.
|
raise_value_error
|
"""Raise a value error."""
raise ValueError(f'x is {x}')
|
def raise_value_error(x: str) ->Any:
"""Raise a value error."""
raise ValueError(f'x is {x}')
|
Raise a value error.
|
test_model_response
|
parser = OpenAIFunctionsAgentOutputParser()
msg = AIMessage(content='Model response.')
result = parser.invoke(msg)
assert isinstance(result, AgentFinish)
assert result.return_values == {'output': 'Model response.'}
assert result.log == 'Model response.'
|
def test_model_response() ->None:
parser = OpenAIFunctionsAgentOutputParser()
msg = AIMessage(content='Model response.')
result = parser.invoke(msg)
assert isinstance(result, AgentFinish)
assert result.return_values == {'output': 'Model response.'}
assert result.log == 'Model response.'
| null |
on_agent_action
|
"""Run when agent action is received.
Args:
action (AgentAction): The agent action.
Returns:
Any: The result of the callback.
"""
handle_event(self.handlers, 'on_agent_action', 'ignore_agent', action,
run_id=self.run_id, parent_run_id=self.parent_run_id, tags=self.tags,
**kwargs)
|
def on_agent_action(self, action: AgentAction, **kwargs: Any) ->Any:
"""Run when agent action is received.
Args:
action (AgentAction): The agent action.
Returns:
Any: The result of the callback.
"""
handle_event(self.handlers, 'on_agent_action', 'ignore_agent', action,
        run_id=self.run_id, parent_run_id=self.parent_run_id, tags=self.tags,
        **kwargs)
|
Run when agent action is received.
Args:
action (AgentAction): The agent action.
Returns:
Any: The result of the callback.
|
ignore_llm
|
"""Whether to ignore LLM callbacks."""
return self.ignore_llm_
|
@property
def ignore_llm(self) ->bool:
"""Whether to ignore LLM callbacks."""
return self.ignore_llm_
|
Whether to ignore LLM callbacks.
|
_get_post_headers
|
"""Returns headers that should be attached to each post request."""
return {'x-api-key': self._vectara_api_key,
    'customer-id': self._vectara_customer_id,
    'Content-Type': 'application/json', 'X-Source': self._source}
|
def _get_post_headers(self) ->dict:
"""Returns headers that should be attached to each post request."""
    return {'x-api-key': self._vectara_api_key,
        'customer-id': self._vectara_customer_id,
        'Content-Type': 'application/json', 'X-Source': self._source}
|
Returns headers that should be attached to each post request.
|
from_llm
|
"""Create a TrajectoryEvalChain object from a language model chain.
Args:
llm (BaseChatModel): The language model chain.
agent_tools (Optional[Sequence[BaseTool]]): A list of tools
available to the agent.
output_parser (Optional[TrajectoryOutputParser]): The output parser
used to parse the chain output into a score.
Returns:
TrajectoryEvalChain: The TrajectoryEvalChain object.
"""
if not isinstance(llm, BaseChatModel):
raise NotImplementedError(
'Only chat models supported by the current trajectory eval')
if agent_tools:
prompt = EVAL_CHAT_PROMPT
else:
prompt = TOOL_FREE_EVAL_CHAT_PROMPT
eval_chain = LLMChain(llm=llm, prompt=prompt)
return cls(agent_tools=agent_tools, eval_chain=eval_chain, output_parser=
output_parser or TrajectoryOutputParser(), **kwargs)
|
@classmethod
def from_llm(cls, llm: BaseLanguageModel, agent_tools: Optional[Sequence[
BaseTool]]=None, output_parser: Optional[TrajectoryOutputParser]=None,
**kwargs: Any) ->'TrajectoryEvalChain':
"""Create a TrajectoryEvalChain object from a language model chain.
Args:
llm (BaseChatModel): The language model chain.
agent_tools (Optional[Sequence[BaseTool]]): A list of tools
available to the agent.
output_parser (Optional[TrajectoryOutputParser]): The output parser
used to parse the chain output into a score.
Returns:
TrajectoryEvalChain: The TrajectoryEvalChain object.
"""
if not isinstance(llm, BaseChatModel):
raise NotImplementedError(
'Only chat models supported by the current trajectory eval')
if agent_tools:
prompt = EVAL_CHAT_PROMPT
else:
prompt = TOOL_FREE_EVAL_CHAT_PROMPT
eval_chain = LLMChain(llm=llm, prompt=prompt)
return cls(agent_tools=agent_tools, eval_chain=eval_chain,
output_parser=output_parser or TrajectoryOutputParser(), **kwargs)
|
Create a TrajectoryEvalChain object from a language model chain.
Args:
llm (BaseChatModel): The language model chain.
agent_tools (Optional[Sequence[BaseTool]]): A list of tools
available to the agent.
output_parser (Optional[TrajectoryOutputParser]): The output parser
used to parse the chain output into a score.
Returns:
TrajectoryEvalChain: The TrajectoryEvalChain object.
|
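A brief sketch of building the evaluator with from_llm; `chat_model` (a BaseChatModel) and `tools` (a sequence of BaseTool) are assumed names, and passing a non-chat model raises NotImplementedError:

eval_chain = TrajectoryEvalChain.from_llm(llm=chat_model, agent_tools=tools)
# Omitting agent_tools switches to the tool-free evaluation prompt.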
_identifying_params
|
"""Get the identifying parameters."""
_model_kwargs = self.model_kwargs or {}
return {'eas_service_url': self.eas_service_url,
    'eas_service_token': self.eas_service_token,
    **{'model_kwargs': _model_kwargs}}
|
@property
def _identifying_params(self) ->Dict[str, Any]:
"""Get the identifying parameters."""
_model_kwargs = self.model_kwargs or {}
return {'eas_service_url': self.eas_service_url, 'eas_service_token':
self.eas_service_token, **{'model_kwargs': _model_kwargs}}
|
Get the identifying parameters.
|
load
|
"""Load documents."""
if self.dropbox_folder_path is not None:
return self._load_documents_from_folder(self.dropbox_folder_path)
else:
return self._load_documents_from_paths()
|
def load(self) ->List[Document]:
"""Load documents."""
if self.dropbox_folder_path is not None:
return self._load_documents_from_folder(self.dropbox_folder_path)
else:
return self._load_documents_from_paths()
|
Load documents.
|
__repr__
|
return str(self)
|
def __repr__(self) ->str:
return str(self)
| null |
similarity_search_with_score
|
"""Return pinecone documents most similar to query, along with scores.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
filter: Dictionary of argument(s) to filter on metadata
namespace: Namespace to search in. Default will search in '' namespace.
Returns:
List of Documents most similar to the query and score for each
"""
return self.similarity_search_by_vector_with_score(self._embed_query(query),
k=k, filter=filter, namespace=namespace)
|
def similarity_search_with_score(self, query: str, k: int=4, filter:
Optional[dict]=None, namespace: Optional[str]=None) ->List[Tuple[
Document, float]]:
"""Return pinecone documents most similar to query, along with scores.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
filter: Dictionary of argument(s) to filter on metadata
namespace: Namespace to search in. Default will search in '' namespace.
Returns:
List of Documents most similar to the query and score for each
"""
return self.similarity_search_by_vector_with_score(self._embed_query(
query), k=k, filter=filter, namespace=namespace)
|
Return pinecone documents most similar to query, along with scores.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
filter: Dictionary of argument(s) to filter on metadata
namespace: Namespace to search in. Default will search in '' namespace.
Returns:
List of Documents most similar to the query and score for each
|
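A hedged usage sketch, assuming `vectorstore` is an initialized Pinecone wrapper exposing the method above; the filter value is hypothetical:

docs_and_scores = vectorstore.similarity_search_with_score(
    'example query', k=4, filter={'source': 'a.txt'}, namespace=None)
for doc, score in docs_and_scores:
    print(score, doc.page_content)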
__exit__
|
"""Close file that stdout was piped to."""
sys.stdout.close()
sys.stdout = self._original_stdout
|
def __exit__(self, *_: Any) ->None:
"""Close file that stdout was piped to."""
sys.stdout.close()
sys.stdout = self._original_stdout
|
Close file that stdout was piped to.
|
_import_azure_cognitive_services_AzureCogsImageAnalysisTool
|
from langchain_community.tools.azure_cognitive_services import AzureCogsImageAnalysisTool
return AzureCogsImageAnalysisTool
|
def _import_azure_cognitive_services_AzureCogsImageAnalysisTool() ->Any:
from langchain_community.tools.azure_cognitive_services import AzureCogsImageAnalysisTool
return AzureCogsImageAnalysisTool
| null |
lazy_load
|
yield from self._get_notes()
|
def lazy_load(self) ->Iterator[Document]:
yield from self._get_notes()
| null |
_generate
|
generations: List[List[Generation]] = []
generation_config = {'stop_sequences': stop, 'temperature': self.temperature,
    'top_p': self.top_p, 'top_k': self.top_k,
    'max_output_tokens': self.max_output_tokens, 'candidate_count': self.n}
for prompt in prompts:
if self.is_gemini:
        res = completion_with_retry(self, prompt=prompt, stream=False,
            is_gemini=True, run_manager=run_manager,
            generation_config=generation_config)
candidates = [''.join([p.text for p in c.content.parts]) for c in
res.candidates]
generations.append([Generation(text=c) for c in candidates])
else:
        res = completion_with_retry(self, model=self.model_name, prompt=prompt,
            stream=False, is_gemini=False, run_manager=run_manager,
            **generation_config)
prompt_generations = []
for candidate in res.candidates:
raw_text = candidate['output']
stripped_text = _strip_erroneous_leading_spaces(raw_text)
prompt_generations.append(Generation(text=stripped_text))
generations.append(prompt_generations)
return LLMResult(generations=generations)
|
def _generate(self, prompts: List[str], stop: Optional[List[str]]=None,
run_manager: Optional[CallbackManagerForLLMRun]=None, **kwargs: Any
) ->LLMResult:
generations: List[List[Generation]] = []
    generation_config = {'stop_sequences': stop, 'temperature': self.temperature,
        'top_p': self.top_p, 'top_k': self.top_k,
        'max_output_tokens': self.max_output_tokens, 'candidate_count': self.n}
for prompt in prompts:
if self.is_gemini:
            res = completion_with_retry(self, prompt=prompt, stream=False,
                is_gemini=True, run_manager=run_manager,
                generation_config=generation_config)
candidates = [''.join([p.text for p in c.content.parts]) for c in
res.candidates]
generations.append([Generation(text=c) for c in candidates])
else:
            res = completion_with_retry(self, model=self.model_name, prompt=prompt,
                stream=False, is_gemini=False, run_manager=run_manager,
                **generation_config)
prompt_generations = []
for candidate in res.candidates:
raw_text = candidate['output']
stripped_text = _strip_erroneous_leading_spaces(raw_text)
prompt_generations.append(Generation(text=stripped_text))
generations.append(prompt_generations)
return LLMResult(generations=generations)
| null |
setup_class
|
url = 'http://127.0.0.1:8080/fwww/'
cls.pod = 'vdb'
cls.store = 'langchain_test_store'
vector_index = 'v'
vector_type = 'cosine_fraction_float'
vector_dimension = 10
embeddings = ConsistentFakeEmbeddings()
cls.vectorstore = Jaguar(cls.pod, cls.store, vector_index, vector_type,
vector_dimension, url, embeddings)
|
@classmethod
def setup_class(cls) ->None:
url = 'http://127.0.0.1:8080/fwww/'
cls.pod = 'vdb'
cls.store = 'langchain_test_store'
vector_index = 'v'
vector_type = 'cosine_fraction_float'
vector_dimension = 10
embeddings = ConsistentFakeEmbeddings()
cls.vectorstore = Jaguar(cls.pod, cls.store, vector_index, vector_type,
vector_dimension, url, embeddings)
| null |
false
|
return False
|
def false(self) ->bool:
return False
| null |
test_typescript_code_splitter
|
splitter = RecursiveCharacterTextSplitter.from_language(Language.TS,
chunk_size=CHUNK_SIZE, chunk_overlap=0)
code = """
function helloWorld(): void {
console.log("Hello, World!");
}
// Call the function
helloWorld();
"""
chunks = splitter.split_text(code)
assert chunks == ['function', 'helloWorld():', 'void {', 'console.log("He',
'llo,', 'World!");', '}', '// Call the', 'function', 'helloWorld();']
|
def test_typescript_code_splitter() ->None:
splitter = RecursiveCharacterTextSplitter.from_language(Language.TS,
chunk_size=CHUNK_SIZE, chunk_overlap=0)
code = """
function helloWorld(): void {
console.log("Hello, World!");
}
// Call the function
helloWorld();
"""
chunks = splitter.split_text(code)
assert chunks == ['function', 'helloWorld():', 'void {',
'console.log("He', 'llo,', 'World!");', '}', '// Call the',
'function', 'helloWorld();']
| null |
similarity_search
|
"""Return docs most similar to query.
By default, supports Approximate Search.
Also supports Script Scoring and Painless Scripting.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
Returns:
List of Documents most similar to the query.
Optional Args:
vector_field: Document field embeddings are stored in. Defaults to
"vector_field".
text_field: Document field the text of the document is stored in. Defaults
to "text".
metadata_field: Document field that metadata is stored in. Defaults to
"metadata".
Can be set to a special value "*" to include the entire document.
Optional Args for Approximate Search:
search_type: "approximate_search"; default: "approximate_search"
boolean_filter: A Boolean filter is a post filter that consists of a Boolean
query containing a k-NN query and a filter.
subquery_clause: Query clause on the knn vector field; default: "must"
lucene_filter: the Lucene algorithm decides whether to perform an exact
k-NN search with pre-filtering or an approximate search with modified
post-filtering. (deprecated, use `efficient_filter`)
efficient_filter: the Lucene Engine or Faiss Engine decides whether to
perform an exact k-NN search with pre-filtering or an approximate search
with modified post-filtering.
Optional Args for Script Scoring Search:
search_type: "script_scoring"; default: "approximate_search"
space_type: "l2", "l1", "linf", "cosinesimil", "innerproduct",
"hammingbit"; default: "l2"
pre_filter: script_score query to pre-filter documents before identifying
nearest neighbors; default: {"match_all": {}}
Optional Args for Painless Scripting Search:
search_type: "painless_scripting"; default: "approximate_search"
space_type: "l2Squared", "l1Norm", "cosineSimilarity"; default: "l2Squared"
pre_filter: script_score query to pre-filter documents before identifying
nearest neighbors; default: {"match_all": {}}
"""
docs_with_scores = self.similarity_search_with_score(query, k, **kwargs)
return [doc[0] for doc in docs_with_scores]
|
def similarity_search(self, query: str, k: int=4, **kwargs: Any) ->List[
Document]:
"""Return docs most similar to query.
By default, supports Approximate Search.
Also supports Script Scoring and Painless Scripting.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
Returns:
List of Documents most similar to the query.
Optional Args:
vector_field: Document field embeddings are stored in. Defaults to
"vector_field".
text_field: Document field the text of the document is stored in. Defaults
to "text".
metadata_field: Document field that metadata is stored in. Defaults to
"metadata".
Can be set to a special value "*" to include the entire document.
Optional Args for Approximate Search:
search_type: "approximate_search"; default: "approximate_search"
    boolean_filter: A Boolean filter is a post filter that consists of a Boolean
    query containing a k-NN query and a filter.
subquery_clause: Query clause on the knn vector field; default: "must"
lucene_filter: the Lucene algorithm decides whether to perform an exact
k-NN search with pre-filtering or an approximate search with modified
post-filtering. (deprecated, use `efficient_filter`)
efficient_filter: the Lucene Engine or Faiss Engine decides whether to
perform an exact k-NN search with pre-filtering or an approximate search
with modified post-filtering.
Optional Args for Script Scoring Search:
search_type: "script_scoring"; default: "approximate_search"
space_type: "l2", "l1", "linf", "cosinesimil", "innerproduct",
"hammingbit"; default: "l2"
pre_filter: script_score query to pre-filter documents before identifying
nearest neighbors; default: {"match_all": {}}
Optional Args for Painless Scripting Search:
search_type: "painless_scripting"; default: "approximate_search"
space_type: "l2Squared", "l1Norm", "cosineSimilarity"; default: "l2Squared"
pre_filter: script_score query to pre-filter documents before identifying
nearest neighbors; default: {"match_all": {}}
"""
docs_with_scores = self.similarity_search_with_score(query, k, **kwargs)
return [doc[0] for doc in docs_with_scores]
|
Return docs most similar to query.
By default, supports Approximate Search.
Also supports Script Scoring and Painless Scripting.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
Returns:
List of Documents most similar to the query.
Optional Args:
vector_field: Document field embeddings are stored in. Defaults to
"vector_field".
text_field: Document field the text of the document is stored in. Defaults
to "text".
metadata_field: Document field that metadata is stored in. Defaults to
"metadata".
Can be set to a special value "*" to include the entire document.
Optional Args for Approximate Search:
search_type: "approximate_search"; default: "approximate_search"
boolean_filter: A Boolean filter is a post filter that consists of a Boolean
query containing a k-NN query and a filter.
subquery_clause: Query clause on the knn vector field; default: "must"
lucene_filter: the Lucene algorithm decides whether to perform an exact
k-NN search with pre-filtering or an approximate search with modified
post-filtering. (deprecated, use `efficient_filter`)
efficient_filter: the Lucene Engine or Faiss Engine decides whether to
perform an exact k-NN search with pre-filtering or an approximate search
with modified post-filtering.
Optional Args for Script Scoring Search:
search_type: "script_scoring"; default: "approximate_search"
space_type: "l2", "l1", "linf", "cosinesimil", "innerproduct",
"hammingbit"; default: "l2"
pre_filter: script_score query to pre-filter documents before identifying
nearest neighbors; default: {"match_all": {}}
Optional Args for Painless Scripting Search:
search_type: "painless_scripting"; default: "approximate_search"
space_type: "l2Squared", "l1Norm", "cosineSimilarity"; default: "l2Squared"
pre_filter: script_score query to pre-filter documents before identifying
nearest neighbors; default: {"match_all": {}}
|
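A usage sketch exercising the optional arguments documented above; `opensearch_store` is an assumed, already-initialized OpenSearch vector store:

docs = opensearch_store.similarity_search(
    'example query',
    k=4,
    search_type='script_scoring',   # default is 'approximate_search'
    space_type='cosinesimil',
    pre_filter={'match_all': {}},
)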
_get_scenexplain
|
return SceneXplainTool(**kwargs)
|
def _get_scenexplain(**kwargs: Any) ->BaseTool:
return SceneXplainTool(**kwargs)
| null |
create_list
|
"""
Creates a new list.
"""
query_dict, error = load_query(query, fault_tolerant=True)
if query_dict is None:
return {'Error': error}
location = self.folder_id if self.folder_id else self.space_id
url = f'{DEFAULT_URL}/folder/{location}/list'
payload = extract_dict_elements_from_component_fields(query_dict, Task)
headers = self.get_headers()
response = requests.post(url, json=payload, headers=headers)
data = response.json()
parsed_list = parse_dict_through_component(data, CUList, fault_tolerant=True)
if 'id' in parsed_list:
self.list_id = parsed_list['id']
return parsed_list
|
def create_list(self, query: str) ->Dict:
"""
Creates a new list.
"""
query_dict, error = load_query(query, fault_tolerant=True)
if query_dict is None:
return {'Error': error}
location = self.folder_id if self.folder_id else self.space_id
url = f'{DEFAULT_URL}/folder/{location}/list'
payload = extract_dict_elements_from_component_fields(query_dict, Task)
headers = self.get_headers()
response = requests.post(url, json=payload, headers=headers)
data = response.json()
    parsed_list = parse_dict_through_component(data, CUList, fault_tolerant=True)
if 'id' in parsed_list:
self.list_id = parsed_list['id']
return parsed_list
|
Creates a new list.
|
from_credentials
|
"""Initialize callback handler from Arthur credentials.
Args:
model_id (str): The ID of the arthur model to log to.
arthur_url (str, optional): The URL of the Arthur instance to log to.
Defaults to "https://app.arthur.ai".
arthur_login (str, optional): The login to use to connect to Arthur.
Defaults to None.
arthur_password (str, optional): The password to use to connect to
Arthur. Defaults to None.
Returns:
ArthurCallbackHandler: The initialized callback handler.
"""
arthurai = _lazy_load_arthur()
ArthurAI = arthurai.ArthurAI
ResponseClientError = arthurai.common.exceptions.ResponseClientError
if arthur_login is None:
try:
arthur_api_key = os.environ['ARTHUR_API_KEY']
except KeyError:
raise ValueError(
'No Arthur authentication provided. Either give a login to the ArthurCallbackHandler or set an ARTHUR_API_KEY as an environment variable.'
)
arthur = ArthurAI(url=arthur_url, access_key=arthur_api_key)
elif arthur_password is None:
arthur = ArthurAI(url=arthur_url, login=arthur_login)
else:
arthur = ArthurAI(url=arthur_url, login=arthur_login, password=
arthur_password)
try:
arthur_model = arthur.get_model(model_id)
except ResponseClientError:
raise ValueError(
f'Was unable to retrieve model with id {model_id} from Arthur. Make sure the ID corresponds to a model that is currently registered with your Arthur account.'
)
return cls(arthur_model)
|
@classmethod
def from_credentials(cls, model_id: str, arthur_url: Optional[str]=
'https://app.arthur.ai', arthur_login: Optional[str]=None,
arthur_password: Optional[str]=None) ->ArthurCallbackHandler:
"""Initialize callback handler from Arthur credentials.
Args:
model_id (str): The ID of the arthur model to log to.
arthur_url (str, optional): The URL of the Arthur instance to log to.
Defaults to "https://app.arthur.ai".
arthur_login (str, optional): The login to use to connect to Arthur.
Defaults to None.
arthur_password (str, optional): The password to use to connect to
Arthur. Defaults to None.
Returns:
ArthurCallbackHandler: The initialized callback handler.
"""
arthurai = _lazy_load_arthur()
ArthurAI = arthurai.ArthurAI
ResponseClientError = arthurai.common.exceptions.ResponseClientError
if arthur_login is None:
try:
arthur_api_key = os.environ['ARTHUR_API_KEY']
except KeyError:
raise ValueError(
'No Arthur authentication provided. Either give a login to the ArthurCallbackHandler or set an ARTHUR_API_KEY as an environment variable.'
)
arthur = ArthurAI(url=arthur_url, access_key=arthur_api_key)
elif arthur_password is None:
arthur = ArthurAI(url=arthur_url, login=arthur_login)
else:
arthur = ArthurAI(url=arthur_url, login=arthur_login, password=
arthur_password)
try:
arthur_model = arthur.get_model(model_id)
except ResponseClientError:
raise ValueError(
f'Was unable to retrieve model with id {model_id} from Arthur. Make sure the ID corresponds to a model that is currently registered with your Arthur account.'
)
return cls(arthur_model)
|
Initialize callback handler from Arthur credentials.
Args:
model_id (str): The ID of the arthur model to log to.
arthur_url (str, optional): The URL of the Arthur instance to log to.
Defaults to "https://app.arthur.ai".
arthur_login (str, optional): The login to use to connect to Arthur.
Defaults to None.
arthur_password (str, optional): The password to use to connect to
Arthur. Defaults to None.
Returns:
ArthurCallbackHandler: The initialized callback handler.
|
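A minimal construction sketch; the model ID and login below are hypothetical, and when no login is given the ARTHUR_API_KEY environment variable must be set:

handler = ArthurCallbackHandler.from_credentials(
    model_id='my-arthur-model-id',      # hypothetical ID registered with Arthur
    arthur_url='https://app.arthur.ai',
    arthur_login='user@example.com',    # hypothetical; omit to authenticate via ARTHUR_API_KEY
)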
test_load_nonexistent_dataset
|
"""Tests that ValueError is thrown for nonexistent dataset name"""
page_content_column = 'text'
name = 'v3'
loader = HuggingFaceDatasetLoader(HUGGING_FACE_EXAMPLE_DATASET,
page_content_column, name)
with pytest.raises(ValueError):
loader.load()
|
@pytest.mark.requires('datasets')
@pytest.fixture
def test_load_nonexistent_dataset() ->None:
"""Tests that ValueError is thrown for nonexistent dataset name"""
page_content_column = 'text'
name = 'v3'
loader = HuggingFaceDatasetLoader(HUGGING_FACE_EXAMPLE_DATASET,
page_content_column, name)
with pytest.raises(ValueError):
loader.load()
|
Tests that ValueError is thrown for nonexistent dataset name
|
test_prompttemplate_validation
|
"""Test that few shot works when prefix and suffix are PromptTemplates."""
prefix = PromptTemplate(input_variables=['content'], template=
'This is a test about {content}.')
suffix = PromptTemplate(input_variables=['new_content'], template=
'Now you try to talk about {new_content}.')
examples = [{'question': 'foo', 'answer': 'bar'}, {'question': 'baz',
'answer': 'foo'}]
with pytest.raises(ValueError):
FewShotPromptWithTemplates(suffix=suffix, prefix=prefix,
input_variables=[], examples=examples, example_prompt=
EXAMPLE_PROMPT, example_separator='\n', validate_template=True)
assert FewShotPromptWithTemplates(suffix=suffix, prefix=prefix,
input_variables=[], examples=examples, example_prompt=EXAMPLE_PROMPT,
example_separator='\n').input_variables == ['content', 'new_content']
|
def test_prompttemplate_validation() ->None:
"""Test that few shot works when prefix and suffix are PromptTemplates."""
prefix = PromptTemplate(input_variables=['content'], template=
'This is a test about {content}.')
suffix = PromptTemplate(input_variables=['new_content'], template=
'Now you try to talk about {new_content}.')
examples = [{'question': 'foo', 'answer': 'bar'}, {'question': 'baz',
'answer': 'foo'}]
with pytest.raises(ValueError):
FewShotPromptWithTemplates(suffix=suffix, prefix=prefix,
input_variables=[], examples=examples, example_prompt=
EXAMPLE_PROMPT, example_separator='\n', validate_template=True)
assert FewShotPromptWithTemplates(suffix=suffix, prefix=prefix,
input_variables=[], examples=examples, example_prompt=
EXAMPLE_PROMPT, example_separator='\n').input_variables == ['content',
'new_content']
|
Test that few shot works when prefix and suffix are PromptTemplates.
|
_get_invocation_params
|
"""Get the parameters used to invoke the model FOR THE CALLBACKS."""
return {**self._default_params, **super()._get_invocation_params(stop=stop,
**kwargs)}
|
def _get_invocation_params(self, stop: Optional[List[str]]=None, **kwargs: Any
) ->Dict[str, Any]:
"""Get the parameters used to invoke the model FOR THE CALLBACKS."""
    return {**self._default_params,
        **super()._get_invocation_params(stop=stop, **kwargs)}
|
Get the parameters used to invoke the model FOR THE CALLBACKS.
|
test_call
|
"""Test that call runs."""
twilio = TwilioAPIWrapper()
output = twilio.run('Message', '+16162904619')
assert output
|
def test_call() ->None:
"""Test that call runs."""
twilio = TwilioAPIWrapper()
output = twilio.run('Message', '+16162904619')
assert output
|
Test that call runs.
|
__init__
|
"""Instantiate a prompt cache using Momento as a backend.
Note: to instantiate the cache client passed to MomentoCache,
you must have a Momento account. See https://gomomento.com/.
Args:
cache_client (CacheClient): The Momento cache client.
cache_name (str): The name of the cache to use to store the data.
ttl (Optional[timedelta], optional): The time to live for the cache items.
Defaults to None, ie use the client default TTL.
ensure_cache_exists (bool, optional): Create the cache if it doesn't
exist. Defaults to True.
Raises:
ImportError: Momento python package is not installed.
TypeError: cache_client is not of type momento.CacheClientObject
ValueError: ttl is non-null and negative
"""
try:
from momento import CacheClient
except ImportError:
raise ImportError(
'Could not import momento python package. Please install it with `pip install momento`.'
)
if not isinstance(cache_client, CacheClient):
raise TypeError('cache_client must be a momento.CacheClient object.')
_validate_ttl(ttl)
if ensure_cache_exists:
_ensure_cache_exists(cache_client, cache_name)
self.cache_client = cache_client
self.cache_name = cache_name
self.ttl = ttl
|
def __init__(self, cache_client: momento.CacheClient, cache_name: str, *,
ttl: Optional[timedelta]=None, ensure_cache_exists: bool=True):
"""Instantiate a prompt cache using Momento as a backend.
Note: to instantiate the cache client passed to MomentoCache,
you must have a Momento account. See https://gomomento.com/.
Args:
cache_client (CacheClient): The Momento cache client.
cache_name (str): The name of the cache to use to store the data.
ttl (Optional[timedelta], optional): The time to live for the cache items.
Defaults to None, ie use the client default TTL.
ensure_cache_exists (bool, optional): Create the cache if it doesn't
exist. Defaults to True.
Raises:
ImportError: Momento python package is not installed.
TypeError: cache_client is not of type momento.CacheClientObject
    ValueError: ttl is non-null and negative
"""
try:
from momento import CacheClient
except ImportError:
raise ImportError(
'Could not import momento python package. Please install it with `pip install momento`.'
)
if not isinstance(cache_client, CacheClient):
raise TypeError('cache_client must be a momento.CacheClient object.')
_validate_ttl(ttl)
if ensure_cache_exists:
_ensure_cache_exists(cache_client, cache_name)
self.cache_client = cache_client
self.cache_name = cache_name
self.ttl = ttl
|
Instantiate a prompt cache using Momento as a backend.
Note: to instantiate the cache client passed to MomentoCache,
you must have a Momento account. See https://gomomento.com/.
Args:
cache_client (CacheClient): The Momento cache client.
cache_name (str): The name of the cache to use to store the data.
ttl (Optional[timedelta], optional): The time to live for the cache items.
Defaults to None, ie use the client default TTL.
ensure_cache_exists (bool, optional): Create the cache if it doesn't
exist. Defaults to True.
Raises:
ImportError: Momento python package is not installed.
TypeError: cache_client is not of type momento.CacheClientObject
ValueError: ttl is non-null and negative
|
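A minimal sketch, assuming `cache_client` is an already-constructed momento.CacheClient (any other type raises TypeError):

from datetime import timedelta
cache = MomentoCache(cache_client, cache_name='langchain', ttl=timedelta(hours=1))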
_get_relevant_documents
|
"""
Get the relevant documents for a given query.
Args:
query: The query to search for.
Returns:
A list of relevant documents.
"""
merged_documents = self.merge_documents(query, run_manager)
return merged_documents
|
def _get_relevant_documents(self, query: str, *, run_manager:
CallbackManagerForRetrieverRun) ->List[Document]:
"""
Get the relevant documents for a given query.
Args:
query: The query to search for.
Returns:
A list of relevant documents.
"""
merged_documents = self.merge_documents(query, run_manager)
return merged_documents
|
Get the relevant documents for a given query.
Args:
query: The query to search for.
Returns:
A list of relevant documents.
|
_import_metaphor_search
|
from langchain_community.tools.metaphor_search import MetaphorSearchResults
return MetaphorSearchResults
|
def _import_metaphor_search() ->Any:
from langchain_community.tools.metaphor_search import MetaphorSearchResults
return MetaphorSearchResults
| null |
__add__
|
if isinstance(other, ChatMessageChunk):
if self.role != other.role:
raise ValueError(
'Cannot concatenate ChatMessageChunks with different roles.')
    return self.__class__(role=self.role,
        content=merge_content(self.content, other.content),
        additional_kwargs=self._merge_kwargs_dict(self.additional_kwargs, other.additional_kwargs))
elif isinstance(other, BaseMessageChunk):
    return self.__class__(role=self.role,
        content=merge_content(self.content, other.content),
        additional_kwargs=self._merge_kwargs_dict(self.additional_kwargs, other.additional_kwargs))
else:
return super().__add__(other)
|
def __add__(self, other: Any) ->BaseMessageChunk:
if isinstance(other, ChatMessageChunk):
if self.role != other.role:
raise ValueError(
'Cannot concatenate ChatMessageChunks with different roles.')
        return self.__class__(role=self.role,
            content=merge_content(self.content, other.content),
            additional_kwargs=self._merge_kwargs_dict(self.additional_kwargs, other.additional_kwargs))
elif isinstance(other, BaseMessageChunk):
        return self.__class__(role=self.role,
            content=merge_content(self.content, other.content),
            additional_kwargs=self._merge_kwargs_dict(self.additional_kwargs, other.additional_kwargs))
else:
return super().__add__(other)
| null |
_call
|
"""Run the chain and generate the output.
Args:
inputs (Dict[str, str]): The input values for the chain.
run_manager (Optional[CallbackManagerForChainRun]): The callback
manager for the chain run.
Returns:
Dict[str, Any]: The output values of the chain.
"""
chain_input = {**inputs}
if self.agent_tools:
chain_input['tool_descriptions'] = self._tools_description
_run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager()
raw_output = self.eval_chain.run(chain_input, callbacks=_run_manager.get_child())
return cast(dict, self.output_parser.parse(raw_output))
|
def _call(self, inputs: Dict[str, str], run_manager: Optional[
CallbackManagerForChainRun]=None) ->Dict[str, Any]:
"""Run the chain and generate the output.
Args:
inputs (Dict[str, str]): The input values for the chain.
run_manager (Optional[CallbackManagerForChainRun]): The callback
manager for the chain run.
Returns:
Dict[str, Any]: The output values of the chain.
"""
chain_input = {**inputs}
if self.agent_tools:
chain_input['tool_descriptions'] = self._tools_description
_run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager()
    raw_output = self.eval_chain.run(chain_input, callbacks=_run_manager.get_child())
return cast(dict, self.output_parser.parse(raw_output))
|
Run the chain and generate the output.
Args:
inputs (Dict[str, str]): The input values for the chain.
run_manager (Optional[CallbackManagerForChainRun]): The callback
manager for the chain run.
Returns:
Dict[str, Any]: The output values of the chain.
|
test_retrieval_qa_with_sources_chain_saving_loading
|
"""Test saving and loading."""
loader = DirectoryLoader('docs/extras/modules/', glob='*.txt')
documents = loader.load()
text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
texts = text_splitter.split_documents(documents)
embeddings = OpenAIEmbeddings()
docsearch = FAISS.from_documents(texts, embeddings)
qa = RetrievalQAWithSourcesChain.from_llm(llm=OpenAI(), retriever=docsearch.as_retriever())
result = qa('What did the president say about Ketanji Brown Jackson?')
assert 'question' in result.keys()
assert 'answer' in result.keys()
assert 'sources' in result.keys()
file_path = str(tmp_path) + '/RetrievalQAWithSourcesChain.yaml'
qa.save(file_path=file_path)
qa_loaded = load_chain(file_path, retriever=docsearch.as_retriever())
assert qa_loaded == qa
qa2 = RetrievalQAWithSourcesChain.from_chain_type(llm=OpenAI(), retriever=
docsearch.as_retriever(), chain_type='stuff')
result2 = qa2('What did the president say about Ketanji Brown Jackson?')
assert 'question' in result2.keys()
assert 'answer' in result2.keys()
assert 'sources' in result2.keys()
|
def test_retrieval_qa_with_sources_chain_saving_loading(tmp_path: str) ->None:
"""Test saving and loading."""
loader = DirectoryLoader('docs/extras/modules/', glob='*.txt')
documents = loader.load()
text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
texts = text_splitter.split_documents(documents)
embeddings = OpenAIEmbeddings()
docsearch = FAISS.from_documents(texts, embeddings)
    qa = RetrievalQAWithSourcesChain.from_llm(llm=OpenAI(), retriever=docsearch.as_retriever())
result = qa('What did the president say about Ketanji Brown Jackson?')
assert 'question' in result.keys()
assert 'answer' in result.keys()
assert 'sources' in result.keys()
file_path = str(tmp_path) + '/RetrievalQAWithSourcesChain.yaml'
qa.save(file_path=file_path)
qa_loaded = load_chain(file_path, retriever=docsearch.as_retriever())
assert qa_loaded == qa
qa2 = RetrievalQAWithSourcesChain.from_chain_type(llm=OpenAI(),
retriever=docsearch.as_retriever(), chain_type='stuff')
result2 = qa2('What did the president say about Ketanji Brown Jackson?')
assert 'question' in result2.keys()
assert 'answer' in result2.keys()
assert 'sources' in result2.keys()
|
Test saving and loading.
|
test_agent_iterator_reset
|
"""Test reset functionality of AgentExecutorIterator."""
agent = _get_agent()
agent_iter = agent.iter(inputs='when was langchain made')
assert isinstance(agent_iter, AgentExecutorIterator)
iterator = iter(agent_iter)
next(iterator)
assert agent_iter.iterations == 1
assert agent_iter.time_elapsed > 0.0
assert agent_iter.intermediate_steps
agent_iter.reset()
assert agent_iter.iterations == 0
assert agent_iter.time_elapsed == 0.0
assert not agent_iter.intermediate_steps
|
def test_agent_iterator_reset() ->None:
"""Test reset functionality of AgentExecutorIterator."""
agent = _get_agent()
agent_iter = agent.iter(inputs='when was langchain made')
assert isinstance(agent_iter, AgentExecutorIterator)
iterator = iter(agent_iter)
next(iterator)
assert agent_iter.iterations == 1
assert agent_iter.time_elapsed > 0.0
assert agent_iter.intermediate_steps
agent_iter.reset()
assert agent_iter.iterations == 0
assert agent_iter.time_elapsed == 0.0
assert not agent_iter.intermediate_steps
|
Test reset functionality of AgentExecutorIterator.
|
on_llm_start
|
"""Run when LLM starts."""
self.step += 1
self.llm_starts += 1
self.starts += 1
resp = self._init_resp()
resp.update({'action': 'on_llm_start'})
resp.update(flatten_dict(serialized))
resp.update(self.get_custom_callback_meta())
for prompt in prompts:
prompt_resp = deepcopy(resp)
prompt_resp['prompts'] = prompt
self.on_llm_start_records.append(prompt_resp)
self.action_records.append(prompt_resp)
if self.stream_logs:
self.run.log(prompt_resp)
|
def on_llm_start(self, serialized: Dict[str, Any], prompts: List[str],
    **kwargs: Any) ->None:
"""Run when LLM starts."""
self.step += 1
self.llm_starts += 1
self.starts += 1
resp = self._init_resp()
resp.update({'action': 'on_llm_start'})
resp.update(flatten_dict(serialized))
resp.update(self.get_custom_callback_meta())
for prompt in prompts:
prompt_resp = deepcopy(resp)
prompt_resp['prompts'] = prompt
self.on_llm_start_records.append(prompt_resp)
self.action_records.append(prompt_resp)
if self.stream_logs:
self.run.log(prompt_resp)
|
Run when LLM starts.
|
search
|
issues = self.jira.jql(query)
parsed_issues = self.parse_issues(issues)
parsed_issues_str = 'Found ' + str(len(parsed_issues)) + ' issues:\n' + str(
parsed_issues)
return parsed_issues_str
|
def search(self, query: str) ->str:
issues = self.jira.jql(query)
parsed_issues = self.parse_issues(issues)
    parsed_issues_str = 'Found ' + str(len(parsed_issues)) + ' issues:\n' + str(parsed_issues)
return parsed_issues_str
| null |
__init__
|
"""Create a new MarkdownHeaderTextSplitter.
Args:
headers_to_split_on: Headers we want to track
return_each_line: Return each line w/ associated headers
strip_headers: Strip split headers from the content of the chunk
"""
self.return_each_line = return_each_line
self.headers_to_split_on = sorted(headers_to_split_on, key=lambda split:
len(split[0]), reverse=True)
self.strip_headers = strip_headers
|
def __init__(self, headers_to_split_on: List[Tuple[str, str]],
return_each_line: bool=False, strip_headers: bool=True):
"""Create a new MarkdownHeaderTextSplitter.
Args:
headers_to_split_on: Headers we want to track
return_each_line: Return each line w/ associated headers
strip_headers: Strip split headers from the content of the chunk
"""
self.return_each_line = return_each_line
self.headers_to_split_on = sorted(headers_to_split_on, key=lambda split:
len(split[0]), reverse=True)
self.strip_headers = strip_headers
|
Create a new MarkdownHeaderTextSplitter.
Args:
headers_to_split_on: Headers we want to track
return_each_line: Return each line w/ associated headers
strip_headers: Strip split headers from the content of the chunk
|
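A short construction sketch: the header tuples are (markdown prefix, metadata key) pairs, and the constructor sorts them by prefix length so longer prefixes such as "###" match first; the key names below are hypothetical:

headers_to_split_on = [('#', 'h1'), ('##', 'h2'), ('###', 'h3')]
splitter = MarkdownHeaderTextSplitter(headers_to_split_on, strip_headers=False)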
_fetch_page
|
try:
return self.wiki_client.page(title=page, auto_suggest=False)
except (self.wiki_client.exceptions.PageError,
    self.wiki_client.exceptions.DisambiguationError):
return None
|
def _fetch_page(self, page: str) ->Optional[str]:
try:
return self.wiki_client.page(title=page, auto_suggest=False)
    except (self.wiki_client.exceptions.PageError,
        self.wiki_client.exceptions.DisambiguationError):
return None
| null |
_evaluate_strings
|
"""Evaluate the prediction string.
Args:
prediction (str): The prediction string to evaluate.
input (str, optional): Not used in this evaluator.
reference (str): The reference string to compare against.
Returns:
dict: A dictionary containing the evaluation score.
"""
parsed = self._parse_json(prediction)
label = self._parse_json(cast(str, reference))
if isinstance(label, list):
if not isinstance(parsed, list):
return {'score': 0}
parsed = sorted(parsed, key=lambda x: str(x))
label = sorted(label, key=lambda x: str(x))
return {'score': self.operator(parsed, label)}
|
def _evaluate_strings(self, prediction: str, input: Optional[str]=None,
reference: Optional[str]=None, **kwargs: Any) ->dict:
"""Evaluate the prediction string.
Args:
prediction (str): The prediction string to evaluate.
input (str, optional): Not used in this evaluator.
reference (str): The reference string to compare against.
Returns:
dict: A dictionary containing the evaluation score.
"""
parsed = self._parse_json(prediction)
label = self._parse_json(cast(str, reference))
if isinstance(label, list):
if not isinstance(parsed, list):
return {'score': 0}
parsed = sorted(parsed, key=lambda x: str(x))
label = sorted(label, key=lambda x: str(x))
return {'score': self.operator(parsed, label)}
|
Evaluate the prediction string.
Args:
prediction (str): The prediction string to evaluate.
input (str, optional): Not used in this evaluator.
reference (str): The reference string to compare against.
Returns:
dict: A dictionary containing the evaluation score.
|
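A rough standalone sketch of the comparison above, with json.loads standing in for _parse_json and operator.eq for self.operator (both assumptions): lists are sorted before comparing, so element order does not affect the score.

import json
import operator

def json_equal_score(prediction: str, reference: str, op=operator.eq) -> dict:
    parsed = json.loads(prediction)
    label = json.loads(reference)
    if isinstance(label, list):
        if not isinstance(parsed, list):
            return {'score': 0}
        # Sort both lists by string form so ordering differences are ignored.
        parsed = sorted(parsed, key=lambda x: str(x))
        label = sorted(label, key=lambda x: str(x))
    return {'score': op(parsed, label)}

print(json_equal_score('[2, 1]', '[1, 2]'))  # {'score': True}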
_default_approximate_search_query
|
"""For Approximate k-NN Search, this is the default query."""
return {'size': k, 'query': {'knn': {vector_field: {'vector': query_vector,
'k': k}}}}
|
def _default_approximate_search_query(query_vector: List[float], k: int=4,
vector_field: str='vector_field') ->Dict:
"""For Approximate k-NN Search, this is the default query."""
return {'size': k, 'query': {'knn': {vector_field: {'vector':
query_vector, 'k': k}}}}
|
For Approximate k-NN Search, this is the default query.
|
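For reference, the payload the helper above builds for k=2 and a toy 3-dimensional vector ('vector_field' is the default field name; the values are illustrative):

query_vector = [0.1, 0.2, 0.3]
body = {'size': 2, 'query': {'knn': {'vector_field': {'vector': query_vector, 'k': 2}}}}
print(body)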
_completion_with_retry
|
"""Use tenacity to retry the completion call."""
retry_decorator = _create_retry_decorator(llm, max_retries=llm.max_retries,
run_manager=run_manager)
@retry_decorator
def _completion_with_retry(prompt: LanguageModelInput, is_gemini: bool,
stream: bool, **kwargs: Any) ->Any:
generation_config = kwargs.get('generation_config', {})
if is_gemini:
return llm.client.generate_content(contents=prompt, stream=stream,
generation_config=generation_config)
return llm.client.generate_text(prompt=prompt, **kwargs)
return _completion_with_retry(prompt=prompt, is_gemini=is_gemini, stream=
stream, **kwargs)
|
def _completion_with_retry(llm: GoogleGenerativeAI, prompt:
LanguageModelInput, is_gemini: bool=False, stream: bool=False,
run_manager: Optional[CallbackManagerForLLMRun]=None, **kwargs: Any) ->Any:
"""Use tenacity to retry the completion call."""
retry_decorator = _create_retry_decorator(llm, max_retries=llm.
max_retries, run_manager=run_manager)
@retry_decorator
def _completion_with_retry(prompt: LanguageModelInput, is_gemini: bool,
stream: bool, **kwargs: Any) ->Any:
generation_config = kwargs.get('generation_config', {})
if is_gemini:
return llm.client.generate_content(contents=prompt, stream=
stream, generation_config=generation_config)
return llm.client.generate_text(prompt=prompt, **kwargs)
return _completion_with_retry(prompt=prompt, is_gemini=is_gemini,
stream=stream, **kwargs)
|
Use tenacity to retry the completion call.
|
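The same retry pattern in a self-contained form: tenacity wraps an inner call so transient failures are retried. The flaky function and the ValueError it raises are stand-ins for illustration, not the real Google API error types.

from tenacity import retry, stop_after_attempt, wait_exponential

attempts = {'n': 0}

@retry(stop=stop_after_attempt(3), wait=wait_exponential(multiplier=0.01))
def flaky_completion(prompt: str) -> str:
    # Fail twice, then succeed, to show the retry loop in action.
    attempts['n'] += 1
    if attempts['n'] < 3:
        raise ValueError('transient error')
    return f'completion for: {prompt}'

print(flaky_completion('hello'))  # succeeds on the third attempt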
test_clarifai_with_metadatas_with_scores
|
"""Test end to end construction and scored search."""
texts = ['oof', 'rab', 'zab']
metadatas = [{'page': str(i)} for i in range(len(texts))]
USER_ID = 'minhajul'
APP_ID = 'test-lang-2'
NUMBER_OF_DOCS = 1
docsearch = Clarifai.from_texts(user_id=USER_ID, app_id=APP_ID, texts=texts,
pat=None, number_of_docs=NUMBER_OF_DOCS, metadatas=metadatas)
time.sleep(2.5)
output = docsearch.similarity_search_with_score('oof', k=1)
assert output[0][0] == Document(page_content='oof', metadata={'page': '0'})
assert abs(output[0][1] - 1.0) < 0.001
|
def test_clarifai_with_metadatas_with_scores() ->None:
"""Test end to end construction and scored search."""
texts = ['oof', 'rab', 'zab']
metadatas = [{'page': str(i)} for i in range(len(texts))]
USER_ID = 'minhajul'
APP_ID = 'test-lang-2'
NUMBER_OF_DOCS = 1
docsearch = Clarifai.from_texts(user_id=USER_ID, app_id=APP_ID, texts=
texts, pat=None, number_of_docs=NUMBER_OF_DOCS, metadatas=metadatas)
time.sleep(2.5)
output = docsearch.similarity_search_with_score('oof', k=1)
assert output[0][0] == Document(page_content='oof', metadata={'page': '0'})
assert abs(output[0][1] - 1.0) < 0.001
|
Test end to end construction and scored search.
|
_is_openai_parts_format
|
return 'type' in part
|
def _is_openai_parts_format(part: dict) ->bool:
return 'type' in part
| null |
__init__
|
self.history: List[Dict[str, Union[int, float]]] = [{'step': 0, 'score': 0}]
self.step: int = step
self.i: int = 0
self.window_size: int = window_size
self.queue: deque = deque()
self.sum: float = 0.0
|
def __init__(self, window_size: int, step: int):
self.history: List[Dict[str, Union[int, float]]] = [{'step': 0, 'score': 0}
]
self.step: int = step
self.i: int = 0
self.window_size: int = window_size
self.queue: deque = deque()
self.sum: float = 0.0
| null |
_evaluate_strings
|
"""
Evaluate the string distance between the prediction and the reference.
Args:
prediction (str): The prediction string.
reference (Optional[str], optional): The reference string.
input (Optional[str], optional): The input string.
callbacks (Callbacks, optional): The callbacks to use.
**kwargs: Additional keyword arguments.
Returns:
dict: The evaluation results containing the score.
"""
result = self(inputs={'prediction': prediction, 'reference': reference},
callbacks=callbacks, tags=tags, metadata=metadata, include_run_info=
include_run_info)
return self._prepare_output(result)
|
def _evaluate_strings(self, *, prediction: str, reference: Optional[str]=
None, input: Optional[str]=None, callbacks: Callbacks=None, tags:
Optional[List[str]]=None, metadata: Optional[Dict[str, Any]]=None,
include_run_info: bool=False, **kwargs: Any) ->dict:
"""
Evaluate the string distance between the prediction and the reference.
Args:
prediction (str): The prediction string.
reference (Optional[str], optional): The reference string.
input (Optional[str], optional): The input string.
callbacks (Callbacks, optional): The callbacks to use.
**kwargs: Additional keyword arguments.
Returns:
dict: The evaluation results containing the score.
"""
result = self(inputs={'prediction': prediction, 'reference': reference},
callbacks=callbacks, tags=tags, metadata=metadata, include_run_info
=include_run_info)
return self._prepare_output(result)
|
Evaluate the string distance between the prediction and the reference.
Args:
prediction (str): The prediction string.
reference (Optional[str], optional): The reference string.
input (Optional[str], optional): The input string.
callbacks (Callbacks, optional): The callbacks to use.
**kwargs: Additional keyword arguments.
Returns:
dict: The evaluation results containing the score.
|
__init__
|
"""Initialize with a file path."""
try:
import fitz
except ImportError:
raise ImportError(
'`PyMuPDF` package not found, please install it with `pip install pymupdf`'
)
super().__init__(file_path, headers=headers)
self.extract_images = extract_images
self.text_kwargs = kwargs
|
def __init__(self, file_path: str, *, headers: Optional[Dict]=None,
extract_images: bool=False, **kwargs: Any) ->None:
"""Initialize with a file path."""
try:
import fitz
except ImportError:
raise ImportError(
'`PyMuPDF` package not found, please install it with `pip install pymupdf`'
)
super().__init__(file_path, headers=headers)
self.extract_images = extract_images
self.text_kwargs = kwargs
|
Initialize with a file path.
|
_get_doc_title
|
try:
return re.findall('^#\\s+(.*)', data, re.MULTILINE)[0]
except IndexError:
pass
try:
return re.findall('^(.*)\\n=+\\n', data, re.MULTILINE)[0]
except IndexError:
return file_name
|
def _get_doc_title(data: str, file_name: str) ->str:
try:
return re.findall('^#\\s+(.*)', data, re.MULTILINE)[0]
except IndexError:
pass
try:
return re.findall('^(.*)\\n=+\\n', data, re.MULTILINE)[0]
except IndexError:
return file_name
| null |
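A quick check of the two title regexes above on ATX-style, Setext-style and untitled documents (the sample texts and file name are illustrative):

import re

def get_doc_title(data: str, file_name: str) -> str:
    # ATX heading ("# Title") first, then Setext ("Title" underlined with "="), else fall back.
    try:
        return re.findall('^#\\s+(.*)', data, re.MULTILINE)[0]
    except IndexError:
        pass
    try:
        return re.findall('^(.*)\\n=+\\n', data, re.MULTILINE)[0]
    except IndexError:
        return file_name

print(get_doc_title('# My Doc\nbody', 'fallback.md'))        # My Doc
print(get_doc_title('My Doc\n======\nbody', 'fallback.md'))  # My Doc
print(get_doc_title('no heading here', 'fallback.md'))       # fallback.md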
_import_e2b_data_analysis
|
from langchain_community.tools.e2b_data_analysis.tool import E2BDataAnalysisTool
return E2BDataAnalysisTool
|
def _import_e2b_data_analysis() ->Any:
from langchain_community.tools.e2b_data_analysis.tool import E2BDataAnalysisTool
return E2BDataAnalysisTool
| null |
_generate
|
if self.streaming:
stream_iter = self._stream(messages, stop=stop, run_manager=run_manager,
**kwargs)
return generate_from_stream(stream_iter)
request = get_cohere_chat_request(messages, **self._default_params, **kwargs)
response = self.client.chat(**request)
message = AIMessage(content=response.text)
generation_info = None
if hasattr(response, 'documents'):
generation_info = self._get_generation_info(response)
return ChatResult(generations=[ChatGeneration(message=message,
generation_info=generation_info)])
|
def _generate(self, messages: List[BaseMessage], stop: Optional[List[str]]=
None, run_manager: Optional[CallbackManagerForLLMRun]=None, **kwargs: Any
) ->ChatResult:
if self.streaming:
stream_iter = self._stream(messages, stop=stop, run_manager=
run_manager, **kwargs)
return generate_from_stream(stream_iter)
request = get_cohere_chat_request(messages, **self._default_params, **
kwargs)
response = self.client.chat(**request)
message = AIMessage(content=response.text)
generation_info = None
if hasattr(response, 'documents'):
generation_info = self._get_generation_info(response)
return ChatResult(generations=[ChatGeneration(message=message,
generation_info=generation_info)])
| null |
trim_last_node
|
"""Remove the last node if it exists and has a single incoming edge,
i.e. if removing it would not leave the graph without a "last" node."""
last_node = self.last_node()
if last_node:
if len(self.nodes) == 1 or len([edge for edge in self.edges if edge.
target == last_node.id]) == 1:
self.remove_node(last_node)
|
def trim_last_node(self) ->None:
"""Remove the last node if it exists and has a single incoming edge,
    i.e. if removing it would not leave the graph without a "last" node."""
last_node = self.last_node()
if last_node:
if len(self.nodes) == 1 or len([edge for edge in self.edges if edge
.target == last_node.id]) == 1:
self.remove_node(last_node)
|
Remove the last node if it exists and has a single incoming edge,
i.e. if removing it would not leave the graph without a "last" node.
|
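A toy illustration of the trimming rule with stand-in Node/Edge/Graph types (assumptions for illustration only): the last node is dropped only when it is the sole node or has exactly one incoming edge.

from dataclasses import dataclass, field
from typing import List, Optional

@dataclass
class Node:
    id: str

@dataclass
class Edge:
    source: str
    target: str

@dataclass
class Graph:
    nodes: List[Node] = field(default_factory=list)
    edges: List[Edge] = field(default_factory=list)

    def last_node(self) -> Optional[Node]:
        # Simplification: treat the final node in insertion order as "last".
        return self.nodes[-1] if self.nodes else None

    def trim_last_node(self) -> None:
        last = self.last_node()
        if last and (len(self.nodes) == 1 or
                     len([e for e in self.edges if e.target == last.id]) == 1):
            self.nodes.remove(last)

g = Graph(nodes=[Node('a'), Node('b')], edges=[Edge('a', 'b')])
g.trim_last_node()
print([n.id for n in g.nodes])  # ['a'] -- 'b' had exactly one incoming edge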
__init__
|
self.embedding = embedding
self.index_name = index_name
self.query_field = query_field
self.vector_query_field = vector_query_field
self.distance_strategy = (DistanceStrategy.COSINE if distance_strategy is
None else DistanceStrategy[distance_strategy])
self.strategy = strategy
if es_connection is not None:
self.client = es_connection.options(headers={'user-agent': self.
get_user_agent()})
elif es_url is not None or es_cloud_id is not None:
self.client = ElasticsearchStore.connect_to_elasticsearch(es_url=es_url,
username=es_user, password=es_password, cloud_id=es_cloud_id,
api_key=es_api_key, es_params=es_params)
else:
raise ValueError(
'Either provide a pre-existing Elasticsearch connection, or valid credentials for creating a new connection.'
)
|
def __init__(self, index_name: str, *, embedding: Optional[Embeddings]=None,
es_connection: Optional['Elasticsearch']=None, es_url: Optional[str]=
None, es_cloud_id: Optional[str]=None, es_user: Optional[str]=None,
es_api_key: Optional[str]=None, es_password: Optional[str]=None,
vector_query_field: str='vector', query_field: str='text',
distance_strategy: Optional[Literal[DistanceStrategy.COSINE,
DistanceStrategy.DOT_PRODUCT, DistanceStrategy.EUCLIDEAN_DISTANCE]]=
None, strategy: BaseRetrievalStrategy=ApproxRetrievalStrategy(),
es_params: Optional[Dict[str, Any]]=None):
self.embedding = embedding
self.index_name = index_name
self.query_field = query_field
self.vector_query_field = vector_query_field
self.distance_strategy = (DistanceStrategy.COSINE if distance_strategy is
None else DistanceStrategy[distance_strategy])
self.strategy = strategy
if es_connection is not None:
self.client = es_connection.options(headers={'user-agent': self.
get_user_agent()})
elif es_url is not None or es_cloud_id is not None:
self.client = ElasticsearchStore.connect_to_elasticsearch(es_url=
es_url, username=es_user, password=es_password, cloud_id=
es_cloud_id, api_key=es_api_key, es_params=es_params)
else:
raise ValueError(
'Either provide a pre-existing Elasticsearch connection, or valid credentials for creating a new connection.'
)
| null |
close
|
"""Mock Gradient close."""
return
|
def close(self) ->None:
"""Mock Gradient close."""
return
|
Mock Gradient close.
|
test_implements_string_evaluator_protocol
|
assert issubclass(chain_cls, StringEvaluator)
|
@pytest.mark.parametrize('chain_cls', [QAEvalChain, ContextQAEvalChain,
CotQAEvalChain])
def test_implements_string_evaluator_protocol(chain_cls: Type[LLMChain]
) ->None:
assert issubclass(chain_cls, StringEvaluator)
| null |
test_invoke
|
"""Test invoke tokens from ChatMistralAI"""
llm = ChatMistralAI()
result = llm.invoke("I'm Pickle Rick", config=dict(tags=['foo']))
assert isinstance(result.content, str)
|
def test_invoke() ->None:
"""Test invoke tokens from ChatMistralAI"""
llm = ChatMistralAI()
result = llm.invoke("I'm Pickle Rick", config=dict(tags=['foo']))
assert isinstance(result.content, str)
|
Test invoke tokens from ChatMistralAI
|
run
|
data = {'extracted_text': [{'body': {'text': 'Hello World'}}],
'file_extracted_data': [{'language': 'en'}], 'field_metadata': [{
'metadata': {'metadata': {'paragraphs': [{'end': 66, 'sentences': [{
'start': 1, 'end': 67}]}]}}}]}
return json.dumps(data)
|
def run(self: Any, **args: Any) ->str:
data = {'extracted_text': [{'body': {'text': 'Hello World'}}],
'file_extracted_data': [{'language': 'en'}], 'field_metadata': [{
'metadata': {'metadata': {'paragraphs': [{'end': 66, 'sentences': [
{'start': 1, 'end': 67}]}]}}}]}
return json.dumps(data)
| null |
on_chain_start
|
if self.__has_valid_config is False:
return
try:
name = serialized.get('id', [None, None, None, None])[3]
type = 'chain'
metadata = metadata or {}
agentName = metadata.get('agent_name')
if agentName is None:
agentName = metadata.get('agentName')
if name == 'AgentExecutor' or name == 'PlanAndExecute':
type = 'agent'
if agentName is not None:
type = 'agent'
name = agentName
if parent_run_id is not None:
type = 'chain'
user_id = _get_user_id(metadata)
user_props = _get_user_props(metadata)
input = _parse_input(inputs)
self.__track_event(type, 'start', user_id=user_id, run_id=str(run_id),
parent_run_id=str(parent_run_id) if parent_run_id else None, name=
name, input=input, tags=tags, metadata=metadata, user_props=
user_props, app_id=self.__app_id)
except Exception as e:
logger.error(f'[LLMonitor] An error occurred in on_chain_start: {e}')
|
def on_chain_start(self, serialized: Dict[str, Any], inputs: Dict[str, Any],
*, run_id: UUID, parent_run_id: Union[UUID, None]=None, tags: Union[
List[str], None]=None, metadata: Union[Dict[str, Any], None]=None, **
kwargs: Any) ->Any:
if self.__has_valid_config is False:
return
try:
name = serialized.get('id', [None, None, None, None])[3]
type = 'chain'
metadata = metadata or {}
agentName = metadata.get('agent_name')
if agentName is None:
agentName = metadata.get('agentName')
if name == 'AgentExecutor' or name == 'PlanAndExecute':
type = 'agent'
if agentName is not None:
type = 'agent'
name = agentName
if parent_run_id is not None:
type = 'chain'
user_id = _get_user_id(metadata)
user_props = _get_user_props(metadata)
input = _parse_input(inputs)
self.__track_event(type, 'start', user_id=user_id, run_id=str(
run_id), parent_run_id=str(parent_run_id) if parent_run_id else
None, name=name, input=input, tags=tags, metadata=metadata,
user_props=user_props, app_id=self.__app_id)
except Exception as e:
logger.error(f'[LLMonitor] An error occurred in on_chain_start: {e}')
| null |
test_redis_from_existing
|
"""Test adding a new document"""
docsearch = Redis.from_texts(texts, FakeEmbeddings(), index_name=
TEST_INDEX_NAME, redis_url=TEST_REDIS_URL)
schema: Dict = docsearch.schema
docsearch.write_schema('test_schema.yml')
docsearch2 = Redis.from_existing_index(FakeEmbeddings(), index_name=
TEST_INDEX_NAME, redis_url=TEST_REDIS_URL, schema=schema)
output = docsearch2.similarity_search('foo', k=1, return_metadata=False)
assert output == TEST_SINGLE_RESULT
|
def test_redis_from_existing(texts: List[str]) ->None:
"""Test adding a new document"""
docsearch = Redis.from_texts(texts, FakeEmbeddings(), index_name=
TEST_INDEX_NAME, redis_url=TEST_REDIS_URL)
schema: Dict = docsearch.schema
docsearch.write_schema('test_schema.yml')
docsearch2 = Redis.from_existing_index(FakeEmbeddings(), index_name=
TEST_INDEX_NAME, redis_url=TEST_REDIS_URL, schema=schema)
output = docsearch2.similarity_search('foo', k=1, return_metadata=False)
assert output == TEST_SINGLE_RESULT
|
Test adding a new document
|
_Module
|
for stmt in tree.body:
self.dispatch(stmt)
|
def _Module(self, tree):
for stmt in tree.body:
self.dispatch(stmt)
| null |
__init__
|
super().__init__()
if not collection_name:
raise ValueError(
'collection_name must be specified when using ZepVectorStore.')
try:
from zep_python import ZepClient
except ImportError:
raise ImportError(
'Could not import zep-python python package. Please install it with `pip install zep-python`.'
)
self._client = ZepClient(api_url, api_key=api_key)
self.collection_name = collection_name
if config and config.name != self.collection_name:
config.name = self.collection_name
self._collection_config = config
self._collection = self._load_collection()
self._embedding = embedding
|
def __init__(self, collection_name: str, api_url: str, *, api_key: Optional
[str]=None, config: Optional[CollectionConfig]=None, embedding:
Optional[Embeddings]=None) ->None:
super().__init__()
if not collection_name:
raise ValueError(
'collection_name must be specified when using ZepVectorStore.')
try:
from zep_python import ZepClient
except ImportError:
raise ImportError(
'Could not import zep-python python package. Please install it with `pip install zep-python`.'
)
self._client = ZepClient(api_url, api_key=api_key)
self.collection_name = collection_name
if config and config.name != self.collection_name:
config.name = self.collection_name
self._collection_config = config
self._collection = self._load_collection()
self._embedding = embedding
| null |
get_user_agent
|
from langchain_community import __version__
return f'langchain/{__version__}'
|
@staticmethod
def get_user_agent() ->str:
from langchain_community import __version__
return f'langchain/{__version__}'
| null |
on_chain_error
|
self.on_chain_error_common()
|
def on_chain_error(self, *args: Any, **kwargs: Any) ->Any:
self.on_chain_error_common()
| null |
similarity_search_by_vector
|
"""Return docs most similar to embedding vector.
Args:
embedding: Embedding to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
filter: Filter on metadata properties, e.g.
{
"str_property": "foo",
"int_property": 123
}
brute_force: Whether to use brute force search. Defaults to False.
fraction_lists_to_search: Optional percentage of lists to search,
must be in range 0.0 and 1.0, exclusive.
            If None, uses service's default which is 0.05.
Returns:
List of Documents most similar to the query vector.
"""
tuples = self.similarity_search_with_score_by_vector(embedding, k, filter,
brute_force, fraction_lists_to_search, **kwargs)
return [i[0] for i in tuples]
|
def similarity_search_by_vector(self, embedding: List[float], k: int=
DEFAULT_TOP_K, filter: Optional[Dict[str, Any]]=None, brute_force: bool
=False, fraction_lists_to_search: Optional[float]=None, **kwargs: Any
) ->List[Document]:
"""Return docs most similar to embedding vector.
Args:
embedding: Embedding to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
filter: Filter on metadata properties, e.g.
{
"str_property": "foo",
"int_property": 123
}
brute_force: Whether to use brute force search. Defaults to False.
fraction_lists_to_search: Optional percentage of lists to search,
must be in range 0.0 and 1.0, exclusive.
            If None, uses service's default which is 0.05.
Returns:
List of Documents most similar to the query vector.
"""
tuples = self.similarity_search_with_score_by_vector(embedding, k,
filter, brute_force, fraction_lists_to_search, **kwargs)
return [i[0] for i in tuples]
|
Return docs most similar to embedding vector.
Args:
embedding: Embedding to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
filter: Filter on metadata properties, e.g.
{
"str_property": "foo",
"int_property": 123
}
brute_force: Whether to use brute force search. Defaults to False.
fraction_lists_to_search: Optional percentage of lists to search,
must be in range 0.0 and 1.0, exclusive.
        If None, uses service's default which is 0.05.
Returns:
List of Documents most similar to the query vector.
|
config_with_context
|
"""Patch a runnable config with context getters and setters.
Args:
config: The runnable config.
steps: The runnable steps.
Returns:
The patched runnable config.
"""
return _config_with_context(config, steps, _setter, _getter, threading.Event)
|
def config_with_context(config: RunnableConfig, steps: List[Runnable]
) ->RunnableConfig:
"""Patch a runnable config with context getters and setters.
Args:
config: The runnable config.
steps: The runnable steps.
Returns:
The patched runnable config.
"""
return _config_with_context(config, steps, _setter, _getter, threading.
Event)
|
Patch a runnable config with context getters and setters.
Args:
config: The runnable config.
steps: The runnable steps.
Returns:
The patched runnable config.
|
_Attribute
|
self.dispatch(t.value)
if isinstance(t.value, ast.Constant) and isinstance(t.value.value, int):
self.write(' ')
self.write('.')
self.write(t.attr)
|
def _Attribute(self, t):
self.dispatch(t.value)
if isinstance(t.value, ast.Constant) and isinstance(t.value.value, int):
self.write(' ')
self.write('.')
self.write(t.attr)
| null |
_import_beam
|
from langchain_community.llms.beam import Beam
return Beam
|
def _import_beam() ->Any:
from langchain_community.llms.beam import Beam
return Beam
| null |
_restore_template_vars
|
"""Restore template variables replaced with placeholders to original values."""
if isinstance(obj, str):
for placeholder, value in placeholders.items():
obj = obj.replace(placeholder, f'{{{{{value}}}}}')
elif isinstance(obj, dict):
for key, value in obj.items():
obj[key] = self._restore_template_vars(value, placeholders)
elif isinstance(obj, list):
for i, item in enumerate(obj):
obj[i] = self._restore_template_vars(item, placeholders)
return obj
|
def _restore_template_vars(self, obj: Any, placeholders: Dict[str, str]) ->Any:
"""Restore template variables replaced with placeholders to original values."""
if isinstance(obj, str):
for placeholder, value in placeholders.items():
obj = obj.replace(placeholder, f'{{{{{value}}}}}')
elif isinstance(obj, dict):
for key, value in obj.items():
obj[key] = self._restore_template_vars(value, placeholders)
elif isinstance(obj, list):
for i, item in enumerate(obj):
obj[i] = self._restore_template_vars(item, placeholders)
return obj
|
Restore template variables replaced with placeholders to original values.
|
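A self-contained sketch of the same recursion: placeholders are swapped back to double-brace template variables through nested dicts and lists (the placeholder token VAR_0 is made up for the example).

from typing import Any, Dict

def restore_template_vars(obj: Any, placeholders: Dict[str, str]) -> Any:
    # Recursively replace each placeholder token with "{{original_name}}".
    if isinstance(obj, str):
        for placeholder, value in placeholders.items():
            obj = obj.replace(placeholder, f'{{{{{value}}}}}')
    elif isinstance(obj, dict):
        for key, value in obj.items():
            obj[key] = restore_template_vars(value, placeholders)
    elif isinstance(obj, list):
        for i, item in enumerate(obj):
            obj[i] = restore_template_vars(item, placeholders)
    return obj

data = {'prompt': 'Hello VAR_0', 'examples': ['VAR_0 again']}
print(restore_template_vars(data, {'VAR_0': 'name'}))
# {'prompt': 'Hello {{name}}', 'examples': ['{{name}} again']}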
clear
|
"""Clear memory contents."""
self.chat_memory.clear()
|
def clear(self) ->None:
"""Clear memory contents."""
self.chat_memory.clear()
|
Clear memory contents.
|
_kwargs_post_fine_tune_request
|
"""Build the kwargs for the Post request, used by sync
Args:
prompt (str): prompt used in query
kwargs (dict): model kwargs in payload
Returns:
    Dict[str, Union[str,dict]]: the request url, headers and json payload
"""
_model_kwargs = self.model_kwargs or {}
_params = {**_model_kwargs, **kwargs}
multipliers = _params.get('multipliers', None)
return dict(url=f'{self.gradient_api_url}/models/{self.model_id}/fine-tune',
headers={'authorization': f'Bearer {self.gradient_access_token}',
'x-gradient-workspace-id': f'{self.gradient_workspace_id}', 'accept':
'application/json', 'content-type': 'application/json'}, json=dict(
samples=tuple({'inputs': input} for input in inputs) if multipliers is
None else tuple({'inputs': input, 'fineTuningParameters': {'multiplier':
multiplier}} for input, multiplier in zip(inputs, multipliers))))
|
def _kwargs_post_fine_tune_request(self, inputs: Sequence[str], kwargs:
Mapping[str, Any]) ->Mapping[str, Any]:
"""Build the kwargs for the Post request, used by sync
Args:
prompt (str): prompt used in query
kwargs (dict): model kwargs in payload
Returns:
        Dict[str, Union[str,dict]]: the request url, headers and json payload
"""
_model_kwargs = self.model_kwargs or {}
_params = {**_model_kwargs, **kwargs}
multipliers = _params.get('multipliers', None)
return dict(url=
f'{self.gradient_api_url}/models/{self.model_id}/fine-tune',
headers={'authorization': f'Bearer {self.gradient_access_token}',
'x-gradient-workspace-id': f'{self.gradient_workspace_id}',
'accept': 'application/json', 'content-type': 'application/json'},
json=dict(samples=tuple({'inputs': input} for input in inputs) if
multipliers is None else tuple({'inputs': input,
'fineTuningParameters': {'multiplier': multiplier}} for input,
multiplier in zip(inputs, multipliers))))
|
Build the kwargs for the Post request, used by sync
Args:
prompt (str): prompt used in query
kwargs (dict): model kwargs in payload
Returns:
    Dict[str, Union[str,dict]]: the request url, headers and json payload
|
_search
|
result = self.database.search(index=','.join(indices), body=query)
return str(result)
|
def _search(self, indices: List[str], query: str) ->str:
result = self.database.search(index=','.join(indices), body=query)
return str(result)
| null |
list_keys
|
"""List records in the database based on the provided filters.
Args:
before: Filter to list records updated before this time.
after: Filter to list records updated after this time.
group_ids: Filter to list records with specific group IDs.
limit: optional limit on the number of records to return.
Returns:
A list of keys for the matching records.
"""
|
@abstractmethod
def list_keys(self, *, before: Optional[float]=None, after: Optional[float]
=None, group_ids: Optional[Sequence[str]]=None, limit: Optional[int]=None
) ->List[str]:
"""List records in the database based on the provided filters.
Args:
before: Filter to list records updated before this time.
after: Filter to list records updated after this time.
group_ids: Filter to list records with specific group IDs.
limit: optional limit on the number of records to return.
Returns:
A list of keys for the matching records.
"""
|
List records in the database based on the provided filters.
Args:
before: Filter to list records updated before this time.
after: Filter to list records updated after this time.
group_ids: Filter to list records with specific group IDs.
limit: optional limit on the number of records to return.
Returns:
A list of keys for the matching records.
|
on_chain_end
|
"""Run when chain ends running."""
self.step += 1
self.chain_ends += 1
self.ends += 1
resp = self._init_resp()
resp.update({'action': 'on_chain_end', 'outputs': outputs['output']})
resp.update(self.get_custom_callback_meta())
self.on_chain_end_records.append(resp)
self.action_records.append(resp)
if self.stream_logs:
self.run.log(resp)
|
def on_chain_end(self, outputs: Dict[str, Any], **kwargs: Any) ->None:
"""Run when chain ends running."""
self.step += 1
self.chain_ends += 1
self.ends += 1
resp = self._init_resp()
resp.update({'action': 'on_chain_end', 'outputs': outputs['output']})
resp.update(self.get_custom_callback_meta())
self.on_chain_end_records.append(resp)
self.action_records.append(resp)
if self.stream_logs:
self.run.log(resp)
|
Run when chain ends running.
|
__init__
|
"""Initialize with Weaviate client."""
try:
import weaviate
except ImportError:
raise ImportError(
'Could not import weaviate python package. Please install it with `pip install weaviate-client`.'
)
if not isinstance(client, weaviate.Client):
raise ValueError(
f'client should be an instance of weaviate.Client, got {type(client)}')
self._client = client
self._index_name = index_name
self._embedding = embedding
self._text_key = text_key
self._query_attrs = [self._text_key]
self.relevance_score_fn = relevance_score_fn
self._by_text = by_text
if attributes is not None:
self._query_attrs.extend(attributes)
|
def __init__(self, client: Any, index_name: str, text_key: str, embedding:
Optional[Embeddings]=None, attributes: Optional[List[str]]=None,
relevance_score_fn: Optional[Callable[[float], float]]=
_default_score_normalizer, by_text: bool=True):
"""Initialize with Weaviate client."""
try:
import weaviate
except ImportError:
raise ImportError(
'Could not import weaviate python package. Please install it with `pip install weaviate-client`.'
)
if not isinstance(client, weaviate.Client):
raise ValueError(
f'client should be an instance of weaviate.Client, got {type(client)}'
)
self._client = client
self._index_name = index_name
self._embedding = embedding
self._text_key = text_key
self._query_attrs = [self._text_key]
self.relevance_score_fn = relevance_score_fn
self._by_text = by_text
if attributes is not None:
self._query_attrs.extend(attributes)
|
Initialize with Weaviate client.
|
test_chat_fireworks_multiple_completions
|
"""Test ChatFireworks wrapper with multiple completions."""
chat = ChatFireworks(model_kwargs={'n': 5})
message = HumanMessage(content='Hello')
response = chat._generate([message])
assert isinstance(response, ChatResult)
assert len(response.generations) == 5
for generation in response.generations:
assert isinstance(generation.message, BaseMessage)
assert isinstance(generation.message.content, str)
|
@pytest.mark.scheduled
def test_chat_fireworks_multiple_completions() ->None:
"""Test ChatFireworks wrapper with multiple completions."""
chat = ChatFireworks(model_kwargs={'n': 5})
message = HumanMessage(content='Hello')
response = chat._generate([message])
assert isinstance(response, ChatResult)
assert len(response.generations) == 5
for generation in response.generations:
assert isinstance(generation.message, BaseMessage)
assert isinstance(generation.message.content, str)
|
Test ChatFireworks wrapper with multiple completions.
|
_litm_reordering
|
"""Lost in the middle reorder: the less relevant documents will be at the
middle of the list and more relevant elements at beginning / end.
See: https://arxiv.org/abs//2307.03172"""
documents.reverse()
reordered_result = []
for i, value in enumerate(documents):
if i % 2 == 1:
reordered_result.append(value)
else:
reordered_result.insert(0, value)
return reordered_result
|
def _litm_reordering(documents: List[Document]) ->List[Document]:
"""Lost in the middle reorder: the less relevant documents will be at the
middle of the list and more relevant elements at beginning / end.
See: https://arxiv.org/abs//2307.03172"""
documents.reverse()
reordered_result = []
for i, value in enumerate(documents):
if i % 2 == 1:
reordered_result.append(value)
else:
reordered_result.insert(0, value)
return reordered_result
|
Lost in the middle reorder: the less relevant documents will be at the
middle of the list and more relevant elements at beginning / end.
See: https://arxiv.org/abs//2307.03172
|
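A quick worked example of the reorder on plain strings (d1 is assumed most relevant on input): the interleaving pushes the least relevant items to the middle and keeps the most relevant at the two ends.

from typing import List

def litm_reordering(documents: List[str]) -> List[str]:
    documents = list(reversed(documents))
    reordered: List[str] = []
    for i, value in enumerate(documents):
        if i % 2 == 1:
            reordered.append(value)
        else:
            reordered.insert(0, value)
    return reordered

print(litm_reordering(['d1', 'd2', 'd3', 'd4', 'd5']))
# ['d1', 'd3', 'd5', 'd4', 'd2'] -- d1 and d2 end up at the ends, d5 in the middle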
_custom_parser
|
"""
The LLM response for `action_input` may be a multiline
string containing unescaped newlines, tabs or quotes. This function
replaces those characters with their escaped counterparts.
(newlines in JSON must be double-escaped: `\\n`)
"""
if isinstance(multiline_string, (bytes, bytearray)):
multiline_string = multiline_string.decode()
multiline_string = re.sub('("action_input"\\:\\s*")(.*)(")',
_replace_new_line, multiline_string, flags=re.DOTALL)
return multiline_string
|
def _custom_parser(multiline_string: str) ->str:
"""
The LLM response for `action_input` may be a multiline
string containing unescaped newlines, tabs or quotes. This function
replaces those characters with their escaped counterparts.
(newlines in JSON must be double-escaped: `\\n`)
"""
if isinstance(multiline_string, (bytes, bytearray)):
multiline_string = multiline_string.decode()
multiline_string = re.sub('("action_input"\\:\\s*")(.*)(")',
_replace_new_line, multiline_string, flags=re.DOTALL)
return multiline_string
|
The LLM response for `action_input` may be a multiline
string containing unescaped newlines, tabs or quotes. This function
replaces those characters with their escaped counterparts.
(newlines in JSON must be double-escaped: `\n`)
|
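A small demonstration of the parser with a literal newline inside the value; the _replace_new_line helper is not shown in the source, so its behaviour (escaping raw newlines and tabs inside the matched value) is an assumption here.

import re

def _replace_new_line(match: re.Match) -> str:
    # Assumed behaviour of the helper: escape raw whitespace inside the value only.
    value = match.group(2).replace('\n', '\\n').replace('\t', '\\t')
    return match.group(1) + value + match.group(3)

def custom_parser(multiline_string: str) -> str:
    return re.sub('("action_input"\\:\\s*")(.*)(")', _replace_new_line,
                  multiline_string, flags=re.DOTALL)

raw = '{"action": "Final Answer", "action_input": "line one\nline two"}'
print(custom_parser(raw))
# The raw newline inside action_input is now the two characters "\n"; the rest is untouched.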
yield_keys
|
"""Yield keys in the store."""
if prefix:
pattern = self._get_prefixed_key(prefix)
else:
pattern = self._get_prefixed_key('*')
cursor, keys = self.client.scan(0, match=pattern)
for key in keys:
if self.namespace:
relative_key = key[len(self.namespace) + 1:]
yield relative_key
else:
yield key
while cursor != 0:
cursor, keys = self.client.scan(cursor, match=pattern)
for key in keys:
if self.namespace:
relative_key = key[len(self.namespace) + 1:]
yield relative_key
else:
yield key
|
def yield_keys(self, *, prefix: Optional[str]=None) ->Iterator[str]:
"""Yield keys in the store."""
if prefix:
pattern = self._get_prefixed_key(prefix)
else:
pattern = self._get_prefixed_key('*')
cursor, keys = self.client.scan(0, match=pattern)
for key in keys:
if self.namespace:
relative_key = key[len(self.namespace) + 1:]
yield relative_key
else:
yield key
while cursor != 0:
cursor, keys = self.client.scan(cursor, match=pattern)
for key in keys:
if self.namespace:
relative_key = key[len(self.namespace) + 1:]
yield relative_key
else:
yield key
|
Yield keys in the store.
|
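The same cursor loop in a self-contained form, with a fake two-keys-per-page SCAN standing in for the real redis-py client (the fake client and its paging are assumptions for illustration): keys are yielded relative to the namespace prefix.

from typing import Iterator, List, Tuple

class FakeRedis:
    """Tiny stand-in for a Redis client's SCAN, paging two keys at a time."""
    def __init__(self, keys: List[str]) -> None:
        self._keys = keys

    def scan(self, cursor: int, match: str = '*') -> Tuple[int, List[str]]:
        # The match pattern is accepted but ignored in this fake.
        page = self._keys[cursor:cursor + 2]
        next_cursor = cursor + 2 if cursor + 2 < len(self._keys) else 0
        return next_cursor, page

def yield_keys(client: FakeRedis, namespace: str) -> Iterator[str]:
    cursor, keys = client.scan(0, match=f'{namespace}/*')
    yield from (k[len(namespace) + 1:] for k in keys)
    while cursor != 0:
        cursor, keys = client.scan(cursor, match=f'{namespace}/*')
        yield from (k[len(namespace) + 1:] for k in keys)

client = FakeRedis(['docs/a', 'docs/b', 'docs/c'])
print(list(yield_keys(client, 'docs')))  # ['a', 'b', 'c']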
test_opensearch_with_custom_field_name_appx_false
|
"""Test Approximate Search with custom field name appx true."""
text_input = ['add', 'test', 'text', 'method']
docsearch = OpenSearchVectorSearch.from_texts(text_input, FakeEmbeddings(),
opensearch_url=DEFAULT_OPENSEARCH_URL)
output = docsearch.similarity_search('add', k=1)
assert output == [Document(page_content='add')]
|
def test_opensearch_with_custom_field_name_appx_false() ->None:
"""Test Approximate Search with custom field name appx true."""
text_input = ['add', 'test', 'text', 'method']
docsearch = OpenSearchVectorSearch.from_texts(text_input,
FakeEmbeddings(), opensearch_url=DEFAULT_OPENSEARCH_URL)
output = docsearch.similarity_search('add', k=1)
assert output == [Document(page_content='add')]
|
Test Approximate Search with custom field name appx false.
|
test_all
|
"""Use to catch obvious breaking changes."""
assert __all__ == sorted(__all__, key=str.lower)
assert __all__ == ['aindex', 'GraphIndexCreator', 'index', 'IndexingResult',
'SQLRecordManager', 'VectorstoreIndexCreator']
|
def test_all() ->None:
"""Use to catch obvious breaking changes."""
assert __all__ == sorted(__all__, key=str.lower)
assert __all__ == ['aindex', 'GraphIndexCreator', 'index',
'IndexingResult', 'SQLRecordManager', 'VectorstoreIndexCreator']
|
Use to catch obvious breaking changes.
|
set_meta
|
"""document and name are reserved arcee keys. Anything else is metadata"""
values['_is_meta'] = values.get('field_name') not in ['document', 'name']
return values
|
@root_validator()
def set_meta(cls, values: Dict) ->Dict:
"""document and name are reserved arcee keys. Anything else is metadata"""
values['_is_meta'] = values.get('field_name') not in ['document', 'name']
return values
|
document and name are reserved arcee keys. Anything else is metadata
|
test_trace_as_group_with_env_set
|
from langchain.chains.llm import LLMChain
os.environ['LANGCHAIN_TRACING_V2'] = 'true'
llm = OpenAI(temperature=0.9)
prompt = PromptTemplate(input_variables=['product'], template=
'What is a good name for a company that makes {product}?')
chain = LLMChain(llm=llm, prompt=prompt)
with trace_as_chain_group('my_group_env_set', inputs={'input': 'cars'}
) as group_manager:
chain.run(product='cars', callbacks=group_manager)
chain.run(product='computers', callbacks=group_manager)
final_res = chain.run(product='toys', callbacks=group_manager)
group_manager.on_chain_end({'output': final_res})
with trace_as_chain_group('my_group_2_env_set', inputs={'input': 'toys'}
) as group_manager:
final_res = chain.run(product='toys', callbacks=group_manager)
group_manager.on_chain_end({'output': final_res})
|
def test_trace_as_group_with_env_set() ->None:
from langchain.chains.llm import LLMChain
os.environ['LANGCHAIN_TRACING_V2'] = 'true'
llm = OpenAI(temperature=0.9)
prompt = PromptTemplate(input_variables=['product'], template=
'What is a good name for a company that makes {product}?')
chain = LLMChain(llm=llm, prompt=prompt)
with trace_as_chain_group('my_group_env_set', inputs={'input': 'cars'}
) as group_manager:
chain.run(product='cars', callbacks=group_manager)
chain.run(product='computers', callbacks=group_manager)
final_res = chain.run(product='toys', callbacks=group_manager)
group_manager.on_chain_end({'output': final_res})
with trace_as_chain_group('my_group_2_env_set', inputs={'input': 'toys'}
) as group_manager:
final_res = chain.run(product='toys', callbacks=group_manager)
group_manager.on_chain_end({'output': final_res})
| null |
_extract_fields
|
"""Grab the existing fields from the Collection"""
from transwarp_hippo_api.hippo_client import HippoTable
if isinstance(self.col, HippoTable):
schema = self.col.schema
logger.debug(f'[_extract_fields] schema:{schema}')
for x in schema:
self.fields.append(x.name)
logger.debug(f'04 [_extract_fields] fields:{self.fields}')
|
def _extract_fields(self) ->None:
"""Grab the existing fields from the Collection"""
from transwarp_hippo_api.hippo_client import HippoTable
if isinstance(self.col, HippoTable):
schema = self.col.schema
logger.debug(f'[_extract_fields] schema:{schema}')
for x in schema:
self.fields.append(x.name)
logger.debug(f'04 [_extract_fields] fields:{self.fields}')
|
Grab the existing fields from the Collection
|
test_ifixit_loader_device
|
web_path = 'https://www.ifixit.com/Device/Standard_iPad'
loader = IFixitLoader(web_path)
""" Teardowns are just guides by a different name """
assert loader.page_type == 'Device'
assert loader.id == 'Standard_iPad'
|
def test_ifixit_loader_device() ->None:
web_path = 'https://www.ifixit.com/Device/Standard_iPad'
loader = IFixitLoader(web_path)
""" Teardowns are just guides by a different name """
assert loader.page_type == 'Device'
assert loader.id == 'Standard_iPad'
| null |
_import_google_serper_tool_GoogleSerperRun
|
from langchain_community.tools.google_serper.tool import GoogleSerperRun
return GoogleSerperRun
|
def _import_google_serper_tool_GoogleSerperRun() ->Any:
from langchain_community.tools.google_serper.tool import GoogleSerperRun
return GoogleSerperRun
| null |
_import_file_management_DeleteFileTool
|
from langchain_community.tools.file_management import DeleteFileTool
return DeleteFileTool
|
def _import_file_management_DeleteFileTool() ->Any:
from langchain_community.tools.file_management import DeleteFileTool
return DeleteFileTool
| null |
_display_aggregate_results
|
if _is_jupyter_environment():
from IPython.display import HTML, display
display(HTML('<h3>Experiment Results:</h3>'))
display(aggregate_results)
else:
formatted_string = aggregate_results.to_string(float_format=lambda x:
f'{x:.2f}', justify='right')
print('\n Experiment Results:')
print(formatted_string)
|
def _display_aggregate_results(aggregate_results: pd.DataFrame) ->None:
if _is_jupyter_environment():
from IPython.display import HTML, display
display(HTML('<h3>Experiment Results:</h3>'))
display(aggregate_results)
else:
formatted_string = aggregate_results.to_string(float_format=lambda
x: f'{x:.2f}', justify='right')
print('\n Experiment Results:')
print(formatted_string)
| null |
test_visit_comparison
|
comparator, value, expected = triplet
actual = DEFAULT_TRANSLATOR.visit_comparison(Comparison(comparator=
comparator, attribute='foo', value=value))
assert expected == actual
|
@pytest.mark.parametrize('triplet', [(Comparator.EQ, 2, 'foo = 2'), (
Comparator.LT, 2, 'foo < 2'), (Comparator.LTE, 2, 'foo <= 2'), (
Comparator.GT, 2, 'foo > 2'), (Comparator.GTE, 2, 'foo >= 2'), (
Comparator.LIKE, 'bar', "foo LIKE '%bar%'")])
def test_visit_comparison(triplet: Tuple[Comparator, Any, str]) ->None:
comparator, value, expected = triplet
actual = DEFAULT_TRANSLATOR.visit_comparison(Comparison(comparator=
comparator, attribute='foo', value=value))
assert expected == actual
| null |
_run
|
"""Use the tool."""
query_params = {'text': query, 'language': self.language}
return self._call_eden_ai(query_params)
|
def _run(self, query: str, run_manager: Optional[CallbackManagerForToolRun]
=None) ->str:
"""Use the tool."""
query_params = {'text': query, 'language': self.language}
return self._call_eden_ai(query_params)
|
Use the tool.
|