Columns:
- method_name: string, length 1 to 78
- method_body: string, length 3 to 9.66k
- full_code: string, length 31 to 10.7k
- docstring: string, length 4 to 4.74k (null where the method has no docstring)
format_messages
"""Format messages from kwargs. Args: **kwargs: Keyword arguments to use for formatting. Returns: List of BaseMessage. """ value = kwargs.get(self.variable_name, []) if self.optional else kwargs[self .variable_name] if not isinstance(value, list): raise ValueError( f'variable {self.variable_name} should be a list of base messages, got {value}' ) for v in value: if not isinstance(v, BaseMessage): raise ValueError( f'variable {self.variable_name} should be a list of base messages, got {value}' ) return value
def format_messages(self, **kwargs: Any) ->List[BaseMessage]: """Format messages from kwargs. Args: **kwargs: Keyword arguments to use for formatting. Returns: List of BaseMessage. """ value = kwargs.get(self.variable_name, []) if self.optional else kwargs[ self.variable_name] if not isinstance(value, list): raise ValueError( f'variable {self.variable_name} should be a list of base messages, got {value}' ) for v in value: if not isinstance(v, BaseMessage): raise ValueError( f'variable {self.variable_name} should be a list of base messages, got {value}' ) return value
Format messages from kwargs. Args: **kwargs: Keyword arguments to use for formatting. Returns: List of BaseMessage.
_run
return f'{arg1} {arg2} {arg3}'
def _run(self, arg1: int, arg2: bool, arg3: Optional[dict]=None) ->str: return f'{arg1} {arg2} {arg3}'
null
lazy_load
"""Load Couchbase data into Document objects lazily.""" from datetime import timedelta self.cluster.wait_until_ready(timedelta(seconds=5)) result = self.cluster.query(self.query) for row in result: metadata_fields = self.metadata_fields page_content_fields = self.page_content_fields if not page_content_fields: page_content_fields = list(row.keys()) if not metadata_fields: metadata_fields = [] metadata = {field: row[field] for field in metadata_fields} document = '\n'.join(f'{k}: {v}' for k, v in row.items() if k in page_content_fields) yield Document(page_content=document, metadata=metadata)
def lazy_load(self) ->Iterator[Document]: """Load Couchbase data into Document objects lazily.""" from datetime import timedelta self.cluster.wait_until_ready(timedelta(seconds=5)) result = self.cluster.query(self.query) for row in result: metadata_fields = self.metadata_fields page_content_fields = self.page_content_fields if not page_content_fields: page_content_fields = list(row.keys()) if not metadata_fields: metadata_fields = [] metadata = {field: row[field] for field in metadata_fields} document = '\n'.join(f'{k}: {v}' for k, v in row.items() if k in page_content_fields) yield Document(page_content=document, metadata=metadata)
Load Couchbase data into Document objects lazily.
_run
mailbox = self.account.mailbox() if folder != '': mailbox = mailbox.get_folder(folder_name=folder) query = mailbox.q().search(query) messages = mailbox.get_messages(limit=max_results, query=query) output_messages = [] for message in messages: output_message = {} output_message['from'] = message.sender if truncate: output_message['body'] = message.body_preview[:truncate_limit] else: output_message['body'] = clean_body(message.body) output_message['subject'] = message.subject output_message['date'] = message.modified.strftime(UTC_FORMAT) output_message['to'] = [] for recipient in message.to._recipients: output_message['to'].append(str(recipient)) output_message['cc'] = [] for recipient in message.cc._recipients: output_message['cc'].append(str(recipient)) output_message['bcc'] = [] for recipient in message.bcc._recipients: output_message['bcc'].append(str(recipient)) output_messages.append(output_message) return output_messages
def _run(self, query: str, folder: str='', max_results: int=10, truncate: bool=True, run_manager: Optional[CallbackManagerForToolRun]=None, truncate_limit: int=150) ->List[Dict[str, Any]]: mailbox = self.account.mailbox() if folder != '': mailbox = mailbox.get_folder(folder_name=folder) query = mailbox.q().search(query) messages = mailbox.get_messages(limit=max_results, query=query) output_messages = [] for message in messages: output_message = {} output_message['from'] = message.sender if truncate: output_message['body'] = message.body_preview[:truncate_limit] else: output_message['body'] = clean_body(message.body) output_message['subject'] = message.subject output_message['date'] = message.modified.strftime(UTC_FORMAT) output_message['to'] = [] for recipient in message.to._recipients: output_message['to'].append(str(recipient)) output_message['cc'] = [] for recipient in message.cc._recipients: output_message['cc'].append(str(recipient)) output_message['bcc'] = [] for recipient in message.bcc._recipients: output_message['bcc'].append(str(recipient)) output_messages.append(output_message) return output_messages
null
get_num_tokens
"""Return number of tokens in text.""" return len(text.split())
def get_num_tokens(self, text: str) ->int: """Return number of tokens in text.""" return len(text.split())
Return number of tokens in text.
_config
config = ensure_config(config)
if config.get('run_name') is None:
    try:
        run_name = callable.__name__
    except AttributeError:
        run_name = None
    if run_name is not None:
        return patch_config(config, run_name=run_name)
return config

def _config(self, config: Optional[RunnableConfig], callable: Callable[..., Any]) -> RunnableConfig:
    config = ensure_config(config)
    if config.get('run_name') is None:
        try:
            run_name = callable.__name__
        except AttributeError:
            run_name = None
        if run_name is not None:
            return patch_config(config, run_name=run_name)
    return config
null
format_docs
return '\n\n'.join([d.page_content for d in docs])
def format_docs(docs): return '\n\n'.join([d.page_content for d in docs])
null
embed_documents
"""Call out to HuggingFaceHub's embedding endpoint for embedding search docs. Args: texts: The list of texts to embed. Returns: List of embeddings, one for each text. """ texts = [text.replace('\n', ' ') for text in texts] _model_kwargs = self.model_kwargs or {} responses = self.client.post(json={'inputs': texts, 'parameters': _model_kwargs}, task=self.task) return json.loads(responses.decode())
def embed_documents(self, texts: List[str]) ->List[List[float]]: """Call out to HuggingFaceHub's embedding endpoint for embedding search docs. Args: texts: The list of texts to embed. Returns: List of embeddings, one for each text. """ texts = [text.replace('\n', ' ') for text in texts] _model_kwargs = self.model_kwargs or {} responses = self.client.post(json={'inputs': texts, 'parameters': _model_kwargs}, task=self.task) return json.loads(responses.decode())
Call out to HuggingFaceHub's embedding endpoint for embedding search docs. Args: texts: The list of texts to embed. Returns: List of embeddings, one for each text.
node_data
if isinstance(node.data, Runnable): try: data = str(node.data) if data.startswith('<') or data[0] != data[0].upper() or len(data. splitlines()) > 1: data = node.data.__class__.__name__ elif len(data) > 42: data = data[:42] + '...' except Exception: data = node.data.__class__.__name__ else: data = node.data.__name__ return data if not data.startswith('Runnable') else data[8:]
def node_data(node: Node) ->str: if isinstance(node.data, Runnable): try: data = str(node.data) if data.startswith('<') or data[0] != data[0].upper() or len(data .splitlines()) > 1: data = node.data.__class__.__name__ elif len(data) > 42: data = data[:42] + '...' except Exception: data = node.data.__class__.__name__ else: data = node.data.__name__ return data if not data.startswith('Runnable') else data[8:]
null
_JoinedStr
self.write('f') string = io.StringIO() self._fstring_JoinedStr(t, string.write) self.write(repr(string.getvalue()))
def _JoinedStr(self, t): self.write('f') string = io.StringIO() self._fstring_JoinedStr(t, string.write) self.write(repr(string.getvalue()))
null
knn_search
""" Perform a k-NN search on the Elasticsearch index. Args: query (str, optional): The query text to search for. k (int, optional): The number of nearest neighbors to return. query_vector (List[float], optional): The query vector to search for. model_id (str, optional): The ID of the model to use for transforming the query text into a vector. size (int, optional): The number of search results to return. source (bool, optional): Whether to return the source of the search results. fields (List[Mapping[str, Any]], optional): The fields to return in the search results. page_content (str, optional): The name of the field that contains the page content. Returns: A list of tuples, where each tuple contains a Document object and a score. """ if not source and (fields is None or not any(page_content in field for field in fields)): raise ValueError('If source=False `page_content` field must be in `fields`' ) knn_query_body = self._default_knn_query(query_vector=query_vector, query= query, model_id=model_id, k=k) response = self.client.search(index=self.index_name, knn=knn_query_body, size=size, source=source, fields=fields) hits = [hit for hit in response['hits']['hits']] docs_and_scores = [(Document(page_content=hit['_source'][page_content] if source else hit['fields'][page_content][0], metadata=hit['fields'] if fields else {}), hit['_score']) for hit in hits] return docs_and_scores
def knn_search(self, query: Optional[str]=None, k: Optional[int]=10, query_vector: Optional[List[float]]=None, model_id: Optional[str]=None, size: Optional[int]=10, source: Optional[bool]=True, fields: Optional[ Union[List[Mapping[str, Any]], Tuple[Mapping[str, Any], ...], None]]= None, page_content: Optional[str]='text') ->List[Tuple[Document, float]]: """ Perform a k-NN search on the Elasticsearch index. Args: query (str, optional): The query text to search for. k (int, optional): The number of nearest neighbors to return. query_vector (List[float], optional): The query vector to search for. model_id (str, optional): The ID of the model to use for transforming the query text into a vector. size (int, optional): The number of search results to return. source (bool, optional): Whether to return the source of the search results. fields (List[Mapping[str, Any]], optional): The fields to return in the search results. page_content (str, optional): The name of the field that contains the page content. Returns: A list of tuples, where each tuple contains a Document object and a score. """ if not source and (fields is None or not any(page_content in field for field in fields)): raise ValueError( 'If source=False `page_content` field must be in `fields`') knn_query_body = self._default_knn_query(query_vector=query_vector, query=query, model_id=model_id, k=k) response = self.client.search(index=self.index_name, knn=knn_query_body, size=size, source=source, fields=fields) hits = [hit for hit in response['hits']['hits']] docs_and_scores = [(Document(page_content=hit['_source'][page_content] if source else hit['fields'][page_content][0], metadata=hit['fields'] if fields else {}), hit['_score']) for hit in hits] return docs_and_scores
Perform a k-NN search on the Elasticsearch index. Args: query (str, optional): The query text to search for. k (int, optional): The number of nearest neighbors to return. query_vector (List[float], optional): The query vector to search for. model_id (str, optional): The ID of the model to use for transforming the query text into a vector. size (int, optional): The number of search results to return. source (bool, optional): Whether to return the source of the search results. fields (List[Mapping[str, Any]], optional): The fields to return in the search results. page_content (str, optional): The name of the field that contains the page content. Returns: A list of tuples, where each tuple contains a Document object and a score.
_invoke
assert isinstance(input, dict), 'The input to RunnablePassthrough.assign() must be a dict.'
return {**input, **self.mapper.invoke(input, patch_config(config, callbacks=run_manager.get_child()), **kwargs)}

def _invoke(self, input: Dict[str, Any], run_manager: CallbackManagerForChainRun, config: RunnableConfig, **kwargs: Any) -> Dict[str, Any]:
    assert isinstance(input, dict), 'The input to RunnablePassthrough.assign() must be a dict.'
    return {**input, **self.mapper.invoke(input, patch_config(config, callbacks=run_manager.get_child()), **kwargs)}
null
test_get_nfts_invalid_contract
contract_address = '0x111D4e82EA7eCA7F62c3fdf6D39A541be95Bf111'
with pytest.raises(ValueError) as error_NoNfts:
    BlockchainDocumentLoader(contract_address).load()
assert str(error_NoNfts.value) == 'No NFTs found for contract address ' + contract_address

@pytest.mark.skipif(not alchemyKeySet, reason='Alchemy API key not provided.')
def test_get_nfts_invalid_contract() -> None:
    contract_address = '0x111D4e82EA7eCA7F62c3fdf6D39A541be95Bf111'
    with pytest.raises(ValueError) as error_NoNfts:
        BlockchainDocumentLoader(contract_address).load()
    assert str(error_NoNfts.value) == 'No NFTs found for contract address ' + contract_address
null
test_result_with_params_call
"""Test that call gives the correct answer with extra params.""" search = GoogleSearchAPIWrapper() output = search.results(query="What was Obama's first name?", num_results=5, search_params={'cr': 'us', 'safe': 'active'}) assert len(output)
def test_result_with_params_call() ->None: """Test that call gives the correct answer with extra params.""" search = GoogleSearchAPIWrapper() output = search.results(query="What was Obama's first name?", num_results=5, search_params={'cr': 'us', 'safe': 'active'}) assert len(output)
Test that call gives the correct answer with extra params.
_combine_llm_outputs
if llm_outputs[0] is None:
    return {}
return llm_outputs[0]

def _combine_llm_outputs(self, llm_outputs: List[Optional[dict]]) -> dict:
    if llm_outputs[0] is None:
        return {}
    return llm_outputs[0]
null
__init__
try: import openllm except ImportError as e: raise ImportError( "Could not import openllm. Make sure to install it with 'pip install openllm.'" ) from e llm_kwargs = llm_kwargs or {} if server_url is not None: logger.debug("'server_url' is provided, returning a openllm.Client") assert model_id is None and model_name is None, "'server_url' and {'model_id', 'model_name'} are mutually exclusive" client_cls = (openllm.client.HTTPClient if server_type == 'http' else openllm.client.GrpcClient) client = client_cls(server_url) super().__init__(**{'server_url': server_url, 'server_type': server_type, 'llm_kwargs': llm_kwargs}) self._runner = None self._client = client else: assert model_name is not None, "Must provide 'model_name' or 'server_url'" runner = openllm.Runner(model_name=model_name, model_id=model_id, init_local=embedded, ensure_available=True, **llm_kwargs) super().__init__(**{'model_name': model_name, 'model_id': model_id, 'embedded': embedded, 'llm_kwargs': llm_kwargs}) self._client = None self._runner = runner
def __init__(self, model_name: Optional[str]=None, *, model_id: Optional[ str]=None, server_url: Optional[str]=None, server_type: Literal['grpc', 'http']='http', embedded: bool=True, **llm_kwargs: Any): try: import openllm except ImportError as e: raise ImportError( "Could not import openllm. Make sure to install it with 'pip install openllm.'" ) from e llm_kwargs = llm_kwargs or {} if server_url is not None: logger.debug("'server_url' is provided, returning a openllm.Client") assert model_id is None and model_name is None, "'server_url' and {'model_id', 'model_name'} are mutually exclusive" client_cls = (openllm.client.HTTPClient if server_type == 'http' else openllm.client.GrpcClient) client = client_cls(server_url) super().__init__(**{'server_url': server_url, 'server_type': server_type, 'llm_kwargs': llm_kwargs}) self._runner = None self._client = client else: assert model_name is not None, "Must provide 'model_name' or 'server_url'" runner = openllm.Runner(model_name=model_name, model_id=model_id, init_local=embedded, ensure_available=True, **llm_kwargs) super().__init__(**{'model_name': model_name, 'model_id': model_id, 'embedded': embedded, 'llm_kwargs': llm_kwargs}) self._client = None self._runner = runner
null
test_init_with_pipeline
"""Test initialization with a HF pipeline.""" from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline model_id = 'gpt2' tokenizer = AutoTokenizer.from_pretrained(model_id) model = AutoModelForCausalLM.from_pretrained(model_id) pipe = pipeline('text-generation', model=model, tokenizer=tokenizer, max_new_tokens=10) llm = HuggingFacePipeline(pipeline=pipe) output = llm('Say foo:') assert isinstance(output, str)
def test_init_with_pipeline() ->None: """Test initialization with a HF pipeline.""" from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline model_id = 'gpt2' tokenizer = AutoTokenizer.from_pretrained(model_id) model = AutoModelForCausalLM.from_pretrained(model_id) pipe = pipeline('text-generation', model=model, tokenizer=tokenizer, max_new_tokens=10) llm = HuggingFacePipeline(pipeline=pipe) output = llm('Say foo:') assert isinstance(output, str)
Test initialization with a HF pipeline.
_import_epsilla
from langchain_community.vectorstores.epsilla import Epsilla
return Epsilla

def _import_epsilla() -> Any:
    from langchain_community.vectorstores.epsilla import Epsilla
    return Epsilla
null
test_add_documents
zep_vectorstore.add_documents(mock_documents)
mock_collection.add_documents.assert_called_once_with(texts_metadatas_as_zep_documents)

@pytest.mark.requires('zep_python')
def test_add_documents(zep_vectorstore: ZepVectorStore, mock_collection: 'DocumentCollection', mock_documents: List[Document], texts_metadatas_as_zep_documents: List['ZepDocument']) -> None:
    zep_vectorstore.add_documents(mock_documents)
    mock_collection.add_documents.assert_called_once_with(texts_metadatas_as_zep_documents)
null
_collect_test_results
wait_for_all_evaluators()
all_eval_results = self._collect_metrics()
results = self._merge_test_outputs(batch_results, all_eval_results)
return TestResult(project_name=self.project.name, results=results)

def _collect_test_results(self, batch_results: List[Union[dict, str, LLMResult, ChatResult]]) -> TestResult:
    wait_for_all_evaluators()
    all_eval_results = self._collect_metrics()
    results = self._merge_test_outputs(batch_results, all_eval_results)
    return TestResult(project_name=self.project.name, results=results)
null
messages
"""Retrieve all session messages from DB""" message_blobs = self.blob_history.retrieve(self.session_id) items = [json.loads(message_blob) for message_blob in message_blobs] messages = messages_from_dict(items) return messages
@property def messages(self) ->List[BaseMessage]: """Retrieve all session messages from DB""" message_blobs = self.blob_history.retrieve(self.session_id) items = [json.loads(message_blob) for message_blob in message_blobs] messages = messages_from_dict(items) return messages
Retrieve all session messages from DB
delete_by_metadata
"""Delete by vector ID or other criteria. Args: ids: List of ids to delete. **kwargs: Other keyword arguments that subclasses might use. Returns: Optional[bool]: True if deletion is successful, False otherwise, None if not implemented. """ self.sync_client.delete_by_metadata(filter) return True
def delete_by_metadata(self, filter: Union[Dict[str, str], List[Dict[str, str]]], **kwargs: Any) ->Optional[bool]: """Delete by vector ID or other criteria. Args: ids: List of ids to delete. **kwargs: Other keyword arguments that subclasses might use. Returns: Optional[bool]: True if deletion is successful, False otherwise, None if not implemented. """ self.sync_client.delete_by_metadata(filter) return True
Delete by vector ID or other criteria. Args: ids: List of ids to delete. **kwargs: Other keyword arguments that subclasses might use. Returns: Optional[bool]: True if deletion is successful, False otherwise, None if not implemented.
test_connect_file_rdf
""" Test loading online resource. """ berners_lee_card = 'http://www.w3.org/People/Berners-Lee/card' graph = RdfGraph(source_file=berners_lee_card, standard='rdf') query = """SELECT ?s ?p ?o WHERE { ?s ?p ?o }""" output = graph.query(query) assert len(output) == 86
def test_connect_file_rdf() ->None: """ Test loading online resource. """ berners_lee_card = 'http://www.w3.org/People/Berners-Lee/card' graph = RdfGraph(source_file=berners_lee_card, standard='rdf') query = 'SELECT ?s ?p ?o\nWHERE { ?s ?p ?o }' output = graph.query(query) assert len(output) == 86
Test loading online resource.
test_graph_cypher_qa_chain_prompt_selection_2
chain = GraphCypherQAChain.from_llm(llm=FakeLLM(), graph=FakeGraphStore(), verbose=True, return_intermediate_steps=False)
assert chain.qa_chain.prompt == CYPHER_QA_PROMPT
assert chain.cypher_generation_chain.prompt == CYPHER_GENERATION_PROMPT

def test_graph_cypher_qa_chain_prompt_selection_2() -> None:
    chain = GraphCypherQAChain.from_llm(llm=FakeLLM(), graph=FakeGraphStore(), verbose=True, return_intermediate_steps=False)
    assert chain.qa_chain.prompt == CYPHER_QA_PROMPT
    assert chain.cypher_generation_chain.prompt == CYPHER_GENERATION_PROMPT
null
requires_reference
"""Whether this evaluator requires a reference label.""" return False
@property def requires_reference(self) ->bool: """Whether this evaluator requires a reference label.""" return False
Whether this evaluator requires a reference label.
_get_news_api
news_api_key = kwargs['news_api_key'] chain = APIChain.from_llm_and_api_docs(llm, news_docs.NEWS_DOCS, headers={ 'X-Api-Key': news_api_key}, limit_to_domains=['https://newsapi.org/']) return Tool(name='News-API', description= 'Use this when you want to get information about the top headlines of current news stories. The input should be a question in natural language that this API can answer.' , func=chain.run)
def _get_news_api(llm: BaseLanguageModel, **kwargs: Any) ->BaseTool: news_api_key = kwargs['news_api_key'] chain = APIChain.from_llm_and_api_docs(llm, news_docs.NEWS_DOCS, headers={'X-Api-Key': news_api_key}, limit_to_domains=[ 'https://newsapi.org/']) return Tool(name='News-API', description= 'Use this when you want to get information about the top headlines of current news stories. The input should be a question in natural language that this API can answer.' , func=chain.run)
null
get_schema
"""Returns the schema of the Graph database""" pass
@property @abstractmethod def get_schema(self) ->str: """Returns the schema of the Graph database""" pass
Returns the schema of the Graph database
_load_vector_db_qa_with_sources_chain
if 'vectorstore' in kwargs: vectorstore = kwargs.pop('vectorstore') else: raise ValueError('`vectorstore` must be present.') if 'combine_documents_chain' in config: combine_documents_chain_config = config.pop('combine_documents_chain') combine_documents_chain = load_chain_from_config( combine_documents_chain_config) elif 'combine_documents_chain_path' in config: combine_documents_chain = load_chain(config.pop( 'combine_documents_chain_path')) else: raise ValueError( 'One of `combine_documents_chain` or `combine_documents_chain_path` must be present.' ) return VectorDBQAWithSourcesChain(combine_documents_chain= combine_documents_chain, vectorstore=vectorstore, **config)
def _load_vector_db_qa_with_sources_chain(config: dict, **kwargs: Any ) ->VectorDBQAWithSourcesChain: if 'vectorstore' in kwargs: vectorstore = kwargs.pop('vectorstore') else: raise ValueError('`vectorstore` must be present.') if 'combine_documents_chain' in config: combine_documents_chain_config = config.pop('combine_documents_chain') combine_documents_chain = load_chain_from_config( combine_documents_chain_config) elif 'combine_documents_chain_path' in config: combine_documents_chain = load_chain(config.pop( 'combine_documents_chain_path')) else: raise ValueError( 'One of `combine_documents_chain` or `combine_documents_chain_path` must be present.' ) return VectorDBQAWithSourcesChain(combine_documents_chain= combine_documents_chain, vectorstore=vectorstore, **config)
null
test_get_relevant_documents
query = 'Test query' relevant_documents = time_weighted_retriever.get_relevant_documents(query) want = [(doc, 0.5) for doc in _get_example_memories()] assert isinstance(relevant_documents, list) assert len(relevant_documents) == len(want) now = datetime.now() for doc in relevant_documents: assert now - timedelta(hours=1) < doc.metadata['last_accessed_at'] <= now for d in time_weighted_retriever.memory_stream: assert now - timedelta(hours=1) < d.metadata['last_accessed_at'] <= now
def test_get_relevant_documents(time_weighted_retriever: TimeWeightedVectorStoreRetriever) ->None: query = 'Test query' relevant_documents = time_weighted_retriever.get_relevant_documents(query) want = [(doc, 0.5) for doc in _get_example_memories()] assert isinstance(relevant_documents, list) assert len(relevant_documents) == len(want) now = datetime.now() for doc in relevant_documents: assert now - timedelta(hours=1) < doc.metadata['last_accessed_at' ] <= now for d in time_weighted_retriever.memory_stream: assert now - timedelta(hours=1) < d.metadata['last_accessed_at'] <= now
null
parse_from_results
try: from google.cloud.documentai_toolbox.utilities.gcs_utilities import split_gcs_uri from google.cloud.documentai_toolbox.wrappers.document import _get_shards from google.cloud.documentai_toolbox.wrappers.page import _text_from_layout except ImportError as exc: raise ImportError( 'documentai_toolbox package not found, please install it with `pip install google-cloud-documentai-toolbox`' ) from exc for result in results: gcs_bucket_name, gcs_prefix = split_gcs_uri(result.parsed_path) shards = _get_shards(gcs_bucket_name, gcs_prefix) yield from (Document(page_content=_text_from_layout(page.layout, shard. text), metadata={'page': page.page_number, 'source': result. source_path}) for shard in shards for page in shard.pages)
def parse_from_results(self, results: List[DocAIParsingResults]) ->Iterator[ Document]: try: from google.cloud.documentai_toolbox.utilities.gcs_utilities import split_gcs_uri from google.cloud.documentai_toolbox.wrappers.document import _get_shards from google.cloud.documentai_toolbox.wrappers.page import _text_from_layout except ImportError as exc: raise ImportError( 'documentai_toolbox package not found, please install it with `pip install google-cloud-documentai-toolbox`' ) from exc for result in results: gcs_bucket_name, gcs_prefix = split_gcs_uri(result.parsed_path) shards = _get_shards(gcs_bucket_name, gcs_prefix) yield from (Document(page_content=_text_from_layout(page.layout, shard.text), metadata={'page': page.page_number, 'source': result.source_path}) for shard in shards for page in shard.pages)
null
test_get_validated_relative_path
"""Safely resolve a path.""" root = Path(__file__).parent user_path = 'data/sub/file.txt' expected = root / user_path result = get_validated_relative_path(root, user_path) assert result == expected
def test_get_validated_relative_path() ->None: """Safely resolve a path.""" root = Path(__file__).parent user_path = 'data/sub/file.txt' expected = root / user_path result = get_validated_relative_path(root, user_path) assert result == expected
Safely resolve a path.
_get_provider
return self.model_id.split('.')[0]
def _get_provider(self) ->str: return self.model_id.split('.')[0]
null
_get_serpapi
return Tool(
    name='Search',
    description='A search engine. Useful for when you need to answer questions about current events. Input should be a search query.',
    func=SerpAPIWrapper(**kwargs).run,
    coroutine=SerpAPIWrapper(**kwargs).arun,
)

def _get_serpapi(**kwargs: Any) -> BaseTool:
    return Tool(
        name='Search',
        description='A search engine. Useful for when you need to answer questions about current events. Input should be a search query.',
        func=SerpAPIWrapper(**kwargs).run,
        coroutine=SerpAPIWrapper(**kwargs).arun,
    )
null
_streaming_response_template
return {'choices': [{'text': '', 'finish_reason': None, 'logprobs': None}]}
def _streaming_response_template() ->Dict[str, Any]: return {'choices': [{'text': '', 'finish_reason': None, 'logprobs': None}]}
null
_formatted_page_summary
return f"""Page: {page_title} Summary: {wiki_page.summary}"""
@staticmethod def _formatted_page_summary(page_title: str, wiki_page: Any) ->Optional[str]: return f'Page: {page_title}\nSummary: {wiki_page.summary}'
null
_llm_type
"""Return type of LLM.""" return 'cloudflare'
@property def _llm_type(self) ->str: """Return type of LLM.""" return 'cloudflare'
Return type of LLM.
__init__
""" Initialize with a file path. Args: file_path: The path to the file to load. mode: The mode to use for partitioning. See unstructured for details. Defaults to "single". **unstructured_kwargs: Additional keyword arguments to pass to unstructured. """ min_unstructured_version = '0.5.12' if not satisfies_min_unstructured_version(min_unstructured_version): raise ValueError( f'Partitioning rtf files is only supported in unstructured>={min_unstructured_version}.' ) super().__init__(file_path=file_path, mode=mode, **unstructured_kwargs)
def __init__(self, file_path: str, mode: str='single', ** unstructured_kwargs: Any): """ Initialize with a file path. Args: file_path: The path to the file to load. mode: The mode to use for partitioning. See unstructured for details. Defaults to "single". **unstructured_kwargs: Additional keyword arguments to pass to unstructured. """ min_unstructured_version = '0.5.12' if not satisfies_min_unstructured_version(min_unstructured_version): raise ValueError( f'Partitioning rtf files is only supported in unstructured>={min_unstructured_version}.' ) super().__init__(file_path=file_path, mode=mode, **unstructured_kwargs)
Initialize with a file path. Args: file_path: The path to the file to load. mode: The mode to use for partitioning. See unstructured for details. Defaults to "single". **unstructured_kwargs: Additional keyword arguments to pass to unstructured.
on_tool_start
self.on_tool_start_common()
def on_tool_start(self, *args: Any, **kwargs: Any) -> Any:
    self.on_tool_start_common()
null
add_message
"""Add a self-created message to the store""" self.messages.append(message)
def add_message(self, message: BaseMessage) ->None: """Add a self-created message to the store""" self.messages.append(message)
Add a self-created message to the store
raise_value_error
"""Raise a value error.""" raise ValueError('x is too large')
def raise_value_error(x: int) ->int: """Raise a value error.""" raise ValueError('x is too large')
Raise a value error.
_import_ainetwork_owner
from langchain_community.tools.ainetwork.owner import AINOwnerOps
return AINOwnerOps

def _import_ainetwork_owner() -> Any:
    from langchain_community.tools.ainetwork.owner import AINOwnerOps
    return AINOwnerOps
null
create_citation_fuzzy_match_chain
"""Create a citation fuzzy match chain. Args: llm: Language model to use for the chain. Returns: Chain (LLMChain) that can be used to answer questions with citations. """ output_parser = PydanticOutputFunctionsParser(pydantic_schema=QuestionAnswer) schema = QuestionAnswer.schema() function = {'name': schema['title'], 'description': schema['description'], 'parameters': schema} llm_kwargs = get_llm_kwargs(function) messages = [SystemMessage(content= 'You are a world class algorithm to answer questions with correct and exact citations.' ), HumanMessage(content='Answer question using the following context'), HumanMessagePromptTemplate.from_template('{context}'), HumanMessagePromptTemplate.from_template('Question: {question}'), HumanMessage(content= 'Tips: Make sure to cite your sources, and use the exact words from the context.' )] prompt = ChatPromptTemplate(messages=messages) chain = LLMChain(llm=llm, prompt=prompt, llm_kwargs=llm_kwargs, output_parser=output_parser) return chain
def create_citation_fuzzy_match_chain(llm: BaseLanguageModel) ->LLMChain: """Create a citation fuzzy match chain. Args: llm: Language model to use for the chain. Returns: Chain (LLMChain) that can be used to answer questions with citations. """ output_parser = PydanticOutputFunctionsParser(pydantic_schema= QuestionAnswer) schema = QuestionAnswer.schema() function = {'name': schema['title'], 'description': schema[ 'description'], 'parameters': schema} llm_kwargs = get_llm_kwargs(function) messages = [SystemMessage(content= 'You are a world class algorithm to answer questions with correct and exact citations.' ), HumanMessage(content= 'Answer question using the following context'), HumanMessagePromptTemplate.from_template('{context}'), HumanMessagePromptTemplate.from_template('Question: {question}'), HumanMessage(content= 'Tips: Make sure to cite your sources, and use the exact words from the context.' )] prompt = ChatPromptTemplate(messages=messages) chain = LLMChain(llm=llm, prompt=prompt, llm_kwargs=llm_kwargs, output_parser=output_parser) return chain
Create a citation fuzzy match chain. Args: llm: Language model to use for the chain. Returns: Chain (LLMChain) that can be used to answer questions with citations.
_run
"""Run the tool.""" return self.requests_wrapper.delete(_clean_url(url))
def _run(self, url: str, run_manager: Optional[CallbackManagerForToolRun]=None ) ->str: """Run the tool.""" return self.requests_wrapper.delete(_clean_url(url))
Run the tool.
test_load_file_pattern
"""Test that returns no documents when json file pattern specified.""" loader = BibtexLoader(file_path=str(BIBTEX_EXAMPLE_FILE), file_pattern= '[^:]+\\.json') docs = loader.load() assert len(docs) == 0
@pytest.mark.requires('fitz', 'bibtexparser') def test_load_file_pattern() ->None: """Test that returns no documents when json file pattern specified.""" loader = BibtexLoader(file_path=str(BIBTEX_EXAMPLE_FILE), file_pattern= '[^:]+\\.json') docs = loader.load() assert len(docs) == 0
Test that returns no documents when json file pattern specified.
test_chat_openai_invalid_streaming_params
"""Test that streaming correctly invokes on_llm_new_token callback.""" with pytest.raises(ValueError): ChatOpenAI(max_tokens=10, streaming=True, temperature=0, n=5)
def test_chat_openai_invalid_streaming_params() ->None: """Test that streaming correctly invokes on_llm_new_token callback.""" with pytest.raises(ValueError): ChatOpenAI(max_tokens=10, streaming=True, temperature=0, n=5)
Test that streaming correctly invokes on_llm_new_token callback.
embed_query
"""Call out to Gradient's embedding endpoint. Args: text: The text to embed. Returns: Embeddings for the text. """ query = (f'{self.query_prompt_for_retrieval} {text}' if self. query_prompt_for_retrieval else text) return self.embed_documents([query])[0]
def embed_query(self, text: str) ->List[float]: """Call out to Gradient's embedding endpoint. Args: text: The text to embed. Returns: Embeddings for the text. """ query = (f'{self.query_prompt_for_retrieval} {text}' if self. query_prompt_for_retrieval else text) return self.embed_documents([query])[0]
Call out to Gradient's embedding endpoint. Args: text: The text to embed. Returns: Embeddings for the text.
visit_comparison
value_type = 'valueText' value = comparison.value if isinstance(comparison.value, bool): value_type = 'valueBoolean' elif isinstance(comparison.value, float): value_type = 'valueNumber' elif isinstance(comparison.value, int): value_type = 'valueInt' elif isinstance(comparison.value, dict) and comparison.value.get('type' ) == 'date': value_type = 'valueDate' date = datetime.strptime(comparison.value['date'], '%Y-%m-%d') value = date.strftime('%Y-%m-%dT%H:%M:%SZ') filter = {'path': [comparison.attribute], 'operator': self._format_func( comparison.comparator), value_type: value} return filter
def visit_comparison(self, comparison: Comparison) ->Dict: value_type = 'valueText' value = comparison.value if isinstance(comparison.value, bool): value_type = 'valueBoolean' elif isinstance(comparison.value, float): value_type = 'valueNumber' elif isinstance(comparison.value, int): value_type = 'valueInt' elif isinstance(comparison.value, dict) and comparison.value.get('type' ) == 'date': value_type = 'valueDate' date = datetime.strptime(comparison.value['date'], '%Y-%m-%d') value = date.strftime('%Y-%m-%dT%H:%M:%SZ') filter = {'path': [comparison.attribute], 'operator': self._format_func (comparison.comparator), value_type: value} return filter
null
visit_comparison
comparator = self._format_func(comparison.comparator) processed_value = process_value(comparison.value) attribute = comparison.attribute return '( ' + attribute + ' ' + comparator + ' ' + processed_value + ' )'
def visit_comparison(self, comparison: Comparison) ->str: comparator = self._format_func(comparison.comparator) processed_value = process_value(comparison.value) attribute = comparison.attribute return '( ' + attribute + ' ' + comparator + ' ' + processed_value + ' )'
null
build_extra
"""Build extra kwargs from additional params that were passed in.""" all_required_field_names = get_pydantic_field_names(cls) extra = values.get('model_kwargs', {}) for field_name in list(values): if field_name in extra: raise ValueError(f'Found {field_name} supplied twice.') if field_name not in all_required_field_names: warnings.warn( f"""WARNING! {field_name} is not default parameter. {field_name} was transferred to model_kwargs. Please confirm that {field_name} is what you intended.""" ) extra[field_name] = values.pop(field_name) invalid_model_kwargs = all_required_field_names.intersection(extra.keys()) if invalid_model_kwargs: raise ValueError( f'Parameters {invalid_model_kwargs} should be specified explicitly. Instead they were passed in as part of `model_kwargs` parameter.' ) values['model_kwargs'] = extra return values
@root_validator(pre=True) def build_extra(cls, values: Dict[str, Any]) ->Dict[str, Any]: """Build extra kwargs from additional params that were passed in.""" all_required_field_names = get_pydantic_field_names(cls) extra = values.get('model_kwargs', {}) for field_name in list(values): if field_name in extra: raise ValueError(f'Found {field_name} supplied twice.') if field_name not in all_required_field_names: warnings.warn( f"""WARNING! {field_name} is not default parameter. {field_name} was transferred to model_kwargs. Please confirm that {field_name} is what you intended.""" ) extra[field_name] = values.pop(field_name) invalid_model_kwargs = all_required_field_names.intersection(extra.keys()) if invalid_model_kwargs: raise ValueError( f'Parameters {invalid_model_kwargs} should be specified explicitly. Instead they were passed in as part of `model_kwargs` parameter.' ) values['model_kwargs'] = extra return values
Build extra kwargs from additional params that were passed in.
test_message_prompt_template_from_template_file
expected = ChatMessagePromptTemplate(prompt=PromptTemplate(template="""Question: {question}
Answer:""", input_variables=['question']), role='human')
actual = ChatMessagePromptTemplate.from_template_file(Path(__file__).parent.parent / 'data' / 'prompt_file.txt', ['question'], role='human')
assert expected == actual

def test_message_prompt_template_from_template_file() -> None:
    expected = ChatMessagePromptTemplate(prompt=PromptTemplate(template="""Question: {question}
Answer:""", input_variables=['question']), role='human')
    actual = ChatMessagePromptTemplate.from_template_file(Path(__file__).parent.parent / 'data' / 'prompt_file.txt', ['question'], role='human')
    assert expected == actual
null
_moderate
if results['flagged']: error_str = "Text was found that violates OpenAI's content policy." if self.error: raise ValueError(error_str) else: return error_str return text
def _moderate(self, text: str, results: dict) ->str: if results['flagged']: error_str = "Text was found that violates OpenAI's content policy." if self.error: raise ValueError(error_str) else: return error_str return text
null
load_memory_variables
"""Return history buffer.""" entities = self._get_current_entities(inputs) summary_strings = [] for entity in entities: knowledge = self.kg.get_entity_knowledge(entity) if knowledge: summary = f"On {entity}: {'. '.join(knowledge)}." summary_strings.append(summary) context: Union[str, List] if not summary_strings: context = [] if self.return_messages else '' elif self.return_messages: context = [self.summary_message_cls(content=text) for text in summary_strings] else: context = '\n'.join(summary_strings) return {self.memory_key: context}
def load_memory_variables(self, inputs: Dict[str, Any]) ->Dict[str, Any]: """Return history buffer.""" entities = self._get_current_entities(inputs) summary_strings = [] for entity in entities: knowledge = self.kg.get_entity_knowledge(entity) if knowledge: summary = f"On {entity}: {'. '.join(knowledge)}." summary_strings.append(summary) context: Union[str, List] if not summary_strings: context = [] if self.return_messages else '' elif self.return_messages: context = [self.summary_message_cls(content=text) for text in summary_strings] else: context = '\n'.join(summary_strings) return {self.memory_key: context}
Return history buffer.
test_build_query_sql_with_where
vector = [1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0] q_str = self.rockset_vectorstore._build_query_sql(vector, Rockset. DistanceFunction.COSINE_SIM, 4, 'age >= 10') vector_str = ','.join(map(str, vector)) expected = f"""SELECT * EXCEPT({embedding_key}), COSINE_SIM({embedding_key}, [{vector_str}]) as dist FROM {workspace}.{collection_name} WHERE age >= 10 ORDER BY dist DESC LIMIT 4 """ assert q_str == expected
def test_build_query_sql_with_where(self) ->None: vector = [1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0] q_str = self.rockset_vectorstore._build_query_sql(vector, Rockset. DistanceFunction.COSINE_SIM, 4, 'age >= 10') vector_str = ','.join(map(str, vector)) expected = f"""SELECT * EXCEPT({embedding_key}), COSINE_SIM({embedding_key}, [{vector_str}]) as dist FROM {workspace}.{collection_name} WHERE age >= 10 ORDER BY dist DESC LIMIT 4 """ assert q_str == expected
null
load_messages
"""Retrieve the messages from Firestore""" if not self._document: raise ValueError('Document not initialized') doc = self._document.get() if doc.exists: data = doc.to_dict() if 'messages' in data and len(data['messages']) > 0: self.messages = messages_from_dict(data['messages'])
def load_messages(self) ->None: """Retrieve the messages from Firestore""" if not self._document: raise ValueError('Document not initialized') doc = self._document.get() if doc.exists: data = doc.to_dict() if 'messages' in data and len(data['messages']) > 0: self.messages = messages_from_dict(data['messages'])
Retrieve the messages from Firestore
input_keys
"""Expect input key. :meta private: """ return [self.input_docs_key, self.question_key]
@property def input_keys(self) ->List[str]: """Expect input key. :meta private: """ return [self.input_docs_key, self.question_key]
Expect input key. :meta private:
embed_query
"""Get embeddings for a single text. Args: text: The text to get embeddings for. Returns: List of embeddings. """ return self.embed_documents([text])[0]
def embed_query(self, text: str) ->List[float]: """Get embeddings for a single text. Args: text: The text to get embeddings for. Returns: List of embeddings. """ return self.embed_documents([text])[0]
Get embeddings for a single text. Args: text: The text to get embeddings for. Returns: List of embeddings.
ignore_retriever
"""Whether to ignore retriever callbacks.""" return self.ignore_retriever_
@property def ignore_retriever(self) ->bool: """Whether to ignore retriever callbacks.""" return self.ignore_retriever_
Whether to ignore retriever callbacks.
_format_func
map_dict = {Operator.AND: ' and ', Operator.OR: ' or ', Comparator.EQ: '=', Comparator.NE: '!=', Comparator.GT: '>', Comparator.GTE: '>=', Comparator.LT: '<', Comparator.LTE: '<='} self._validate_func(func) return map_dict[func]
def _format_func(self, func: Union[Operator, Comparator]) ->str: map_dict = {Operator.AND: ' and ', Operator.OR: ' or ', Comparator.EQ: '=', Comparator.NE: '!=', Comparator.GT: '>', Comparator.GTE: '>=', Comparator.LT: '<', Comparator.LTE: '<='} self._validate_func(func) return map_dict[func]
null
OutputType
"""The type of output this runnable produces specified as a type annotation.""" for cls in self.__class__.__orig_bases__: type_args = get_args(cls) if type_args and len(type_args) == 2: return type_args[1] raise TypeError( f"Runnable {self.get_name()} doesn't have an inferable OutputType. Override the OutputType property to specify the output type." )
@property def OutputType(self) ->Type[Output]: """The type of output this runnable produces specified as a type annotation.""" for cls in self.__class__.__orig_bases__: type_args = get_args(cls) if type_args and len(type_args) == 2: return type_args[1] raise TypeError( f"Runnable {self.get_name()} doesn't have an inferable OutputType. Override the OutputType property to specify the output type." )
The type of output this runnable produces specified as a type annotation.
embed_query
"""Embed a query using a Ollama deployed embedding model. Args: text: The text to embed. Returns: Embeddings for the text. """ instruction_pair = f'{self.query_instruction}{text}' embedding = self._embed([instruction_pair])[0] return embedding
def embed_query(self, text: str) ->List[float]: """Embed a query using a Ollama deployed embedding model. Args: text: The text to embed. Returns: Embeddings for the text. """ instruction_pair = f'{self.query_instruction}{text}' embedding = self._embed([instruction_pair])[0] return embedding
Embed a query using a Ollama deployed embedding model. Args: text: The text to embed. Returns: Embeddings for the text.
test_complete_text_and_metadata
""" Test loading a board cards with all metadata. """ from bs4 import BeautifulSoup trello_loader = TrelloLoader.from_credentials('QA', api_key='API_KEY', token='API_TOKEN') documents = trello_loader.load() self.assertEqual(len(documents), len(TRELLO_CARDS_QA), 'Card count matches.') soup = BeautifulSoup(documents[0].page_content, 'html.parser') self.assertTrue(len(soup.find_all()) == 0, 'There is not markup in Closed Card document content.') texts = ['Closed Card Title', 'This is the description of Closed Card.', 'Checklist 1', 'Item 1:pending', 'This is a comment on a Closed Card.'] for text in texts: self.assertTrue(text in documents[0].page_content) self.assertEqual(documents[0].metadata, {'title': 'Closed Card Title', 'id': '12350aca6952888df7975903', 'url': 'https://trello.com/card/12350aca6952888df7975903', 'labels': [], 'list': 'Done', 'closed': True, 'due_date': ''}, 'Metadata of Closed Card Matches.') soup = BeautifulSoup(documents[1].page_content, 'html.parser') self.assertTrue(len(soup.find_all()) == 0, 'There is not markup in Card 2 document content.') texts = ['Card 2', 'This is the description of Card 2.'] for text in texts: self.assertTrue(text in documents[1].page_content) self.assertEqual(documents[1].metadata, {'title': 'Card 2', 'id': '45650aca6952888df7975903', 'url': 'https://trello.com/card/45650aca6952888df7975903', 'labels': ['Medium', 'Task'], 'list': 'In Progress', 'closed': False, 'due_date': ''}, 'Metadata of Card 2 Matches.') soup = BeautifulSoup(documents[2].page_content, 'html.parser') self.assertTrue(len(soup.find_all()) == 0, 'There is not markup in Card 2 document content.') texts = ['Camera', 'camera limit to stage size:complete', "Use 'Impulse' Cinemachine feature for camera shake."] for text in texts: self.assertTrue(text in documents[2].page_content, text + ' is present.') self.assertEqual(documents[2].metadata, {'title': 'Camera', 'id': '55550aca6952888df7975903', 'url': 'https://trello.com/card/55550aca6952888df7975903', 'labels': ['Task'], 'list': 'Selected for Milestone', 'closed': False, 'due_date': ''}, 'Metadata of Camera Card matches.')
def test_complete_text_and_metadata(self) ->None: """ Test loading a board cards with all metadata. """ from bs4 import BeautifulSoup trello_loader = TrelloLoader.from_credentials('QA', api_key='API_KEY', token='API_TOKEN') documents = trello_loader.load() self.assertEqual(len(documents), len(TRELLO_CARDS_QA), 'Card count matches.') soup = BeautifulSoup(documents[0].page_content, 'html.parser') self.assertTrue(len(soup.find_all()) == 0, 'There is not markup in Closed Card document content.') texts = ['Closed Card Title', 'This is the description of Closed Card.', 'Checklist 1', 'Item 1:pending', 'This is a comment on a Closed Card.'] for text in texts: self.assertTrue(text in documents[0].page_content) self.assertEqual(documents[0].metadata, {'title': 'Closed Card Title', 'id': '12350aca6952888df7975903', 'url': 'https://trello.com/card/12350aca6952888df7975903', 'labels': [], 'list': 'Done', 'closed': True, 'due_date': ''}, 'Metadata of Closed Card Matches.') soup = BeautifulSoup(documents[1].page_content, 'html.parser') self.assertTrue(len(soup.find_all()) == 0, 'There is not markup in Card 2 document content.') texts = ['Card 2', 'This is the description of Card 2.'] for text in texts: self.assertTrue(text in documents[1].page_content) self.assertEqual(documents[1].metadata, {'title': 'Card 2', 'id': '45650aca6952888df7975903', 'url': 'https://trello.com/card/45650aca6952888df7975903', 'labels': [ 'Medium', 'Task'], 'list': 'In Progress', 'closed': False, 'due_date': ''}, 'Metadata of Card 2 Matches.') soup = BeautifulSoup(documents[2].page_content, 'html.parser') self.assertTrue(len(soup.find_all()) == 0, 'There is not markup in Card 2 document content.') texts = ['Camera', 'camera limit to stage size:complete', "Use 'Impulse' Cinemachine feature for camera shake."] for text in texts: self.assertTrue(text in documents[2].page_content, text + ' is present.') self.assertEqual(documents[2].metadata, {'title': 'Camera', 'id': '55550aca6952888df7975903', 'url': 'https://trello.com/card/55550aca6952888df7975903', 'labels': [ 'Task'], 'list': 'Selected for Milestone', 'closed': False, 'due_date': ''}, 'Metadata of Camera Card matches.')
Test loading a board cards with all metadata.
test_edenai_call
"""Test simple call to edenai's image moderation endpoint.""" image_moderation = EdenAiExplicitImageTool(providers=['amazon']) output = image_moderation('https://static.javatpoint.com/images/objects.jpg') assert image_moderation.name == 'edenai_image_explicit_content_detection' assert image_moderation.feature == 'image' assert image_moderation.subfeature == 'explicit_content' assert isinstance(output, str)
def test_edenai_call() ->None: """Test simple call to edenai's image moderation endpoint.""" image_moderation = EdenAiExplicitImageTool(providers=['amazon']) output = image_moderation( 'https://static.javatpoint.com/images/objects.jpg') assert image_moderation.name == 'edenai_image_explicit_content_detection' assert image_moderation.feature == 'image' assert image_moderation.subfeature == 'explicit_content' assert isinstance(output, str)
Test simple call to edenai's image moderation endpoint.
_get_schema
"""Get the schema for a table.""" try: result = self.run( f'EVALUATE TOPN({self.sample_rows_in_table_info}, {table})') self.schemas[table] = json_to_md(result['results'][0]['tables'][0]['rows']) except Timeout: logger.warning('Timeout while getting table info for %s', table) self.schemas[table] = 'unknown' except Exception as exc: logger.warning('Error while getting table info for %s: %s', table, exc) self.schemas[table] = 'unknown'
def _get_schema(self, table: str) ->None: """Get the schema for a table.""" try: result = self.run( f'EVALUATE TOPN({self.sample_rows_in_table_info}, {table})') self.schemas[table] = json_to_md(result['results'][0]['tables'][0][ 'rows']) except Timeout: logger.warning('Timeout while getting table info for %s', table) self.schemas[table] = 'unknown' except Exception as exc: logger.warning('Error while getting table info for %s: %s', table, exc) self.schemas[table] = 'unknown'
Get the schema for a table.
__init__
"""initialize the SemaDB vector store.""" self.collection_name = collection_name self.vector_size = vector_size self.api_key = api_key or get_from_env('api_key', 'SEMADB_API_KEY') self._embedding = embedding self.distance_strategy = distance_strategy
def __init__(self, collection_name: str, vector_size: int, embedding: Embeddings, distance_strategy: DistanceStrategy=DistanceStrategy. EUCLIDEAN_DISTANCE, api_key: str=''): """initialize the SemaDB vector store.""" self.collection_name = collection_name self.vector_size = vector_size self.api_key = api_key or get_from_env('api_key', 'SEMADB_API_KEY') self._embedding = embedding self.distance_strategy = distance_strategy
initialize the SemaDB vector store.
test_tensorflowhub_embedding_query
"""Test tensorflowhub embeddings.""" document = 'foo bar' embedding = TensorflowHubEmbeddings() output = embedding.embed_query(document) assert len(output) == 512
def test_tensorflowhub_embedding_query() ->None: """Test tensorflowhub embeddings.""" document = 'foo bar' embedding = TensorflowHubEmbeddings() output = embedding.embed_query(document) assert len(output) == 512
Test tensorflowhub embeddings.
load_dataset
"""Load a dataset from the `LangChainDatasets on HuggingFace <https://huggingface.co/LangChainDatasets>`_. Args: uri: The uri of the dataset to load. Returns: A list of dictionaries, each representing a row in the dataset. **Prerequisites** .. code-block:: shell pip install datasets Examples -------- .. code-block:: python from langchain.evaluation import load_dataset ds = load_dataset("llm-math") """ try: from datasets import load_dataset except ImportError: raise ImportError( 'load_dataset requires the `datasets` package. Please install with `pip install datasets`' ) dataset = load_dataset(f'LangChainDatasets/{uri}') return [d for d in dataset['train']]
def load_dataset(uri: str) ->List[Dict]: """Load a dataset from the `LangChainDatasets on HuggingFace <https://huggingface.co/LangChainDatasets>`_. Args: uri: The uri of the dataset to load. Returns: A list of dictionaries, each representing a row in the dataset. **Prerequisites** .. code-block:: shell pip install datasets Examples -------- .. code-block:: python from langchain.evaluation import load_dataset ds = load_dataset("llm-math") """ try: from datasets import load_dataset except ImportError: raise ImportError( 'load_dataset requires the `datasets` package. Please install with `pip install datasets`' ) dataset = load_dataset(f'LangChainDatasets/{uri}') return [d for d in dataset['train']]
Load a dataset from the `LangChainDatasets on HuggingFace <https://huggingface.co/LangChainDatasets>`_. Args: uri: The uri of the dataset to load. Returns: A list of dictionaries, each representing a row in the dataset. **Prerequisites** .. code-block:: shell pip install datasets Examples -------- .. code-block:: python from langchain.evaluation import load_dataset ds = load_dataset("llm-math")
test_solidity_code_splitter
splitter = RecursiveCharacterTextSplitter.from_language(Language.SOL, chunk_size=CHUNK_SIZE, chunk_overlap=0) code = """pragma solidity ^0.8.20; contract HelloWorld { function add(uint a, uint b) pure public returns(uint) { return a + b; } } """ chunks = splitter.split_text(code) assert chunks == ['pragma solidity', '^0.8.20;', 'contract', 'HelloWorld {', 'function', 'add(uint a,', 'uint b) pure', 'public', 'returns(uint) {', 'return a', '+ b;', '}\n }']
def test_solidity_code_splitter() ->None: splitter = RecursiveCharacterTextSplitter.from_language(Language.SOL, chunk_size=CHUNK_SIZE, chunk_overlap=0) code = """pragma solidity ^0.8.20; contract HelloWorld { function add(uint a, uint b) pure public returns(uint) { return a + b; } } """ chunks = splitter.split_text(code) assert chunks == ['pragma solidity', '^0.8.20;', 'contract', 'HelloWorld {', 'function', 'add(uint a,', 'uint b) pure', 'public', 'returns(uint) {', 'return a', '+ b;', '}\n }']
null
validate_environment
"""Validate that api key and python package exists in environment.""" values['anyscale_api_base'] = get_from_dict_or_env(values, 'anyscale_api_base', 'ANYSCALE_API_BASE') values['anyscale_api_key'] = convert_to_secret_str(get_from_dict_or_env( values, 'anyscale_api_key', 'ANYSCALE_API_KEY')) try: import openai values['client'] = openai.ChatCompletion except ImportError: raise ImportError( 'Could not import openai python package. Please install it with `pip install openai`.' ) if values['streaming'] and values['n'] > 1: raise ValueError('Cannot stream results when n > 1.') if values['streaming'] and values['best_of'] > 1: raise ValueError('Cannot stream results when best_of > 1.') return values
@root_validator() def validate_environment(cls, values: Dict) ->Dict: """Validate that api key and python package exists in environment.""" values['anyscale_api_base'] = get_from_dict_or_env(values, 'anyscale_api_base', 'ANYSCALE_API_BASE') values['anyscale_api_key'] = convert_to_secret_str(get_from_dict_or_env (values, 'anyscale_api_key', 'ANYSCALE_API_KEY')) try: import openai values['client'] = openai.ChatCompletion except ImportError: raise ImportError( 'Could not import openai python package. Please install it with `pip install openai`.' ) if values['streaming'] and values['n'] > 1: raise ValueError('Cannot stream results when n > 1.') if values['streaming'] and values['best_of'] > 1: raise ValueError('Cannot stream results when best_of > 1.') return values
Validate that the API key and Python package exist in the environment.
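The validator above resolves credentials from constructor kwargs or environment variables. A minimal sketch of satisfying it through the environment; the values are placeholders, not real endpoints or keys:

import os

# Environment variable names taken directly from the validator above.
os.environ['ANYSCALE_API_BASE'] = 'https://api.example.invalid/v1'  # placeholder
os.environ['ANYSCALE_API_KEY'] = 'dummy-key'  # placeholder secret

# With both variables set, validate_environment fills anyscale_api_base,
# wraps the key in a SecretStr, and binds openai.ChatCompletion as the client.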
_format_url
expected_path_param = re.findall('{(.*?)}', url) new_params = {} for param in expected_path_param: clean_param = param.lstrip('.;').rstrip('*') val = path_params[clean_param] if isinstance(val, list): if param[0] == '.': sep = '.' if param[-1] == '*' else ',' new_val = '.' + sep.join(val) elif param[0] == ';': sep = f'{clean_param}=' if param[-1] == '*' else ',' new_val = f'{clean_param}=' + sep.join(val) else: new_val = ','.join(val) elif isinstance(val, dict): kv_sep = '=' if param[-1] == '*' else ',' kv_strs = [kv_sep.join((k, v)) for k, v in val.items()] if param[0] == '.': sep = '.' new_val = '.' elif param[0] == ';': sep = ';' new_val = ';' else: sep = ',' new_val = '' new_val += sep.join(kv_strs) elif param[0] == '.': new_val = f'.{val}' elif param[0] == ';': new_val = f';{clean_param}={val}' else: new_val = val new_params[param] = new_val return url.format(**new_params)
def _format_url(url: str, path_params: dict) ->str: expected_path_param = re.findall('{(.*?)}', url) new_params = {} for param in expected_path_param: clean_param = param.lstrip('.;').rstrip('*') val = path_params[clean_param] if isinstance(val, list): if param[0] == '.': sep = '.' if param[-1] == '*' else ',' new_val = '.' + sep.join(val) elif param[0] == ';': sep = f'{clean_param}=' if param[-1] == '*' else ',' new_val = f'{clean_param}=' + sep.join(val) else: new_val = ','.join(val) elif isinstance(val, dict): kv_sep = '=' if param[-1] == '*' else ',' kv_strs = [kv_sep.join((k, v)) for k, v in val.items()] if param[0] == '.': sep = '.' new_val = '.' elif param[0] == ';': sep = ';' new_val = ';' else: sep = ',' new_val = '' new_val += sep.join(kv_strs) elif param[0] == '.': new_val = f'.{val}' elif param[0] == ';': new_val = f';{clean_param}={val}' else: new_val = val new_params[param] = new_val return url.format(**new_params)
null
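_format_url expands OpenAPI-style path templates, including label (".") and matrix (";") parameter styles with an optional explode ("*") modifier. A traceable sketch of the plain {name} case (the URL and values are illustrative):

# Simple-style parameters take the final branch: the value is substituted as-is.
url = '/repos/{owner}/{repo}/issues/{issue_number}'
path_params = {'owner': 'octocat', 'repo': 'hello-world', 'issue_number': 42}

formatted = _format_url(url, path_params)
# formatted == '/repos/octocat/hello-world/issues/42'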
get_repl_context
"""Gets the notebook REPL context if running inside a Databricks notebook.
    Raises an ImportError otherwise.
    """
try:
    from dbruntime.databricks_repl_context import get_context
    return get_context()
except ImportError:
    raise ImportError(
        'Cannot access dbruntime, not running inside a Databricks notebook.')
def get_repl_context() ->Any:
    """Gets the notebook REPL context if running inside a Databricks notebook.
    Raises an ImportError otherwise.
    """
    try:
        from dbruntime.databricks_repl_context import get_context
        return get_context()
    except ImportError:
        raise ImportError(
            'Cannot access dbruntime, not running inside a Databricks notebook.'
            )
Gets the notebook REPL context if running inside a Databricks notebook. Raises an ImportError otherwise.
test_exceptions_raised_while_parsing
"""Test exceptions raised correctly while using JSON parser.""" chat_generation = ChatGeneration(message=bad_message) with pytest.raises(OutputParserException): JsonOutputFunctionsParser().parse_result([chat_generation])
@pytest.mark.parametrize('bad_message', [HumanMessage(content= 'This is a test message'), AIMessage(content='This is a test message', additional_kwargs={}), AIMessage(content='This is a test message', additional_kwargs={'function_call': {'name': 'function_name', 'arguments': {}}}), AIMessage(content='This is a test message', additional_kwargs={'function_call': {'name': 'function_name', 'arguments': 'noqweqwe'}})]) def test_exceptions_raised_while_parsing(bad_message: BaseMessage) ->None: """Test exceptions raised correctly while using JSON parser.""" chat_generation = ChatGeneration(message=bad_message) with pytest.raises(OutputParserException): JsonOutputFunctionsParser().parse_result([chat_generation])
Test exceptions raised correctly while using JSON parser.
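As a counterpart to the failure cases parametrized above, a sketch of a message the parser should accept (same imports as the test; note that "arguments" must be a JSON string, and the parser returns only the parsed arguments by default):

good_message = AIMessage(
    content='',
    additional_kwargs={
        'function_call': {'name': 'function_name', 'arguments': '{"x": 1}'}
    },
)
result = JsonOutputFunctionsParser().parse_result(
    [ChatGeneration(message=good_message)]
)
# result == {'x': 1}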
test_solver_question
"""Test question about solving algebraic equations that needs sympy""" question = 'What are the solutions to this equation x**2 - x?' output = fake_llm_symbolic_math_chain.run(question) assert output == 'Answer: {0, 1}'
def test_solver_question(fake_llm_symbolic_math_chain: LLMSymbolicMathChain ) ->None: """Test question about solving algebraic equations that needs sympy""" question = 'What are the solutions to this equation x**2 - x?' output = fake_llm_symbolic_math_chain.run(question) assert output == 'Answer: {0, 1}'
Test question about solving algebraic equations that needs sympy
similarity_search
"""Run similarity search.

    Args:
        query: search query text.
        k: Number of Documents to return. Defaults to 4.
        filter: Filter on metadata properties, e.g.
                        {
                            "str_property": "foo",
                            "int_property": 123
                        }
        brute_force: Whether to use brute force search. Defaults to False.
        fraction_lists_to_search: Optional percentage of lists to search,
            must be between 0.0 and 1.0, exclusive.
            If None, uses the service's default, which is 0.05.

    Returns:
        List of Documents most similar to the query vector.
    """
tuples = self.similarity_search_with_score(query, k, filter, brute_force,
    fraction_lists_to_search, **kwargs)
return [i[0] for i in tuples]
def similarity_search(self, query: str, k: int=DEFAULT_TOP_K, filter:
    Optional[Dict[str, Any]]=None, brute_force: bool=False,
    fraction_lists_to_search: Optional[float]=None, **kwargs: Any) ->List[
    Document]:
    """Run similarity search.

    Args:
        query: search query text.
        k: Number of Documents to return. Defaults to 4.
        filter: Filter on metadata properties, e.g.
                        {
                            "str_property": "foo",
                            "int_property": 123
                        }
        brute_force: Whether to use brute force search. Defaults to False.
        fraction_lists_to_search: Optional percentage of lists to search,
            must be between 0.0 and 1.0, exclusive.
            If None, uses the service's default, which is 0.05.

    Returns:
        List of Documents most similar to the query vector.
    """
    tuples = self.similarity_search_with_score(query, k, filter,
        brute_force, fraction_lists_to_search, **kwargs)
    return [i[0] for i in tuples]
Run similarity search.

Args:
    query: search query text.
    k: Number of Documents to return. Defaults to 4.
    filter: Filter on metadata properties, e.g.
                    {
                        "str_property": "foo",
                        "int_property": 123
                    }
    brute_force: Whether to use brute force search. Defaults to False.
    fraction_lists_to_search: Optional percentage of lists to search,
        must be between 0.0 and 1.0, exclusive.
        If None, uses the service's default, which is 0.05.

Returns:
    List of Documents most similar to the query vector.
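A hedged usage sketch for the method above; `store` stands in for an already-constructed vector store instance and the filter key is illustrative:

docs = store.similarity_search(
    'machine learning tutorials',    # free-text query, embedded internally
    k=4,                             # number of documents to return
    filter={'str_property': 'foo'},  # metadata equality filter
    brute_force=False,               # use the index rather than a full scan
    fraction_lists_to_search=0.1,    # search 10% of the index lists
)
for doc in docs:
    print(doc.page_content)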
test_empty_steps
assert format_log_to_messages([]) == []
def test_empty_steps() ->None: assert format_log_to_messages([]) == []
null
test_get_validated_relative_path_errs_for_symlink_outside_root
"""Test that symlink pointing outside of root directory is not allowed.""" with TemporaryDirectory() as temp_dir: root = Path(temp_dir) user_path = 'symlink_outside_root' outside_path = Path('/bin/bash') symlink_path = root / user_path symlink_path.symlink_to(outside_path) match = re.escape( f'Path {user_path} is outside of the allowed directory {root.resolve()}' ) with pytest.raises(FileValidationError, match=match): get_validated_relative_path(root, user_path) symlink_path.unlink()
def test_get_validated_relative_path_errs_for_symlink_outside_root() ->None: """Test that symlink pointing outside of root directory is not allowed.""" with TemporaryDirectory() as temp_dir: root = Path(temp_dir) user_path = 'symlink_outside_root' outside_path = Path('/bin/bash') symlink_path = root / user_path symlink_path.symlink_to(outside_path) match = re.escape( f'Path {user_path} is outside of the allowed directory {root.resolve()}' ) with pytest.raises(FileValidationError, match=match): get_validated_relative_path(root, user_path) symlink_path.unlink()
Test that symlink pointing outside of root directory is not allowed.
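For contrast with the symlink case above, a sketch (same helpers and imports as the test) of the happy path where the user path stays inside the root:

def test_get_validated_relative_path_allows_path_inside_root() ->None:
    """Sketch: a plain relative path inside the root validates cleanly."""
    with TemporaryDirectory() as temp_dir:
        root = Path(temp_dir)
        (root / 'notes.txt').touch()
        resolved = get_validated_relative_path(root, 'notes.txt')
        # The resolved path stays under the (resolved) root directory.
        assert str(resolved).startswith(str(root.resolve()))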
add_texts
"""Run more texts through the embeddings and add to the vectorstore.
Args:
    texts: Iterable of strings to add to the vectorstore.
    metadatas: Optional list of metadatas associated with the texts.
    is_duplicate_texts: Optional; whether to add duplicate texts.
        Defaults to True.
    kwargs: Reserved for possible future extension parameters.

Returns:
    List of ids from adding the texts into the vectorstore.
"""
if self.awadb_client is None:
    raise ValueError('AwaDB client is None!!!')
embeddings = None
if self.using_table_name in self.table2embeddings:
    embeddings = self.table2embeddings[self.using_table_name].embed_documents(
        list(texts))
return self.awadb_client.AddTexts('embedding_text', 'text_embedding',
    texts, embeddings, metadatas, is_duplicate_texts)
def add_texts(self, texts: Iterable[str], metadatas: Optional[List[dict]]=
    None, is_duplicate_texts: Optional[bool]=None, **kwargs: Any) ->List[str]:
    """Run more texts through the embeddings and add to the vectorstore.
    Args:
        texts: Iterable of strings to add to the vectorstore.
        metadatas: Optional list of metadatas associated with the texts.
        is_duplicate_texts: Optional; whether to add duplicate texts.
            Defaults to True.
        kwargs: Reserved for possible future extension parameters.

    Returns:
        List of ids from adding the texts into the vectorstore.
    """
    if self.awadb_client is None:
        raise ValueError('AwaDB client is None!!!')
    embeddings = None
    if self.using_table_name in self.table2embeddings:
        embeddings = self.table2embeddings[self.using_table_name
            ].embed_documents(list(texts))
    return self.awadb_client.AddTexts('embedding_text', 'text_embedding',
        texts, embeddings, metadatas, is_duplicate_texts)
Run more texts through the embeddings and add to the vectorstore.
Args:
    texts: Iterable of strings to add to the vectorstore.
    metadatas: Optional list of metadatas associated with the texts.
    is_duplicate_texts: Optional; whether to add duplicate texts.
        Defaults to True.
    kwargs: Reserved for possible future extension parameters.

Returns:
    List of ids from adding the texts into the vectorstore.
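A hedged usage sketch; `awadb_store` is assumed to be an already-initialized AwaDB vector store whose table has an embedding configured:

ids = awadb_store.add_texts(
    texts=['first document', 'second document'],
    metadatas=[{'source': 'a'}, {'source': 'b'}],
)
# One id is returned per text added to the store.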
_create_chat_result
if 'function_call' in response: additional_kwargs = {'function_call': dict(response.get('function_call', {}))} else: additional_kwargs = {} generations = [ChatGeneration(message=AIMessage(content=response.get( 'result'), additional_kwargs={**additional_kwargs}))] token_usage = response.get('usage', {}) llm_output = {'token_usage': token_usage, 'model_name': self.model_name} return ChatResult(generations=generations, llm_output=llm_output)
def _create_chat_result(self, response: Mapping[str, Any]) ->ChatResult: if 'function_call' in response: additional_kwargs = {'function_call': dict(response.get( 'function_call', {}))} else: additional_kwargs = {} generations = [ChatGeneration(message=AIMessage(content=response.get( 'result'), additional_kwargs={**additional_kwargs}))] token_usage = response.get('usage', {}) llm_output = {'token_usage': token_usage, 'model_name': self.model_name} return ChatResult(generations=generations, llm_output=llm_output)
null
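A sketch of the response mapping shape this converter expects, with field names inferred from the accesses above and illustrative values:

response = {
    'result': 'Hello!',
    'usage': {'prompt_tokens': 3, 'completion_tokens': 2, 'total_tokens': 5},
    # 'function_call': {'name': '...', 'arguments': '...'},  # optional
}
# _create_chat_result(response) wraps AIMessage(content='Hello!') in a single
# ChatGeneration and copies the usage dict into llm_output['token_usage'].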
create_qa_with_sources_chain
"""Create a question answering chain that returns an answer with sources. Args: llm: Language model to use for the chain. verbose: Whether to print the details of the chain **kwargs: Keyword arguments to pass to `create_qa_with_structure_chain`. Returns: Chain (LLMChain) that can be used to answer questions with citations. """ return create_qa_with_structure_chain(llm, AnswerWithSources, verbose= verbose, **kwargs)
def create_qa_with_sources_chain(llm: BaseLanguageModel, verbose: bool= False, **kwargs: Any) ->LLMChain: """Create a question answering chain that returns an answer with sources. Args: llm: Language model to use for the chain. verbose: Whether to print the details of the chain **kwargs: Keyword arguments to pass to `create_qa_with_structure_chain`. Returns: Chain (LLMChain) that can be used to answer questions with citations. """ return create_qa_with_structure_chain(llm, AnswerWithSources, verbose= verbose, **kwargs)
Create a question answering chain that returns an answer with sources. Args: llm: Language model to use for the chain. verbose: Whether to print the details of the chain **kwargs: Keyword arguments to pass to `create_qa_with_structure_chain`. Returns: Chain (LLMChain) that can be used to answer questions with citations.
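A hedged usage sketch; ChatOpenAI is only one possible BaseLanguageModel, and its import path is an assumption that varies by version:

from langchain_openai import ChatOpenAI  # older versions: langchain.chat_models

llm = ChatOpenAI(temperature=0)
qa_chain = create_qa_with_sources_chain(llm, verbose=True)
# The returned LLMChain produces an AnswerWithSources-shaped result: an answer
# string plus the sources it was drawn from.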
failed
return self.status == 'failed'
def failed(self) ->bool: return self.status == 'failed'
null
test_load_success
docs = retriever.get_relevant_documents(query='chatgpt') assert len(docs) == 3 assert_docs(docs)
def test_load_success(retriever: PubMedRetriever) ->None: docs = retriever.get_relevant_documents(query='chatgpt') assert len(docs) == 3 assert_docs(docs)
null
fetch_place_details
try:
    place_details = self.google_map_client.place(place_id)
    place_details['place_id'] = place_id
    formatted_details = self.format_place_details(place_details)
    return formatted_details
except Exception as e:
    logging.error(f'An error occurred while fetching place details: {e}')
    return None
def fetch_place_details(self, place_id: str) ->Optional[str]:
    try:
        place_details = self.google_map_client.place(place_id)
        place_details['place_id'] = place_id
        formatted_details = self.format_place_details(place_details)
        return formatted_details
    except Exception as e:
        logging.error(f'An error occurred while fetching place details: {e}')
        return None
null
from_string
"""Create a KnowledgeTriple from a string.""" subject, predicate, object_ = triple_string.strip().split(', ') subject = subject[1:] object_ = object_[:-1] return cls(subject, predicate, object_)
@classmethod def from_string(cls, triple_string: str) ->'KnowledgeTriple': """Create a KnowledgeTriple from a string.""" subject, predicate, object_ = triple_string.strip().split(', ') subject = subject[1:] object_ = object_[:-1] return cls(subject, predicate, object_)
Create a KnowledgeTriple from a string.
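A sketch of the string format the parser expects: a parenthesized, comma-plus-space separated triple (the subject/predicate/object_ field names are the usual ones for this class and are assumed here):

triple = KnowledgeTriple.from_string('(Neil Armstrong, walked on, the Moon)')
# triple.subject == 'Neil Armstrong'
# triple.predicate == 'walked on'
# triple.object_ == 'the Moon'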
test_similarity_search_with_metadata
"""Test end to end construction and search with a metadata filter. This test requires a column named "a" of type integer to be present in the Xata table.""" texts = ['foo', 'foo', 'foo'] metadatas = [{'a': i} for i in range(len(texts))] docsearch = XataVectorStore.from_texts(api_key=os.getenv('XATA_API_KEY'), db_url=os.getenv('XATA_DB_URL'), texts=texts, embedding= embedding_openai, metadatas=metadatas) docsearch.wait_for_indexing(ndocs=3) output = docsearch.similarity_search('foo', k=1, filter={'a': 1}) assert output == [Document(page_content='foo', metadata={'a': 1})] docsearch.delete(delete_all=True)
def test_similarity_search_with_metadata(self, embedding_openai: OpenAIEmbeddings) ->None: """Test end to end construction and search with a metadata filter. This test requires a column named "a" of type integer to be present in the Xata table.""" texts = ['foo', 'foo', 'foo'] metadatas = [{'a': i} for i in range(len(texts))] docsearch = XataVectorStore.from_texts(api_key=os.getenv('XATA_API_KEY' ), db_url=os.getenv('XATA_DB_URL'), texts=texts, embedding= embedding_openai, metadatas=metadatas) docsearch.wait_for_indexing(ndocs=3) output = docsearch.similarity_search('foo', k=1, filter={'a': 1}) assert output == [Document(page_content='foo', metadata={'a': 1})] docsearch.delete(delete_all=True)
Test end to end construction and search with a metadata filter. This test requires a column named "a" of type integer to be present in the Xata table.
_import_vald
from langchain_community.vectorstores.vald import Vald return Vald
def _import_vald() ->Any: from langchain_community.vectorstores.vald import Vald return Vald
null
setup_class
collection = get_collection() assert collection.count_documents({}) == 0
@classmethod def setup_class(cls) ->None: collection = get_collection() assert collection.count_documents({}) == 0
null
test_api_key_is_string
embeddings = GoogleGenerativeAIEmbeddings(model='models/embedding-001', google_api_key='secret-api-key') assert isinstance(embeddings.google_api_key, SecretStr)
def test_api_key_is_string() ->None: embeddings = GoogleGenerativeAIEmbeddings(model='models/embedding-001', google_api_key='secret-api-key') assert isinstance(embeddings.google_api_key, SecretStr)
null
invoke
config = ensure_config(config) configurable = config.get('configurable', {}) for id_, mapper in zip(self.ids, self.keys.values()): if mapper is not None: configurable[id_](mapper.invoke(input, config)) else: configurable[id_](input) return input
def invoke(self, input: Any, config: Optional[RunnableConfig]=None) ->Any: config = ensure_config(config) configurable = config.get('configurable', {}) for id_, mapper in zip(self.ids, self.keys.values()): if mapper is not None: configurable[id_](mapper.invoke(input, config)) else: configurable[id_](input) return input
null
update
"""Upsert records into the database."""
if group_ids is None:
    group_ids = [None] * len(keys)
if len(keys) != len(group_ids):
    raise ValueError(
        f'Number of keys ({len(keys)}) does not match number of group_ids ({len(group_ids)})'
        )
update_time = self.get_time()
if time_at_least and update_time < time_at_least:
    raise AssertionError(f'Time sync issue: {update_time} < {time_at_least}')
records_to_upsert = [{'key': key, 'namespace': self.namespace,
    'updated_at': update_time, 'group_id': group_id} for key, group_id in
    zip(keys, group_ids)]
with self._make_session() as session:
    if self.dialect == 'sqlite':
        from sqlalchemy.dialects.sqlite import insert as sqlite_insert
        insert_stmt = sqlite_insert(UpsertionRecord).values(records_to_upsert)
        stmt = insert_stmt.on_conflict_do_update([UpsertionRecord.key,
            UpsertionRecord.namespace], set_=dict(
            updated_at=insert_stmt.excluded.updated_at,
            group_id=insert_stmt.excluded.group_id))
    elif self.dialect == 'postgresql':
        from sqlalchemy.dialects.postgresql import insert as pg_insert
        insert_stmt = pg_insert(UpsertionRecord).values(records_to_upsert)
        stmt = insert_stmt.on_conflict_do_update('uix_key_namespace',
            set_=dict(updated_at=insert_stmt.excluded.updated_at,
            group_id=insert_stmt.excluded.group_id))
    else:
        raise NotImplementedError(f'Unsupported dialect {self.dialect}')
    session.execute(stmt)
    session.commit()
def update(self, keys: Sequence[str], *, group_ids: Optional[Sequence[
    Optional[str]]]=None, time_at_least: Optional[float]=None) ->None:
    """Upsert records into the database."""
    if group_ids is None:
        group_ids = [None] * len(keys)
    if len(keys) != len(group_ids):
        raise ValueError(
            f'Number of keys ({len(keys)}) does not match number of group_ids ({len(group_ids)})'
            )
    update_time = self.get_time()
    if time_at_least and update_time < time_at_least:
        raise AssertionError(
            f'Time sync issue: {update_time} < {time_at_least}')
    records_to_upsert = [{'key': key, 'namespace': self.namespace,
        'updated_at': update_time, 'group_id': group_id} for key, group_id in
        zip(keys, group_ids)]
    with self._make_session() as session:
        if self.dialect == 'sqlite':
            from sqlalchemy.dialects.sqlite import insert as sqlite_insert
            insert_stmt = sqlite_insert(UpsertionRecord).values(
                records_to_upsert)
            stmt = insert_stmt.on_conflict_do_update([UpsertionRecord.key,
                UpsertionRecord.namespace], set_=dict(
                updated_at=insert_stmt.excluded.updated_at,
                group_id=insert_stmt.excluded.group_id))
        elif self.dialect == 'postgresql':
            from sqlalchemy.dialects.postgresql import insert as pg_insert
            insert_stmt = pg_insert(UpsertionRecord).values(records_to_upsert)
            stmt = insert_stmt.on_conflict_do_update('uix_key_namespace',
                set_=dict(updated_at=insert_stmt.excluded.updated_at,
                group_id=insert_stmt.excluded.group_id))
        else:
            raise NotImplementedError(f'Unsupported dialect {self.dialect}')
        session.execute(stmt)
        session.commit()
Upsert records into the database.
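A hedged usage sketch; the record-manager construction shown in the comments is an assumption about the surrounding class, not something confirmed by this snippet:

# record_manager = SQLRecordManager('my_namespace', db_url='sqlite:///records.db')
# record_manager.create_schema()

record_manager.update(
    keys=['doc-1', 'doc-2'],
    group_ids=['source-a', 'source-a'],
)
# Existing keys are upserted on (key, namespace); new keys are inserted with
# the timestamp returned by get_time().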
test_gradient_llm_sync_batch
mocker.patch('requests.post', side_effect=mocked_requests_post) llm = GradientLLM(**setup) assert llm.gradient_access_token == _GRADIENT_SECRET assert llm.gradient_api_url == _GRADIENT_BASE_URL assert llm.gradient_workspace_id == _GRADIENT_WORKSPACE_ID assert llm.model_id == _MODEL_ID inputs = ['Say foo:', 'Say baz:', 'Say foo again'] response = llm._generate(inputs) want = ['bar', 'baz', 'bar'] assert len(response.generations) == len(inputs) for i, gen in enumerate(response.generations): assert gen[0].text == want[i]
@pytest.mark.parametrize('setup', [dict(gradient_api_url=_GRADIENT_BASE_URL, gradient_access_token=_GRADIENT_SECRET, gradient_workspace_id= _GRADIENT_WORKSPACE_ID, model=_MODEL_ID)]) def test_gradient_llm_sync_batch(mocker: MockerFixture, setup: dict) ->None: mocker.patch('requests.post', side_effect=mocked_requests_post) llm = GradientLLM(**setup) assert llm.gradient_access_token == _GRADIENT_SECRET assert llm.gradient_api_url == _GRADIENT_BASE_URL assert llm.gradient_workspace_id == _GRADIENT_WORKSPACE_ID assert llm.model_id == _MODEL_ID inputs = ['Say foo:', 'Say baz:', 'Say foo again'] response = llm._generate(inputs) want = ['bar', 'baz', 'bar'] assert len(response.generations) == len(inputs) for i, gen in enumerate(response.generations): assert gen[0].text == want[i]
null
test__convert_delta_to_message_assistant
delta = {'role': 'assistant', 'content': 'foo'} result = _convert_delta_to_message_chunk(delta, AIMessageChunk) expected_output = AIMessageChunk(content='foo') assert result == expected_output
def test__convert_delta_to_message_assistant() ->None: delta = {'role': 'assistant', 'content': 'foo'} result = _convert_delta_to_message_chunk(delta, AIMessageChunk) expected_output = AIMessageChunk(content='foo') assert result == expected_output
null
on_retriever_error
"""Run when Retriever errors.""" retrieval_run = self._get_run(run_id, run_type='retriever') retrieval_run.error = self._get_stacktrace(error) retrieval_run.end_time = datetime.now(timezone.utc) retrieval_run.events.append({'name': 'error', 'time': retrieval_run.end_time}) self._end_trace(retrieval_run) self._on_retriever_error(retrieval_run) return retrieval_run
def on_retriever_error(self, error: BaseException, *, run_id: UUID, ** kwargs: Any) ->Run: """Run when Retriever errors.""" retrieval_run = self._get_run(run_id, run_type='retriever') retrieval_run.error = self._get_stacktrace(error) retrieval_run.end_time = datetime.now(timezone.utc) retrieval_run.events.append({'name': 'error', 'time': retrieval_run. end_time}) self._end_trace(retrieval_run) self._on_retriever_error(retrieval_run) return retrieval_run
Run when Retriever errors.
test_handles_empty_input_list
output = format_to_openai_function_messages([]) assert output == []
def test_handles_empty_input_list() ->None: output = format_to_openai_function_messages([]) assert output == []
null
load_and_split
"""Load all documents and split them into sentences."""
raise NotImplementedError(
    'Loading and splitting is not yet implemented for generic loaders. When it is implemented, it will be added via the initializer. This method should not be used going forward.'
    )
def load_and_split(self, text_splitter: Optional[TextSplitter]=None) ->List[
    Document]:
    """Load all documents and split them into sentences."""
    raise NotImplementedError(
        'Loading and splitting is not yet implemented for generic loaders. When it is implemented, it will be added via the initializer. This method should not be used going forward.'
        )
Load all documents and split them into sentences.
similarity_search_by_vector_with_relevance_scores
match_documents_params = self.match_args(query, filter) query_builder = self._client.rpc(self.query_name, match_documents_params) if postgrest_filter: query_builder.params = query_builder.params.set('and', f'({postgrest_filter})') query_builder.params = query_builder.params.set('limit', k) res = query_builder.execute() match_result = [(Document(metadata=search.get('metadata', {}), page_content =search.get('content', '')), search.get('similarity', 0.0)) for search in res.data if search.get('content')] if score_threshold is not None: match_result = [(doc, similarity) for doc, similarity in match_result if similarity >= score_threshold] if len(match_result) == 0: warnings.warn( f'No relevant docs were retrieved using the relevance score threshold {score_threshold}' ) return match_result
def similarity_search_by_vector_with_relevance_scores(self, query: List[ float], k: int, filter: Optional[Dict[str, Any]]=None, postgrest_filter: Optional[str]=None, score_threshold: Optional[float]=None) ->List[Tuple [Document, float]]: match_documents_params = self.match_args(query, filter) query_builder = self._client.rpc(self.query_name, match_documents_params) if postgrest_filter: query_builder.params = query_builder.params.set('and', f'({postgrest_filter})') query_builder.params = query_builder.params.set('limit', k) res = query_builder.execute() match_result = [(Document(metadata=search.get('metadata', {}), page_content=search.get('content', '')), search.get('similarity', 0.0)) for search in res.data if search.get('content')] if score_threshold is not None: match_result = [(doc, similarity) for doc, similarity in match_result if similarity >= score_threshold] if len(match_result) == 0: warnings.warn( f'No relevant docs were retrieved using the relevance score threshold {score_threshold}' ) return match_result
null
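A hedged usage sketch; `embeddings` and `store` are assumed to exist already and the threshold value is illustrative:

query_vector = embeddings.embed_query('what is a vector store?')
matches = store.similarity_search_by_vector_with_relevance_scores(
    query_vector, k=4, score_threshold=0.75
)
for doc, score in matches:
    print(round(score, 3), doc.page_content[:80])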
test_load_success_init_args_more
retriever = WikipediaRetriever(lang='en', top_k_results=20, load_all_available_meta=False) docs = retriever.get_relevant_documents('HUNTER X HUNTER') assert len(docs) == 20 assert_docs(docs, all_meta=False)
def test_load_success_init_args_more() ->None: retriever = WikipediaRetriever(lang='en', top_k_results=20, load_all_available_meta=False) docs = retriever.get_relevant_documents('HUNTER X HUNTER') assert len(docs) == 20 assert_docs(docs, all_meta=False)
null
memory_variables
return list(self.memories.keys())
@property def memory_variables(self) ->List[str]: return list(self.memories.keys())
null
_llm_type
return 'litellm-chat'
@property def _llm_type(self) ->str: return 'litellm-chat'
null
_import_slack_send_message
from langchain_community.tools.slack.send_message import SlackSendMessage return SlackSendMessage
def _import_slack_send_message() ->Any: from langchain_community.tools.slack.send_message import SlackSendMessage return SlackSendMessage
null
_get_docs
"""Get docs to run questioning over.""" return inputs.pop(self.input_docs_key)
def _get_docs(self, inputs: Dict[str, Any], *, run_manager: CallbackManagerForChainRun) ->List[Document]: """Get docs to run questioning over.""" return inputs.pop(self.input_docs_key)
Get docs to run questioning over.
on_llm_error
"""Run when LLM errors. Args: error (BaseException): The error that occurred. kwargs (Any): Additional keyword arguments. - response (LLMResult): The response which was generated before the error occurred. """
def on_llm_error(self, error: BaseException, *, run_id: UUID, parent_run_id: Optional[UUID]=None, **kwargs: Any) ->Any: """Run when LLM errors. Args: error (BaseException): The error that occurred. kwargs (Any): Additional keyword arguments. - response (LLMResult): The response which was generated before the error occurred. """
Run when LLM errors. Args: error (BaseException): The error that occurred. kwargs (Any): Additional keyword arguments. - response (LLMResult): The response which was generated before the error occurred.