Dataset columns (string lengths):
method_name: 1 to 78 characters
method_body: 3 to 9.66k characters
full_code: 31 to 10.7k characters
docstring: 4 to 4.74k characters
embeddings
return self.embedding_function
@property def embeddings(self) ->Embeddings: return self.embedding_function
null
get_navigable_strings
"""Get all navigable strings from a BeautifulSoup element. Args: element: A BeautifulSoup element. Returns: A generator of strings. """ from bs4 import NavigableString, Tag for child in cast(Tag, element).children: if isinstance(child, Tag): yield from get_navigable_strings(child) elif isinstance(child, NavigableString): if element.name == 'a' and (href := element.get('href')): yield f'{child.strip()} ({href})' else: yield child.strip()
def get_navigable_strings(element: Any) ->Iterator[str]: """Get all navigable strings from a BeautifulSoup element. Args: element: A BeautifulSoup element. Returns: A generator of strings. """ from bs4 import NavigableString, Tag for child in cast(Tag, element).children: if isinstance(child, Tag): yield from get_navigable_strings(child) elif isinstance(child, NavigableString): if element.name == 'a' and (href := element.get('href')): yield f'{child.strip()} ({href})' else: yield child.strip()
Get all navigable strings from a BeautifulSoup element. Args: element: A BeautifulSoup element. Returns: A generator of strings.
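A minimal usage sketch for the entry above, assuming beautifulsoup4 is installed and get_navigable_strings is in scope as defined here:

from bs4 import BeautifulSoup

html = '<div>Hello <a href="https://example.com">example</a> world</div>'
soup = BeautifulSoup(html, 'html.parser')

# Iterates child strings; anchor text gets its href appended.
for text in get_navigable_strings(soup.div):
    print(text)  # 'Hello', 'example (https://example.com)', 'world'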
from_data
priority = None if data['priority'] is None else data['priority']['priority'] return cls(id=data['id'], name=data['name'], text_content=data['text_content'], description=data['description'], status=data['status']['status'], creator_id=data['creator']['id'], creator_username=data['creator']['username'], creator_email=data['creator']['email'], assignees=data['assignees'], watchers=data['watchers'], priority=priority, due_date=data['due_date'], start_date=data['start_date'], points=data['points'], team_id=data['team_id'], project_id=data['project']['id'])
@classmethod def from_data(cls, data: Dict[str, Any]) ->'Task': priority = None if data['priority'] is None else data['priority']['priority'] return cls(id=data['id'], name=data['name'], text_content=data['text_content'], description=data['description'], status=data['status']['status'], creator_id=data['creator']['id'], creator_username=data['creator']['username'], creator_email=data['creator']['email'], assignees=data['assignees'], watchers=data['watchers'], priority=priority, due_date=data['due_date'], start_date=data['start_date'], points=data['points'], team_id=data['team_id'], project_id=data['project']['id'])
null
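A hedged sketch of the payload shape from_data expects; the keys mirror the ones accessed above, but the values and the Task usage are hypothetical:

# Hypothetical ClickUp-style payload; keys mirror those accessed in from_data above.
data = {
    'id': '123', 'name': 'Write docs', 'text_content': 'Draft the docs',
    'description': 'Draft the docs', 'status': {'status': 'open'},
    'creator': {'id': 1, 'username': 'jane', 'email': 'jane@example.com'},
    'assignees': [], 'watchers': [], 'priority': {'priority': 'high'},  # or None
    'due_date': None, 'start_date': None, 'points': None,
    'team_id': 't1', 'project': {'id': 'p1'},
}
# task = Task.from_data(data)  # assuming Task is the class this method belongs to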
get_pydantic_field_names
"""Get field names, including aliases, for a pydantic class. Args: pydantic_cls: Pydantic class.""" all_required_field_names = set() for field in pydantic_cls.__fields__.values(): all_required_field_names.add(field.name) if field.has_alias: all_required_field_names.add(field.alias) return all_required_field_names
def get_pydantic_field_names(pydantic_cls: Any) ->Set[str]: """Get field names, including aliases, for a pydantic class. Args: pydantic_cls: Pydantic class.""" all_required_field_names = set() for field in pydantic_cls.__fields__.values(): all_required_field_names.add(field.name) if field.has_alias: all_required_field_names.add(field.alias) return all_required_field_names
Get field names, including aliases, for a pydantic class. Args: pydantic_cls: Pydantic class.
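A small sketch, assuming pydantic v1-style __fields__ (which this helper relies on; pydantic v2 exposes fields differently):

from pydantic import BaseModel, Field

class Settings(BaseModel):
    api_key: str = Field(alias='apiKey')
    timeout: int = 10

print(get_pydantic_field_names(Settings))  # {'api_key', 'apiKey', 'timeout'}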
_get_llm_string
if self.is_lc_serializable(): params = {**kwargs, **{'stop': stop}} param_string = str(sorted([(k, v) for k, v in params.items()])) llm_string = dumps(self) return llm_string + '---' + param_string else: params = self._get_invocation_params(stop=stop, **kwargs) params = {**params, **kwargs} return str(sorted([(k, v) for k, v in params.items()]))
def _get_llm_string(self, stop: Optional[List[str]]=None, **kwargs: Any) ->str: if self.is_lc_serializable(): params = {**kwargs, **{'stop': stop}} param_string = str(sorted([(k, v) for k, v in params.items()])) llm_string = dumps(self) return llm_string + '---' + param_string else: params = self._get_invocation_params(stop=stop, **kwargs) params = {**params, **kwargs} return str(sorted([(k, v) for k, v in params.items()]))
null
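The method builds a cache key from the serialized model plus sorted call parameters; a standalone sketch of that pattern (the serialized string here is a stand-in, not the real dumps output):

serialized_llm = '{"model": "example-model", "temperature": 0}'  # stand-in for dumps(self)
params = {'stop': None, 'max_tokens': 10}
param_string = str(sorted(params.items()))
cache_key = serialized_llm + '---' + param_string
print(cache_key)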
import_pandas
"""Import the pandas python package and raise an error if it is not installed.""" try: import pandas except ImportError: raise ImportError( 'This callback manager requires the `pandas` python package installed. Please install it with `pip install pandas`' ) return pandas
def import_pandas() ->Any: """Import the pandas python package and raise an error if it is not installed.""" try: import pandas except ImportError: raise ImportError( 'This callback manager requires the `pandas` python package installed. Please install it with `pip install pandas`' ) return pandas
Import the pandas python package and raise an error if it is not installed.
_extract_query_params
"""Extract the query params from the deserialized input.""" query_params = {} for param in self.param_mapping.query_params: if param in args: query_params[param] = args.pop(param) return query_params
def _extract_query_params(self, args: Dict[str, str]) ->Dict[str, str]: """Extract the query params from the deserialized input.""" query_params = {} for param in self.param_mapping.query_params: if param in args: query_params[param] = args.pop(param) return query_params
Extract the query params from the deserialized input.
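A standalone sketch of the pop-by-mapping pattern used here; param_mapping.query_params is replaced by a plain list for illustration:

query_param_names = ['page', 'per_page']  # stand-in for self.param_mapping.query_params
args = {'page': '2', 'per_page': '50', 'body': 'hello'}

query_params = {p: args.pop(p) for p in query_param_names if p in args}
print(query_params)  # {'page': '2', 'per_page': '50'}
print(args)          # {'body': 'hello'}; extracted keys are removed from args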
test_max_marginal_relevance_search
"""Test max marginal relevance search.""" texts = ['foo', 'bar', 'baz'] docsearch = ElasticsearchStore.from_texts(texts, FakeEmbeddings(), ** elasticsearch_connection, index_name=index_name, strategy= ElasticsearchStore.ExactRetrievalStrategy()) mmr_output = docsearch.max_marginal_relevance_search(texts[0], k=3, fetch_k=3) sim_output = docsearch.similarity_search(texts[0], k=3) assert mmr_output == sim_output mmr_output = docsearch.max_marginal_relevance_search(texts[0], k=2, fetch_k=3) assert len(mmr_output) == 2 assert mmr_output[0].page_content == texts[0] assert mmr_output[1].page_content == texts[1] mmr_output = docsearch.max_marginal_relevance_search(texts[0], k=2, fetch_k =3, lambda_mult=0.1) assert len(mmr_output) == 2 assert mmr_output[0].page_content == texts[0] assert mmr_output[1].page_content == texts[2] mmr_output = docsearch.max_marginal_relevance_search(texts[0], k=3, fetch_k=2) assert len(mmr_output) == 2
def test_max_marginal_relevance_search(self, elasticsearch_connection: dict, index_name: str) ->None: """Test max marginal relevance search.""" texts = ['foo', 'bar', 'baz'] docsearch = ElasticsearchStore.from_texts(texts, FakeEmbeddings(), ** elasticsearch_connection, index_name=index_name, strategy= ElasticsearchStore.ExactRetrievalStrategy()) mmr_output = docsearch.max_marginal_relevance_search(texts[0], k=3, fetch_k=3) sim_output = docsearch.similarity_search(texts[0], k=3) assert mmr_output == sim_output mmr_output = docsearch.max_marginal_relevance_search(texts[0], k=2, fetch_k=3) assert len(mmr_output) == 2 assert mmr_output[0].page_content == texts[0] assert mmr_output[1].page_content == texts[1] mmr_output = docsearch.max_marginal_relevance_search(texts[0], k=2, fetch_k=3, lambda_mult=0.1) assert len(mmr_output) == 2 assert mmr_output[0].page_content == texts[0] assert mmr_output[1].page_content == texts[2] mmr_output = docsearch.max_marginal_relevance_search(texts[0], k=3, fetch_k=2) assert len(mmr_output) == 2
Test max marginal relevance search.
__init__
"""Initialize the visitor.""" self.source: Optional[str] = None self.count = 0
def __init__(self) ->None: """Initialize the visitor.""" self.source: Optional[str] = None self.count = 0
Initialize the visitor.
embed_query
"""Compute query embedding using an OctoAI instruct model.""" text = text.replace('\n', ' ') return self._compute_embeddings([text], self.query_instruction)[0]
def embed_query(self, text: str) ->List[float]: """Compute query embedding using an OctoAI instruct model.""" text = text.replace('\n', ' ') return self._compute_embeddings([text], self.query_instruction)[0]
Compute query embedding using an OctoAI instruct model.
lazy_load
"""Lazily load the file.""" response = requests.post('https://2markdown.com/api/2md', headers={ 'X-Api-Key': self.api_key}, json={'url': self.url}) text = response.json()['article'] metadata = {'source': self.url} yield Document(page_content=text, metadata=metadata)
def lazy_load(self) ->Iterator[Document]: """Lazily load the file.""" response = requests.post('https://2markdown.com/api/2md', headers={ 'X-Api-Key': self.api_key}, json={'url': self.url}) text = response.json()['article'] metadata = {'source': self.url} yield Document(page_content=text, metadata=metadata)
Lazily load the file.
run
if mode == 'get_issues': return self.get_issues() elif mode == 'get_issue': return json.dumps(self.get_issue(int(query))) elif mode == 'comment_on_issue': return self.comment_on_issue(query) elif mode == 'create_file': return self.create_file(query) elif mode == 'create_pull_request': return self.create_pull_request(query) elif mode == 'read_file': return self.read_file(query) elif mode == 'update_file': return self.update_file(query) elif mode == 'delete_file': return self.delete_file(query) else: raise ValueError('Invalid mode: ' + mode)
def run(self, mode: str, query: str) ->str: if mode == 'get_issues': return self.get_issues() elif mode == 'get_issue': return json.dumps(self.get_issue(int(query))) elif mode == 'comment_on_issue': return self.comment_on_issue(query) elif mode == 'create_file': return self.create_file(query) elif mode == 'create_pull_request': return self.create_pull_request(query) elif mode == 'read_file': return self.read_file(query) elif mode == 'update_file': return self.update_file(query) elif mode == 'delete_file': return self.delete_file(query) else: raise ValueError('Invalid mode: ' + mode)
null
llm
return _get_llm(max_tokens=10)
@pytest.fixture def llm() ->AzureOpenAI: return _get_llm(max_tokens=10)
null
on_retriever_end
self.on_retriever_end_common()
def on_retriever_end(self, *args: Any, **kwargs: Any) ->Any: self.on_retriever_end_common()
null
__add
faiss = dependable_faiss_import() if not isinstance(self.docstore, AddableMixin): raise ValueError( f'If trying to add texts, the underlying docstore should support adding items, which {self.docstore} does not' ) _len_check_if_sized(texts, metadatas, 'texts', 'metadatas') _metadatas = metadatas or ({} for _ in texts) documents = [Document(page_content=t, metadata=m) for t, m in zip(texts, _metadatas)] _len_check_if_sized(documents, embeddings, 'documents', 'embeddings') _len_check_if_sized(documents, ids, 'documents', 'ids') vector = np.array(embeddings, dtype=np.float32) if self._normalize_L2: faiss.normalize_L2(vector) self.index.add(vector) ids = ids or [str(uuid.uuid4()) for _ in texts] self.docstore.add({id_: doc for id_, doc in zip(ids, documents)}) starting_len = len(self.index_to_docstore_id) index_to_id = {(starting_len + j): id_ for j, id_ in enumerate(ids)} self.index_to_docstore_id.update(index_to_id) return ids
def __add(self, texts: Iterable[str], embeddings: Iterable[List[float]], metadatas: Optional[Iterable[dict]]=None, ids: Optional[List[str]]=None ) ->List[str]: faiss = dependable_faiss_import() if not isinstance(self.docstore, AddableMixin): raise ValueError( f'If trying to add texts, the underlying docstore should support adding items, which {self.docstore} does not' ) _len_check_if_sized(texts, metadatas, 'texts', 'metadatas') _metadatas = metadatas or ({} for _ in texts) documents = [Document(page_content=t, metadata=m) for t, m in zip(texts, _metadatas)] _len_check_if_sized(documents, embeddings, 'documents', 'embeddings') _len_check_if_sized(documents, ids, 'documents', 'ids') vector = np.array(embeddings, dtype=np.float32) if self._normalize_L2: faiss.normalize_L2(vector) self.index.add(vector) ids = ids or [str(uuid.uuid4()) for _ in texts] self.docstore.add({id_: doc for id_, doc in zip(ids, documents)}) starting_len = len(self.index_to_docstore_id) index_to_id = {(starting_len + j): id_ for j, id_ in enumerate(ids)} self.index_to_docstore_id.update(index_to_id) return ids
null
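A minimal sketch of the underlying FAISS pattern, adding float32 vectors and keeping a position-to-id map, assuming the faiss-cpu and numpy packages:

import uuid
import numpy as np
import faiss

dim = 4
index = faiss.IndexFlatL2(dim)

embeddings = np.random.rand(3, dim).astype(np.float32)
index.add(embeddings)  # rows are appended in order

ids = [str(uuid.uuid4()) for _ in range(len(embeddings))]
index_to_docstore_id = {i: id_ for i, id_ in enumerate(ids)}
print(index.ntotal, index_to_docstore_id)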
_stream
messages, params = self._get_chat_params([prompt], stop) params = {**params, **kwargs, 'stream': True} for stream_resp in completion_with_retry(self, messages=messages, run_manager=run_manager, **params): if not isinstance(stream_resp, dict): stream_resp = stream_resp.dict() token = stream_resp['choices'][0]['delta'].get('content', '') chunk = GenerationChunk(text=token) yield chunk if run_manager: run_manager.on_llm_new_token(token, chunk=chunk)
def _stream(self, prompt: str, stop: Optional[List[str]]=None, run_manager: Optional[CallbackManagerForLLMRun]=None, **kwargs: Any) ->Iterator[ GenerationChunk]: messages, params = self._get_chat_params([prompt], stop) params = {**params, **kwargs, 'stream': True} for stream_resp in completion_with_retry(self, messages=messages, run_manager=run_manager, **params): if not isinstance(stream_resp, dict): stream_resp = stream_resp.dict() token = stream_resp['choices'][0]['delta'].get('content', '') chunk = GenerationChunk(text=token) yield chunk if run_manager: run_manager.on_llm_new_token(token, chunk=chunk)
null
_import_predictionguard
from langchain_community.llms.predictionguard import PredictionGuard return PredictionGuard
def _import_predictionguard() ->Any: from langchain_community.llms.predictionguard import PredictionGuard return PredictionGuard
null
_type
return 'conversational'
@property def _type(self) ->str: return 'conversational'
null
from_documents
""" Create a Bagel vectorstore from a list of documents. Args: documents (List[Document]): List of Document objects to add to the Bagel vectorstore. embedding (Optional[List[float]]): List of embedding. ids (Optional[List[str]]): List of IDs. Defaults to None. cluster_name (str): The name of the BagelDB cluster. client_settings (Optional[bagel.config.Settings]): Client settings. client (Optional[bagel.Client]): Bagel client instance. cluster_metadata (Optional[Dict]): Metadata associated with the Bagel cluster. Defaults to None. Returns: Bagel: Bagel vectorstore. """ texts = [doc.page_content for doc in documents] metadatas = [doc.metadata for doc in documents] return cls.from_texts(texts=texts, embedding=embedding, metadatas=metadatas, ids=ids, cluster_name=cluster_name, client_settings=client_settings, client=client, cluster_metadata=cluster_metadata, **kwargs)
@classmethod def from_documents(cls: Type[Bagel], documents: List[Document], embedding: Optional[Embeddings]=None, ids: Optional[List[str]]=None, cluster_name: str=_LANGCHAIN_DEFAULT_CLUSTER_NAME, client_settings: Optional[bagel.config.Settings]=None, client: Optional[bagel.Client]=None, cluster_metadata: Optional[Dict]=None, **kwargs: Any) ->Bagel: """ Create a Bagel vectorstore from a list of documents. Args: documents (List[Document]): List of Document objects to add to the Bagel vectorstore. embedding (Optional[List[float]]): List of embedding. ids (Optional[List[str]]): List of IDs. Defaults to None. cluster_name (str): The name of the BagelDB cluster. client_settings (Optional[bagel.config.Settings]): Client settings. client (Optional[bagel.Client]): Bagel client instance. cluster_metadata (Optional[Dict]): Metadata associated with the Bagel cluster. Defaults to None. Returns: Bagel: Bagel vectorstore. """ texts = [doc.page_content for doc in documents] metadatas = [doc.metadata for doc in documents] return cls.from_texts(texts=texts, embedding=embedding, metadatas=metadatas, ids=ids, cluster_name=cluster_name, client_settings=client_settings, client=client, cluster_metadata=cluster_metadata, **kwargs)
Create a Bagel vectorstore from a list of documents. Args: documents (List[Document]): List of Document objects to add to the Bagel vectorstore. embedding (Optional[List[float]]): List of embedding. ids (Optional[List[str]]): List of IDs. Defaults to None. cluster_name (str): The name of the BagelDB cluster. client_settings (Optional[bagel.config.Settings]): Client settings. client (Optional[bagel.Client]): Bagel client instance. cluster_metadata (Optional[Dict]): Metadata associated with the Bagel cluster. Defaults to None. Returns: Bagel: Bagel vectorstore.
_wrapped_fn
return contexts.pop().run(fn, *args)
def _wrapped_fn(*args: Any) ->T: return contexts.pop().run(fn, *args)
null
test_redis_semantic_cache_multi
set_llm_cache(RedisSemanticCache(embedding=FakeEmbeddings(), redis_url= REDIS_TEST_URL, score_threshold=0.1)) llm = FakeLLM() params = llm.dict() params['stop'] = None llm_string = str(sorted([(k, v) for k, v in params.items()])) get_llm_cache().update('foo', llm_string, [Generation(text='fizz'), Generation(text='Buzz')]) output = llm.generate(['bar']) expected_output = LLMResult(generations=[[Generation(text='fizz'), Generation(text='Buzz')]], llm_output={}) assert output == expected_output get_llm_cache().clear(llm_string=llm_string)
def test_redis_semantic_cache_multi() ->None: set_llm_cache(RedisSemanticCache(embedding=FakeEmbeddings(), redis_url= REDIS_TEST_URL, score_threshold=0.1)) llm = FakeLLM() params = llm.dict() params['stop'] = None llm_string = str(sorted([(k, v) for k, v in params.items()])) get_llm_cache().update('foo', llm_string, [Generation(text='fizz'), Generation(text='Buzz')]) output = llm.generate(['bar']) expected_output = LLMResult(generations=[[Generation(text='fizz'), Generation(text='Buzz')]], llm_output={}) assert output == expected_output get_llm_cache().clear(llm_string=llm_string)
null
test_konko_chat_test
"""Evaluate basic ChatKonko functionality.""" chat_instance = ChatKonko(max_tokens=10) msg = HumanMessage(content='Hi') chat_response = chat_instance([msg]) assert isinstance(chat_response, BaseMessage) assert isinstance(chat_response.content, str)
def test_konko_chat_test() ->None: """Evaluate basic ChatKonko functionality.""" chat_instance = ChatKonko(max_tokens=10) msg = HumanMessage(content='Hi') chat_response = chat_instance([msg]) assert isinstance(chat_response, BaseMessage) assert isinstance(chat_response.content, str)
Evaluate basic ChatKonko functionality.
_llm_type
"""Return type of llm.""" return 'vllm-openai'
@property def _llm_type(self) ->str: """Return type of llm.""" return 'vllm-openai'
Return type of llm.
test_self_hosted_embedding_query
"""Test self-hosted custom embeddings.""" query = 'foo bar' gpu = get_remote_instance() embedding = SelfHostedEmbeddings(model_load_fn=get_pipeline, hardware=gpu, inference_fn=inference_fn) output = embedding.embed_query(query) assert len(output) == 50265
def test_self_hosted_embedding_query() ->None: """Test self-hosted custom embeddings.""" query = 'foo bar' gpu = get_remote_instance() embedding = SelfHostedEmbeddings(model_load_fn=get_pipeline, hardware= gpu, inference_fn=inference_fn) output = embedding.embed_query(query) assert len(output) == 50265
Test self-hosted custom embeddings.
__is_headers_available_for_non_html
_unstructured_version = self.__version.split('-')[0] unstructured_version = tuple([int(x) for x in _unstructured_version.split('.')]) return unstructured_version >= (0, 5, 13)
def __is_headers_available_for_non_html(self) ->bool: _unstructured_version = self.__version.split('-')[0] unstructured_version = tuple([int(x) for x in _unstructured_version.split('.')]) return unstructured_version >= (0, 5, 13)
null
score_response
score = 200 return score
def score_response(self, inputs: Dict[str, Any], llm_response: str, event: pick_best_chain.PickBestEvent) ->float: score = 200 return score
null
_run
"""Run the tool.""" results = self.api_resource.users().messages().list(userId='me', q=query, maxResults=max_results).execute().get(resource.value, []) if resource == Resource.THREADS: return self._parse_threads(results) elif resource == Resource.MESSAGES: return self._parse_messages(results) else: raise NotImplementedError(f'Resource of type {resource} not implemented.')
def _run(self, query: str, resource: Resource=Resource.MESSAGES, max_results: int=10, run_manager: Optional[CallbackManagerForToolRun]=None ) ->List[Dict[str, Any]]: """Run the tool.""" results = self.api_resource.users().messages().list(userId='me', q= query, maxResults=max_results).execute().get(resource.value, []) if resource == Resource.THREADS: return self._parse_threads(results) elif resource == Resource.MESSAGES: return self._parse_messages(results) else: raise NotImplementedError( f'Resource of type {resource} not implemented.')
Run the tool.
from_llm
"""Create a new evaluator from an LLM."""
@classmethod @abstractmethod def from_llm(cls, llm: BaseLanguageModel, **kwargs: Any) ->LLMEvalChain: """Create a new evaluator from an LLM."""
Create a new evaluator from an LLM.
detect_file_encodings
"""Try to detect the file encoding. Returns a list of `FileEncoding` tuples with the detected encodings ordered by confidence. Args: file_path: The path to the file to detect the encoding for. timeout: The timeout in seconds for the encoding detection. """ import chardet def read_and_detect(file_path: str) ->List[dict]: with open(file_path, 'rb') as f: rawdata = f.read() return cast(List[dict], chardet.detect_all(rawdata)) with concurrent.futures.ThreadPoolExecutor() as executor: future = executor.submit(read_and_detect, file_path) try: encodings = future.result(timeout=timeout) except concurrent.futures.TimeoutError: raise TimeoutError( f'Timeout reached while detecting encoding for {file_path}') if all(encoding['encoding'] is None for encoding in encodings): raise RuntimeError(f'Could not detect encoding for {file_path}') return [FileEncoding(**enc) for enc in encodings if enc['encoding'] is not None ]
def detect_file_encodings(file_path: str, timeout: int=5) ->List[FileEncoding]: """Try to detect the file encoding. Returns a list of `FileEncoding` tuples with the detected encodings ordered by confidence. Args: file_path: The path to the file to detect the encoding for. timeout: The timeout in seconds for the encoding detection. """ import chardet def read_and_detect(file_path: str) ->List[dict]: with open(file_path, 'rb') as f: rawdata = f.read() return cast(List[dict], chardet.detect_all(rawdata)) with concurrent.futures.ThreadPoolExecutor() as executor: future = executor.submit(read_and_detect, file_path) try: encodings = future.result(timeout=timeout) except concurrent.futures.TimeoutError: raise TimeoutError( f'Timeout reached while detecting encoding for {file_path}') if all(encoding['encoding'] is None for encoding in encodings): raise RuntimeError(f'Could not detect encoding for {file_path}') return [FileEncoding(**enc) for enc in encodings if enc['encoding'] is not None]
Try to detect the file encoding. Returns a list of `FileEncoding` tuples with the detected encodings ordered by confidence. Args: file_path: The path to the file to detect the encoding for. timeout: The timeout in seconds for the encoding detection.
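A usage sketch, assuming detect_file_encodings is importable and notes.txt exists; results are ordered by confidence, so the first entry is used:

path = 'notes.txt'  # assumed to exist
encodings = detect_file_encodings(path, timeout=5)
best = encodings[0]
with open(path, encoding=best.encoding) as f:
    text = f.read()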
is_lc_serializable
return False
@classmethod def is_lc_serializable(cls) ->bool: return False
null
test_run_success
output = api_client.run('HUNTER X HUNTER') assert 'Yoshihiro Togashi' in output
def test_run_success(api_client: WikipediaAPIWrapper) ->None: output = api_client.run('HUNTER X HUNTER') assert 'Yoshihiro Togashi' in output
null
test_character_text_splitter_separtor_empty_doc
"""Test edge cases are separators.""" text = 'f b' splitter = CharacterTextSplitter(separator=' ', chunk_size=2, chunk_overlap=0) output = splitter.split_text(text) expected_output = ['f', 'b'] assert output == expected_output
def test_character_text_splitter_separtor_empty_doc() ->None: """Test edge cases are separators.""" text = 'f b' splitter = CharacterTextSplitter(separator=' ', chunk_size=2, chunk_overlap=0) output = splitter.split_text(text) expected_output = ['f', 'b'] assert output == expected_output
Test edge cases are separators.
set_model_kwargs
if v: assert 'prompt' not in v, "model_kwargs must not contain key 'prompt'" assert 'stop' not in v, "model_kwargs must not contain key 'stop'" return v
@validator('model_kwargs', always=True) def set_model_kwargs(cls, v: Optional[Dict[str, Any]]) ->Optional[Dict[str, Any]]: if v: assert 'prompt' not in v, "model_kwargs must not contain key 'prompt'" assert 'stop' not in v, "model_kwargs must not contain key 'stop'" return v
null
test_similarity_search_by_vector_not_supported_for_managed_embedding
index = mock_index(DELTA_SYNC_INDEX_MANAGED_EMBEDDINGS) index.similarity_search.return_value = EXAMPLE_SEARCH_RESPONSE vectorsearch = default_databricks_vector_search(index) query_embedding = DEFAULT_EMBEDDING_MODEL.embed_query('foo') filters = {'some filter': True} limit = 7 with pytest.raises(ValueError) as ex: vectorsearch.similarity_search_by_vector(query_embedding, k=limit, filters=filters) assert '`similarity_search_by_vector` is not supported for index with Databricks-managed embeddings.' in str( ex.value)
@pytest.mark.requires('databricks', 'databricks.vector_search') def test_similarity_search_by_vector_not_supported_for_managed_embedding( ) ->None: index = mock_index(DELTA_SYNC_INDEX_MANAGED_EMBEDDINGS) index.similarity_search.return_value = EXAMPLE_SEARCH_RESPONSE vectorsearch = default_databricks_vector_search(index) query_embedding = DEFAULT_EMBEDDING_MODEL.embed_query('foo') filters = {'some filter': True} limit = 7 with pytest.raises(ValueError) as ex: vectorsearch.similarity_search_by_vector(query_embedding, k=limit, filters=filters) assert '`similarity_search_by_vector` is not supported for index with Databricks-managed embeddings.' in str( ex.value)
null
test_get_media_metadata_location
"""Test for retrieving media metadata location from NASA Image and Video Library""" nasa = NasaAPIWrapper() output = nasa.run('get_media_metadata_location', 'as11-40-5874') assert output is not None
def test_get_media_metadata_location() ->None: """Test for retrieving media metadata location from NASA Image and Video Library""" nasa = NasaAPIWrapper() output = nasa.run('get_media_metadata_location', 'as11-40-5874') assert output is not None
Test for retrieving media metadata location from NASA Image and Video Library
_invoke
return self._pick(input)
def _invoke(self, input: Dict[str, Any]) ->Dict[str, Any]: return self._pick(input)
null
__init__
"""Initialize an EncodedStore.""" self.store = store self.key_encoder = key_encoder self.value_serializer = value_serializer self.value_deserializer = value_deserializer
def __init__(self, store: BaseStore[str, Any], key_encoder: Callable[[K], str], value_serializer: Callable[[V], bytes], value_deserializer: Callable[[Any], V]) ->None: """Initialize an EncodedStore.""" self.store = store self.key_encoder = key_encoder self.value_serializer = value_serializer self.value_deserializer = value_deserializer
Initialize an EncodedStore.
__init__
super().__init__(blob_loader, blob_parser) self.num_workers = num_workers
def __init__(self, blob_loader: BlobLoader, blob_parser: BaseBlobParser, num_workers: int=4) ->None: super().__init__(blob_loader, blob_parser) self.num_workers = num_workers
null
embed_query
"""Embed a query using a YandexGPT embeddings models. Args: text: The text to embed. Returns: Embeddings for the text. """ return _embed_with_retry(self, texts=[text])[0]
def embed_query(self, text: str) ->List[float]: """Embed a query using a YandexGPT embeddings models. Args: text: The text to embed. Returns: Embeddings for the text. """ return _embed_with_retry(self, texts=[text])[0]
Embed a query using a YandexGPT embeddings models. Args: text: The text to embed. Returns: Embeddings for the text.
ensure_config
"""Ensure that a config is a dict with all keys present. Args: config (Optional[RunnableConfig], optional): The config to ensure. Defaults to None. Returns: RunnableConfig: The ensured config. """ empty = RunnableConfig(tags=[], metadata={}, callbacks=None, recursion_limit=25 ) if (var_config := var_child_runnable_config.get()): empty.update(cast(RunnableConfig, {k: v for k, v in var_config.items() if v is not None})) if config is not None: empty.update(cast(RunnableConfig, {k: v for k, v in config.items() if v is not None})) return empty
def ensure_config(config: Optional[RunnableConfig]=None) ->RunnableConfig: """Ensure that a config is a dict with all keys present. Args: config (Optional[RunnableConfig], optional): The config to ensure. Defaults to None. Returns: RunnableConfig: The ensured config. """ empty = RunnableConfig(tags=[], metadata={}, callbacks=None, recursion_limit=25) if (var_config := var_child_runnable_config.get()): empty.update(cast(RunnableConfig, {k: v for k, v in var_config. items() if v is not None})) if config is not None: empty.update(cast(RunnableConfig, {k: v for k, v in config.items() if v is not None})) return empty
Ensure that a config is a dict with all keys present. Args: config (Optional[RunnableConfig], optional): The config to ensure. Defaults to None. Returns: RunnableConfig: The ensured config.
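The merge order (defaults, then non-None values from the context var, then non-None values from the caller's config) shown as a standalone sketch with plain dicts standing in for RunnableConfig:

defaults = {'tags': [], 'metadata': {}, 'callbacks': None, 'recursion_limit': 25}
var_config = {'tags': ['from-context'], 'callbacks': None}   # e.g. inherited context
caller_config = {'recursion_limit': 10, 'metadata': None}    # passed-in config

merged = dict(defaults)
merged.update({k: v for k, v in var_config.items() if v is not None})
merged.update({k: v for k, v in caller_config.items() if v is not None})
print(merged)  # {'tags': ['from-context'], 'metadata': {}, 'callbacks': None, 'recursion_limit': 10}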
token_or_credential_present
"""Validate that at least one of token and credentials is present.""" if 'token' in values or 'credential' in values: return values raise ValueError('Please provide either a credential or a token.')
@root_validator(pre=True, allow_reuse=True) def token_or_credential_present(cls, values: Dict[str, Any]) ->Dict[str, Any]: """Validate that at least one of token and credentials is present.""" if 'token' in values or 'credential' in values: return values raise ValueError('Please provide either a credential or a token.')
Validate that at least one of token and credentials is present.
_import_myscale
from langchain_community.vectorstores.myscale import MyScale return MyScale
def _import_myscale() ->Any: from langchain_community.vectorstores.myscale import MyScale return MyScale
null
completion_with_retry
"""Use tenacity to retry the completion call.""" retry_decorator = _create_retry_decorator(llm) @retry_decorator def _completion_with_retry(**_kwargs: Any) ->Any: return make_request(llm, **_kwargs) return _completion_with_retry(**kwargs)
def completion_with_retry(llm: Nebula, **kwargs: Any) ->Any: """Use tenacity to retry the completion call.""" retry_decorator = _create_retry_decorator(llm) @retry_decorator def _completion_with_retry(**_kwargs: Any) ->Any: return make_request(llm, **_kwargs) return _completion_with_retry(**kwargs)
Use tenacity to retry the completion call.
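A standalone sketch of the build-the-decorator-at-call-time retry pattern, assuming the tenacity package; the wrapped call is a stand-in for make_request:

from tenacity import retry, stop_after_attempt, wait_exponential

def _create_retry_decorator():
    return retry(stop=stop_after_attempt(3), wait=wait_exponential(multiplier=1, max=10))

def completion_with_retry_sketch(**kwargs):
    retry_decorator = _create_retry_decorator()

    @retry_decorator
    def _completion_with_retry(**_kwargs):
        return {'echo': _kwargs}  # stand-in for make_request(llm, **_kwargs)

    return _completion_with_retry(**kwargs)

print(completion_with_retry_sketch(prompt='hi'))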
_identifying_params
return {'input_func': self.input_func.__name__, 'message_func': self.message_func.__name__}
@property def _identifying_params(self) ->Dict[str, Any]: return {'input_func': self.input_func.__name__, 'message_func': self.message_func.__name__}
null
_google_serper_api_results
headers = {'X-API-KEY': self.serper_api_key or '', 'Content-Type': 'application/json'} params = {'q': search_term, **{key: value for key, value in kwargs.items() if value is not None}} response = requests.post(f'https://google.serper.dev/{search_type}', headers=headers, params=params) response.raise_for_status() search_results = response.json() return search_results
def _google_serper_api_results(self, search_term: str, search_type: str='search', **kwargs: Any) ->dict: headers = {'X-API-KEY': self.serper_api_key or '', 'Content-Type': 'application/json'} params = {'q': search_term, **{key: value for key, value in kwargs.items() if value is not None}} response = requests.post(f'https://google.serper.dev/{search_type}', headers=headers, params=params) response.raise_for_status() search_results = response.json() return search_results
null
_import_bing_search_tool_BingSearchResults
from langchain_community.tools.bing_search.tool import BingSearchResults return BingSearchResults
def _import_bing_search_tool_BingSearchResults() ->Any: from langchain_community.tools.bing_search.tool import BingSearchResults return BingSearchResults
null
get_image
if isinstance(layout_object, pdfminer.layout.LTImage): return layout_object if isinstance(layout_object, pdfminer.layout.LTContainer): for child in layout_object: return get_image(child) else: return None
def get_image(layout_object: Any) ->Any: if isinstance(layout_object, pdfminer.layout.LTImage): return layout_object if isinstance(layout_object, pdfminer.layout.LTContainer): for child in layout_object: return get_image(child) else: return None
null
leave
"""Decrease the indentation level.""" self._indent -= 1
def leave(self): """Decrease the indentation level.""" self._indent -= 1
Decrease the indentation level.
output_keys
"""Return the singular output key. :meta private: """ if not self.return_intermediate_steps: return [self.output_key] else: return [self.output_key, INTERMEDIATE_STEPS_KEY]
@property def output_keys(self) ->List[str]: """Return the singular output key. :meta private: """ if not self.return_intermediate_steps: return [self.output_key] else: return [self.output_key, INTERMEDIATE_STEPS_KEY]
Return the singular output key. :meta private:
_generate
"""Call OpenAI generate and then call PromptLayer API to log the request.""" from promptlayer.utils import get_api_key, promptlayer_api_request request_start_time = datetime.datetime.now().timestamp() generated_responses = super()._generate(prompts, stop, run_manager) request_end_time = datetime.datetime.now().timestamp() for i in range(len(prompts)): prompt = prompts[i] generation = generated_responses.generations[i][0] resp = {'text': generation.text, 'llm_output': generated_responses. llm_output} params = {**self._identifying_params, **kwargs} pl_request_id = promptlayer_api_request('langchain.PromptLayerOpenAI', 'langchain', [prompt], params, self.pl_tags, resp, request_start_time, request_end_time, get_api_key(), return_pl_id= self.return_pl_id) if self.return_pl_id: if generation.generation_info is None or not isinstance(generation. generation_info, dict): generation.generation_info = {} generation.generation_info['pl_request_id'] = pl_request_id return generated_responses
def _generate(self, prompts: List[str], stop: Optional[List[str]]=None, run_manager: Optional[CallbackManagerForLLMRun]=None, **kwargs: Any ) ->LLMResult: """Call OpenAI generate and then call PromptLayer API to log the request.""" from promptlayer.utils import get_api_key, promptlayer_api_request request_start_time = datetime.datetime.now().timestamp() generated_responses = super()._generate(prompts, stop, run_manager) request_end_time = datetime.datetime.now().timestamp() for i in range(len(prompts)): prompt = prompts[i] generation = generated_responses.generations[i][0] resp = {'text': generation.text, 'llm_output': generated_responses. llm_output} params = {**self._identifying_params, **kwargs} pl_request_id = promptlayer_api_request('langchain.PromptLayerOpenAI', 'langchain', [prompt], params, self.pl_tags, resp, request_start_time, request_end_time, get_api_key(), return_pl_id=self.return_pl_id) if self.return_pl_id: if generation.generation_info is None or not isinstance(generation .generation_info, dict): generation.generation_info = {} generation.generation_info['pl_request_id'] = pl_request_id return generated_responses
Call OpenAI generate and then call PromptLayer API to log the request.
update
"""Update cache based on prompt and llm_string.""" doc_id = self._make_id(prompt, llm_string) blob = _dumps_generations(return_val) self.collection.upsert({'_id': doc_id, 'body_blob': blob})
def update(self, prompt: str, llm_string: str, return_val: RETURN_VAL_TYPE ) ->None: """Update cache based on prompt and llm_string.""" doc_id = self._make_id(prompt, llm_string) blob = _dumps_generations(return_val) self.collection.upsert({'_id': doc_id, 'body_blob': blob})
Update cache based on prompt and llm_string.
_create_chat_result
generations = [] for candidate in response['candidates']: message = ChatMLflowAIGateway._convert_dict_to_message(candidate['message']) message_metadata = candidate.get('metadata', {}) gen = ChatGeneration(message=message, generation_info=dict(message_metadata)) generations.append(gen) response_metadata = response.get('metadata', {}) return ChatResult(generations=generations, llm_output=response_metadata)
@staticmethod def _create_chat_result(response: Mapping[str, Any]) ->ChatResult: generations = [] for candidate in response['candidates']: message = ChatMLflowAIGateway._convert_dict_to_message(candidate['message']) message_metadata = candidate.get('metadata', {}) gen = ChatGeneration(message=message, generation_info=dict(message_metadata)) generations.append(gen) response_metadata = response.get('metadata', {}) return ChatResult(generations=generations, llm_output=response_metadata)
null
_import_apify
from langchain_community.utilities.apify import ApifyWrapper return ApifyWrapper
def _import_apify() ->Any: from langchain_community.utilities.apify import ApifyWrapper return ApifyWrapper
null
plan
log = '' for action, observation in intermediate_steps: log += f'<tool>{action.tool}</tool><tool_input>{action.tool_input}</tool_input><observation>{observation}</observation>' tools = '' for tool in self.tools: tools += f'{tool.name}: {tool.description}\n' inputs = {'intermediate_steps': log, 'tools': tools, 'question': kwargs['input'], 'stop': ['</tool_input>', '</final_answer>']} response = self.llm_chain(inputs, callbacks=callbacks) return response[self.llm_chain.output_key]
def plan(self, intermediate_steps: List[Tuple[AgentAction, str]], callbacks: Callbacks=None, **kwargs: Any) ->Union[AgentAction, AgentFinish]: log = '' for action, observation in intermediate_steps: log += f'<tool>{action.tool}</tool><tool_input>{action.tool_input}</tool_input><observation>{observation}</observation>' tools = '' for tool in self.tools: tools += f'{tool.name}: {tool.description}\n' inputs = {'intermediate_steps': log, 'tools': tools, 'question': kwargs['input'], 'stop': ['</tool_input>', '</final_answer>']} response = self.llm_chain(inputs, callbacks=callbacks) return response[self.llm_chain.output_key]
null
_similarity_index_search_with_score
"""Search k embeddings similar to the query embedding. Returns a list of (index, distance) tuples.""" if not self._neighbors_fitted: raise SKLearnVectorStoreException( 'No data was added to SKLearnVectorStore.') neigh_dists, neigh_idxs = self._neighbors.kneighbors([query_embedding], n_neighbors=k) return list(zip(neigh_idxs[0], neigh_dists[0]))
def _similarity_index_search_with_score(self, query_embedding: List[float], *, k: int=DEFAULT_K, **kwargs: Any) ->List[Tuple[int, float]]: """Search k embeddings similar to the query embedding. Returns a list of (index, distance) tuples.""" if not self._neighbors_fitted: raise SKLearnVectorStoreException( 'No data was added to SKLearnVectorStore.') neigh_dists, neigh_idxs = self._neighbors.kneighbors([query_embedding], n_neighbors=k) return list(zip(neigh_idxs[0], neigh_dists[0]))
Search k embeddings similar to the query embedding. Returns a list of (index, distance) tuples.
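A minimal sketch of the underlying scikit-learn call: fit NearestNeighbors on the stored embeddings, then query; assumes scikit-learn and numpy are installed:

import numpy as np
from sklearn.neighbors import NearestNeighbors

stored = np.random.rand(10, 8)  # stand-in for the stored embeddings
neighbors = NearestNeighbors(n_neighbors=4).fit(stored)

query = np.random.rand(8)
dists, idxs = neighbors.kneighbors([query], n_neighbors=4)
print(list(zip(idxs[0], dists[0])))  # [(index, distance), ...]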
on_tool_end
"""If not the final action, print out observation.""" if observation_prefix is not None: print_text(f'\n{observation_prefix}') print_text(output, color=color or self.color) if llm_prefix is not None: print_text(f'\n{llm_prefix}')
def on_tool_end(self, output: str, color: Optional[str]=None, observation_prefix: Optional[str]=None, llm_prefix: Optional[str]=None, **kwargs: Any) ->None: """If not the final action, print out observation.""" if observation_prefix is not None: print_text(f'\n{observation_prefix}') print_text(output, color=color or self.color) if llm_prefix is not None: print_text(f'\n{llm_prefix}')
If not the final action, print out observation.
test_add_recognizer_operator
""" Test add recognizer and anonymize a new type of entity and with a custom operator """ from presidio_analyzer import PatternRecognizer from presidio_anonymizer.entities import OperatorConfig from langchain_experimental.data_anonymizer import PresidioAnonymizer anonymizer = PresidioAnonymizer(analyzed_fields=[]) titles_list = ['Sir', 'Madam', 'Professor'] custom_recognizer = PatternRecognizer(supported_entity='TITLE', deny_list= titles_list) anonymizer.add_recognizer(custom_recognizer) text = 'Madam Jane Doe was here.' anonymized_text = anonymizer.anonymize(text) assert anonymized_text == '<TITLE> Jane Doe was here.' custom_operator = {'TITLE': OperatorConfig('replace', {'new_value': 'Dear'})} anonymizer.add_operators(custom_operator) anonymized_text = anonymizer.anonymize(text) assert anonymized_text == 'Dear Jane Doe was here.'
@pytest.mark.requires('presidio_analyzer', 'presidio_anonymizer', 'faker') def test_add_recognizer_operator() ->None: """ Test add recognizer and anonymize a new type of entity and with a custom operator """ from presidio_analyzer import PatternRecognizer from presidio_anonymizer.entities import OperatorConfig from langchain_experimental.data_anonymizer import PresidioAnonymizer anonymizer = PresidioAnonymizer(analyzed_fields=[]) titles_list = ['Sir', 'Madam', 'Professor'] custom_recognizer = PatternRecognizer(supported_entity='TITLE', deny_list=titles_list) anonymizer.add_recognizer(custom_recognizer) text = 'Madam Jane Doe was here.' anonymized_text = anonymizer.anonymize(text) assert anonymized_text == '<TITLE> Jane Doe was here.' custom_operator = {'TITLE': OperatorConfig('replace', {'new_value': 'Dear'})} anonymizer.add_operators(custom_operator) anonymized_text = anonymizer.anonymize(text) assert anonymized_text == 'Dear Jane Doe was here.'
Test add recognizer and anonymize a new type of entity and with a custom operator
test_resolve_criteria_str
assert CriteriaEvalChain.resolve_criteria('helpfulness') == {'helpfulness': _SUPPORTED_CRITERIA[Criteria.HELPFULNESS]} assert CriteriaEvalChain.resolve_criteria('correctness') == {'correctness': _SUPPORTED_CRITERIA[Criteria.CORRECTNESS]}
def test_resolve_criteria_str() ->None: assert CriteriaEvalChain.resolve_criteria('helpfulness') == {'helpfulness': _SUPPORTED_CRITERIA[Criteria.HELPFULNESS]} assert CriteriaEvalChain.resolve_criteria('correctness') == {'correctness': _SUPPORTED_CRITERIA[Criteria.CORRECTNESS]}
null
main
print('Hello World!') return 0
def main() ->int: print('Hello World!') return 0
null
copy
"""Copy the callback manager.""" return self.__class__(handlers=self.handlers, inheritable_handlers=self. inheritable_handlers, parent_run_id=self.parent_run_id, tags=self.tags, inheritable_tags=self.inheritable_tags, metadata=self.metadata, inheritable_metadata=self.inheritable_metadata)
def copy(self: T) ->T: """Copy the callback manager.""" return self.__class__(handlers=self.handlers, inheritable_handlers=self .inheritable_handlers, parent_run_id=self.parent_run_id, tags=self. tags, inheritable_tags=self.inheritable_tags, metadata=self. metadata, inheritable_metadata=self.inheritable_metadata)
Copy the callback manager.
evaluate
"""Synchronously process the HTML content of the page.""" from unstructured.partition.html import partition_html for selector in (self.remove_selectors or []): elements = page.locator(selector).all() for element in elements: if element.is_visible(): element.evaluate('element => element.remove()') page_source = page.content() elements = partition_html(text=page_source) return '\n\n'.join([str(el) for el in elements])
def evaluate(self, page: 'Page', browser: 'Browser', response: 'Response' ) ->str: """Synchronously process the HTML content of the page.""" from unstructured.partition.html import partition_html for selector in (self.remove_selectors or []): elements = page.locator(selector).all() for element in elements: if element.is_visible(): element.evaluate('element => element.remove()') page_source = page.content() elements = partition_html(text=page_source) return '\n\n'.join([str(el) for el in elements])
Synchronously process the HTML content of the page.
clear
"""Pass."""
def clear(self) ->None: """Pass."""
Pass.
_BoolOp
self.write('(') s = ' %s ' % self.boolops[t.op.__class__] interleave(lambda : self.write(s), self.dispatch, t.values) self.write(')')
def _BoolOp(self, t): self.write('(') s = ' %s ' % self.boolops[t.op.__class__] interleave(lambda : self.write(s), self.dispatch, t.values) self.write(')')
null
test_tool_calls_merge
chunks: List[dict] = [dict(content=''), dict(content='', additional_kwargs= {'tool_calls': [{'index': 0, 'id': 'call_CwGAsESnXehQEjiAIWzinlva', 'function': {'arguments': '', 'name': 'person'}, 'type': 'function'}]}), dict(content='', additional_kwargs={'tool_calls': [{'index': 0, 'id': None, 'function': {'arguments': '{"na', 'name': None}, 'type': None}]}), dict(content='', additional_kwargs={'tool_calls': [{'index': 0, 'id': None, 'function': {'arguments': 'me": ', 'name': None}, 'type': None}]} ), dict(content='', additional_kwargs={'tool_calls': [{'index': 0, 'id': None, 'function': {'arguments': '"jane"', 'name': None}, 'type': None}] }), dict(content='', additional_kwargs={'tool_calls': [{'index': 0, 'id': None, 'function': {'arguments': ', "a', 'name': None}, 'type': None}]}), dict(content='', additional_kwargs={'tool_calls': [{'index': 0, 'id': None, 'function': {'arguments': 'ge": ', 'name': None}, 'type': None}]}), dict(content='', additional_kwargs={'tool_calls': [{'index': 0, 'id': None, 'function': {'arguments': '2}', 'name': None}, 'type': None}]}), dict(content='', additional_kwargs={'tool_calls': [{'index': 1, 'id': 'call_zXSIylHvc5x3JUAPcHZR5GZI', 'function': {'arguments': '', 'name': 'person'}, 'type': 'function'}]}), dict(content='', additional_kwargs={'tool_calls': [{'index': 1, 'id': None, 'function': {'arguments': '{"na', 'name': None}, 'type': None}]}), dict(content='', additional_kwargs={'tool_calls': [{'index': 1, 'id': None, 'function': {'arguments': 'me": ', 'name': None}, 'type': None}]}), dict(content='', additional_kwargs={'tool_calls': [{'index': 1, 'id': None, 'function': {'arguments': '"bob",', 'name': None}, 'type': None}]}), dict(content= '', additional_kwargs={'tool_calls': [{'index': 1, 'id': None, 'function': {'arguments': ' "ag', 'name': None}, 'type': None}]}), dict (content='', additional_kwargs={'tool_calls': [{'index': 1, 'id': None, 'function': {'arguments': 'e": 3', 'name': None}, 'type': None}]}), dict(content='', additional_kwargs={'tool_calls': [{'index': 1, 'id': None, 'function': {'arguments': '}', 'name': None}, 'type': None}]}), dict(content='')] final = None for chunk in chunks: msg = AIMessageChunk(**chunk) if final is None: final = msg else: final = final + msg assert final == AIMessageChunk(content='', additional_kwargs={'tool_calls': [{'index': 0, 'id': 'call_CwGAsESnXehQEjiAIWzinlva', 'function': { 'arguments': '{"name": "jane", "age": 2}', 'name': 'person'}, 'type': 'function'}, {'index': 1, 'id': 'call_zXSIylHvc5x3JUAPcHZR5GZI', 'function': {'arguments': '{"name": "bob", "age": 3}', 'name': 'person' }, 'type': 'function'}]})
def test_tool_calls_merge() ->None: chunks: List[dict] = [dict(content=''), dict(content='', additional_kwargs={'tool_calls': [{'index': 0, 'id': 'call_CwGAsESnXehQEjiAIWzinlva', 'function': {'arguments': '', 'name': 'person'}, 'type': 'function'}]}), dict(content='', additional_kwargs={'tool_calls': [{'index': 0, 'id': None, 'function': {'arguments': '{"na', 'name': None}, 'type': None}]}), dict(content='', additional_kwargs={'tool_calls': [{'index': 0, 'id': None, 'function': {'arguments': 'me": ', 'name': None}, 'type': None}]}), dict(content='', additional_kwargs={'tool_calls': [{'index': 0, 'id': None, 'function': {'arguments': '"jane"', 'name': None}, 'type': None}]}), dict(content='', additional_kwargs ={'tool_calls': [{'index': 0, 'id': None, 'function': {'arguments': ', "a', 'name': None}, 'type': None}]}), dict(content='', additional_kwargs={'tool_calls': [{'index': 0, 'id': None, 'function': {'arguments': 'ge": ', 'name': None}, 'type': None}]}), dict(content='', additional_kwargs={'tool_calls': [{'index': 0, 'id': None, 'function': {'arguments': '2}', 'name': None}, 'type': None}]}), dict(content='', additional_kwargs={'tool_calls': [{ 'index': 1, 'id': 'call_zXSIylHvc5x3JUAPcHZR5GZI', 'function': { 'arguments': '', 'name': 'person'}, 'type': 'function'}]}), dict( content='', additional_kwargs={'tool_calls': [{'index': 1, 'id': None, 'function': {'arguments': '{"na', 'name': None}, 'type': None }]}), dict(content='', additional_kwargs={'tool_calls': [{'index': 1, 'id': None, 'function': {'arguments': 'me": ', 'name': None}, 'type': None}]}), dict(content='', additional_kwargs={'tool_calls': [{'index': 1, 'id': None, 'function': {'arguments': '"bob",', 'name': None}, 'type': None}]}), dict(content='', additional_kwargs ={'tool_calls': [{'index': 1, 'id': None, 'function': {'arguments': ' "ag', 'name': None}, 'type': None}]}), dict(content='', additional_kwargs={'tool_calls': [{'index': 1, 'id': None, 'function': {'arguments': 'e": 3', 'name': None}, 'type': None}]}), dict(content='', additional_kwargs={'tool_calls': [{'index': 1, 'id': None, 'function': {'arguments': '}', 'name': None}, 'type': None}]}), dict(content='')] final = None for chunk in chunks: msg = AIMessageChunk(**chunk) if final is None: final = msg else: final = final + msg assert final == AIMessageChunk(content='', additional_kwargs={ 'tool_calls': [{'index': 0, 'id': 'call_CwGAsESnXehQEjiAIWzinlva', 'function': {'arguments': '{"name": "jane", "age": 2}', 'name': 'person'}, 'type': 'function'}, {'index': 1, 'id': 'call_zXSIylHvc5x3JUAPcHZR5GZI', 'function': {'arguments': '{"name": "bob", "age": 3}', 'name': 'person'}, 'type': 'function'}]})
null
_import_textgen
from langchain_community.llms.textgen import TextGen return TextGen
def _import_textgen() ->Any: from langchain_community.llms.textgen import TextGen return TextGen
null
test_python_repl_print
program = """ import numpy as np v1 = np.array([1, 2, 3]) v2 = np.array([4, 5, 6]) dot_product = np.dot(v1, v2) print("The dot product is {:d}.".format(dot_product)) """ tool = PythonREPLTool() assert tool.run(program) == 'The dot product is 32.\n'
def test_python_repl_print() ->None: program = """ import numpy as np v1 = np.array([1, 2, 3]) v2 = np.array([4, 5, 6]) dot_product = np.dot(v1, v2) print("The dot product is {:d}.".format(dot_product)) """ tool = PythonREPLTool() assert tool.run(program) == 'The dot product is 32.\n'
null
vectara_query
"""Run a Vectara query Args: query: Text to look up documents similar to. config: VectaraQueryConfig object Returns: A list of k Documents matching the given query If summary is enabled, last document is the summary text with 'summary'=True """ if isinstance(config.mmr_config, dict): config.mmr_config = MMRConfig(**config.mmr_config) if isinstance(config.summary_config, dict): config.summary_config = SummaryConfig(**config.summary_config) data = {'query': [{'query': query, 'start': 0, 'numResults': config. mmr_config.mmr_k if config.mmr_config.is_enabled else config.k, 'contextConfig': {'sentencesBefore': config.n_sentence_context, 'sentencesAfter': config.n_sentence_context}, 'corpusKey': [{ 'customerId': self._vectara_customer_id, 'corpusId': self. _vectara_corpus_id, 'metadataFilter': config.filter, 'lexicalInterpolationConfig': {'lambda': config.lambda_val}}]}]} if config.mmr_config.is_enabled: data['query'][0]['rerankingConfig'] = {'rerankerId': 272725718, 'mmrConfig': {'diversityBias': config.mmr_config.diversity_bias}} if config.summary_config.is_enabled: data['query'][0]['summary'] = [{'maxSummarizedResults': config. summary_config.max_results, 'responseLang': config.summary_config. response_lang}] response = self._session.post(headers=self._get_post_headers(), url= 'https://api.vectara.io/v1/query', data=json.dumps(data), timeout=self. vectara_api_timeout) if response.status_code != 200: logger.error('Query failed %s', f'(code {response.status_code}, reason {response.reason}, details {response.text})' ) return [], '' result = response.json() if config.score_threshold: responses = [r for r in result['responseSet'][0]['response'] if r[ 'score'] > config.score_threshold] else: responses = result['responseSet'][0]['response'] documents = result['responseSet'][0]['document'] metadatas = [] for x in responses: md = {m['name']: m['value'] for m in x['metadata']} doc_num = x['documentIndex'] doc_md = {m['name']: m['value'] for m in documents[doc_num]['metadata']} if 'source' not in doc_md: doc_md['source'] = 'vectara' md.update(doc_md) metadatas.append(md) res = [(Document(page_content=x['text'], metadata=md), x['score']) for x, md in zip(responses, metadatas)] if config.mmr_config.is_enabled: res = res[:config.k] if config.summary_config.is_enabled: summary = result['responseSet'][0]['summary'][0]['text'] res.append((Document(page_content=summary, metadata={'summary': True}), 0.0)) return res
def vectara_query(self, query: str, config: VectaraQueryConfig, **kwargs: Any ) ->List[Tuple[Document, float]]: """Run a Vectara query Args: query: Text to look up documents similar to. config: VectaraQueryConfig object Returns: A list of k Documents matching the given query If summary is enabled, last document is the summary text with 'summary'=True """ if isinstance(config.mmr_config, dict): config.mmr_config = MMRConfig(**config.mmr_config) if isinstance(config.summary_config, dict): config.summary_config = SummaryConfig(**config.summary_config) data = {'query': [{'query': query, 'start': 0, 'numResults': config. mmr_config.mmr_k if config.mmr_config.is_enabled else config.k, 'contextConfig': {'sentencesBefore': config.n_sentence_context, 'sentencesAfter': config.n_sentence_context}, 'corpusKey': [{ 'customerId': self._vectara_customer_id, 'corpusId': self. _vectara_corpus_id, 'metadataFilter': config.filter, 'lexicalInterpolationConfig': {'lambda': config.lambda_val}}]}]} if config.mmr_config.is_enabled: data['query'][0]['rerankingConfig'] = {'rerankerId': 272725718, 'mmrConfig': {'diversityBias': config.mmr_config.diversity_bias}} if config.summary_config.is_enabled: data['query'][0]['summary'] = [{'maxSummarizedResults': config. summary_config.max_results, 'responseLang': config. summary_config.response_lang}] response = self._session.post(headers=self._get_post_headers(), url= 'https://api.vectara.io/v1/query', data=json.dumps(data), timeout= self.vectara_api_timeout) if response.status_code != 200: logger.error('Query failed %s', f'(code {response.status_code}, reason {response.reason}, details {response.text})' ) return [], '' result = response.json() if config.score_threshold: responses = [r for r in result['responseSet'][0]['response'] if r[ 'score'] > config.score_threshold] else: responses = result['responseSet'][0]['response'] documents = result['responseSet'][0]['document'] metadatas = [] for x in responses: md = {m['name']: m['value'] for m in x['metadata']} doc_num = x['documentIndex'] doc_md = {m['name']: m['value'] for m in documents[doc_num]['metadata'] } if 'source' not in doc_md: doc_md['source'] = 'vectara' md.update(doc_md) metadatas.append(md) res = [(Document(page_content=x['text'], metadata=md), x['score']) for x, md in zip(responses, metadatas)] if config.mmr_config.is_enabled: res = res[:config.k] if config.summary_config.is_enabled: summary = result['responseSet'][0]['summary'][0]['text'] res.append((Document(page_content=summary, metadata={'summary': True}), 0.0)) return res
Run a Vectara query Args: query: Text to look up documents similar to. config: VectaraQueryConfig object Returns: A list of k Documents matching the given query If summary is enabled, last document is the summary text with 'summary'=True
query
""" Executes when a search is performed on the store. Args: query_vector: The query vector, or None if not using vector-based query. query: The text query, or None if not using text-based query. k: The total number of results to retrieve. fetch_k: The number of results to fetch initially. vector_query_field: The field containing the vector representations in the index. text_field: The field containing the text data in the index. filter: List of filter clauses to apply to the query. similarity: The similarity strategy to use, or None if not using one. Returns: Dict: The Elasticsearch query body. """
@abstractmethod def query(self, query_vector: Union[List[float], None], query: Union[str, None], *, k: int, fetch_k: int, vector_query_field: str, text_field: str, filter: List[dict], similarity: Union[DistanceStrategy, None]) ->Dict: """ Executes when a search is performed on the store. Args: query_vector: The query vector, or None if not using vector-based query. query: The text query, or None if not using text-based query. k: The total number of results to retrieve. fetch_k: The number of results to fetch initially. vector_query_field: The field containing the vector representations in the index. text_field: The field containing the text data in the index. filter: List of filter clauses to apply to the query. similarity: The similarity strategy to use, or None if not using one. Returns: Dict: The Elasticsearch query body. """
Executes when a search is performed on the store. Args: query_vector: The query vector, or None if not using vector-based query. query: The text query, or None if not using text-based query. k: The total number of results to retrieve. fetch_k: The number of results to fetch initially. vector_query_field: The field containing the vector representations in the index. text_field: The field containing the text data in the index. filter: List of filter clauses to apply to the query. similarity: The similarity strategy to use, or None if not using one. Returns: Dict: The Elasticsearch query body.
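A hedged illustration of what a concrete strategy might return: an Elasticsearch kNN-style body. The field names and structure are illustrative, not the exact output of any particular strategy:

# Illustrative (not exact) example of a query body a concrete strategy could return.
def example_knn_query(query_vector, *, k, fetch_k, vector_query_field, filter):
    return {'knn': {'field': vector_query_field, 'query_vector': query_vector,
                    'k': k, 'num_candidates': fetch_k, 'filter': filter}}

print(example_knn_query([0.1, 0.2], k=4, fetch_k=50,
                        vector_query_field='vector', filter=[]))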
print_task_result
print('\x1b[93m\x1b[1m' + """ *****TASK RESULT***** """ + '\x1b[0m\x1b[0m') print(result)
def print_task_result(self, result: str) ->None: print('\x1b[93m\x1b[1m' + '\n*****TASK RESULT*****\n' + '\x1b[0m\x1b[0m') print(result)
null
_iterate_files
"""Iterate over the files in a directory or zip file. Args: path (str): Path to the directory or zip file. Yields: str: The path to each file. """ if os.path.isfile(path): yield path elif os.path.isdir(path): for root, _, files in os.walk(path): for file in files: if file.endswith('.txt'): yield os.path.join(root, file) elif zipfile.is_zipfile(path): with zipfile.ZipFile(path) as zip_file: for file in zip_file.namelist(): if file.endswith('.txt'): yield zip_file.extract(file)
def _iterate_files(self, path: str) ->Iterator[str]: """Iterate over the files in a directory or zip file. Args: path (str): Path to the directory or zip file. Yields: str: The path to each file. """ if os.path.isfile(path): yield path elif os.path.isdir(path): for root, _, files in os.walk(path): for file in files: if file.endswith('.txt'): yield os.path.join(root, file) elif zipfile.is_zipfile(path): with zipfile.ZipFile(path) as zip_file: for file in zip_file.namelist(): if file.endswith('.txt'): yield zip_file.extract(file)
Iterate over the files in a directory or zip file. Args: path (str): Path to the directory or zip file. Yields: str: The path to each file.
on_text
"""Run when text is received. Args: text (str): The received text. Returns: Any: The result of the callback. """ handle_event(self.handlers, 'on_text', None, text, run_id=self.run_id, parent_run_id=self.parent_run_id, tags=self.tags, **kwargs)
def on_text(self, text: str, **kwargs: Any) ->Any: """Run when text is received. Args: text (str): The received text. Returns: Any: The result of the callback. """ handle_event(self.handlers, 'on_text', None, text, run_id=self.run_id, parent_run_id=self.parent_run_id, tags=self.tags, **kwargs)
Run when text is received. Args: text (str): The received text. Returns: Any: The result of the callback.
_generate
return ChatResult(generations=[ChatGeneration(message=AIMessage(content='', additional_kwargs={'function_call': {'name': 'accept', 'arguments': """{ "draft": "turtles" }"""}}))])
def _generate(self, messages: List[BaseMessage], stop: Optional[List[str]]= None, run_manager: Optional[CallbackManagerForLLMRun]=None, **kwargs: Any ) ->ChatResult: return ChatResult(generations=[ChatGeneration(message=AIMessage(content ='', additional_kwargs={'function_call': {'name': 'accept', 'arguments': """{ "draft": "turtles" }"""}}))])
null
_process_end_trace
if not run.parent_run_id: pass else: span = self._span_map[run.id] span.set_outputs(outputs=run.outputs) span.__api__end__()
def _process_end_trace(self, run: 'Run') ->None: if not run.parent_run_id: pass else: span = self._span_map[run.id] span.set_outputs(outputs=run.outputs) span.__api__end__()
null
output_keys
"""Return output key. :meta private: """ return [self.output_key]
@property def output_keys(self) ->List[str]: """Return output key. :meta private: """ return [self.output_key]
Return output key. :meta private:
_identifying_params
"""Get the identifying parameters.""" _model_kwargs = self.model_kwargs or {} return {**{'model_kwargs': _model_kwargs}}
@property def _identifying_params(self) ->Mapping[str, Any]: """Get the identifying parameters.""" _model_kwargs = self.model_kwargs or {} return {**{'model_kwargs': _model_kwargs}}
Get the identifying parameters.
_prepare_request
"""Prepare the request details for the DataForSEO SERP API.""" if self.api_login is None or self.api_password is None: raise ValueError('api_login or api_password is not provided') cred = base64.b64encode(f'{self.api_login}:{self.api_password}'.encode('utf-8') ).decode('utf-8') headers = {'Authorization': f'Basic {cred}', 'Content-Type': 'application/json' } obj = {'keyword': quote(keyword)} obj = {**obj, **self.default_params, **self.params} data = [obj] _url = ( f"https://api.dataforseo.com/v3/serp/{obj['se_name']}/{obj['se_type']}/live/advanced" ) return {'url': _url, 'headers': headers, 'data': data}
def _prepare_request(self, keyword: str) ->dict: """Prepare the request details for the DataForSEO SERP API.""" if self.api_login is None or self.api_password is None: raise ValueError('api_login or api_password is not provided') cred = base64.b64encode(f'{self.api_login}:{self.api_password}'.encode( 'utf-8')).decode('utf-8') headers = {'Authorization': f'Basic {cred}', 'Content-Type': 'application/json'} obj = {'keyword': quote(keyword)} obj = {**obj, **self.default_params, **self.params} data = [obj] _url = ( f"https://api.dataforseo.com/v3/serp/{obj['se_name']}/{obj['se_type']}/live/advanced" ) return {'url': _url, 'headers': headers, 'data': data}
Prepare the request details for the DataForSEO SERP API.
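For orientation, a reconstruction of the dict the method returns, with placeholder credentials and with se_name/se_type assumed to be supplied by default_params; none of these values are real.
# Placeholder login/password and search-engine params, mirroring the
# url/headers/data shape built by _prepare_request.
import base64
from urllib.parse import quote

cred = base64.b64encode('user@example.com:secret'.encode('utf-8')).decode('utf-8')
prepared = {
    'url': 'https://api.dataforseo.com/v3/serp/google/organic/live/advanced',
    'headers': {'Authorization': f'Basic {cred}', 'Content-Type': 'application/json'},
    'data': [{'keyword': quote('coffee shop'), 'se_name': 'google', 'se_type': 'organic'}],
}
print(prepared['url'])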
test_web_path_parameter
web_base_loader = WebBaseLoader(web_paths=['https://www.example.com']) assert web_base_loader.web_paths == ['https://www.example.com'] web_base_loader = WebBaseLoader(web_path=['https://www.example.com']) assert web_base_loader.web_paths == ['https://www.example.com'] web_base_loader = WebBaseLoader(web_path='https://www.example.com') assert web_base_loader.web_paths == ['https://www.example.com']
def test_web_path_parameter(self) ->None: web_base_loader = WebBaseLoader(web_paths=['https://www.example.com']) assert web_base_loader.web_paths == ['https://www.example.com'] web_base_loader = WebBaseLoader(web_path=['https://www.example.com']) assert web_base_loader.web_paths == ['https://www.example.com'] web_base_loader = WebBaseLoader(web_path='https://www.example.com') assert web_base_loader.web_paths == ['https://www.example.com']
null
test_init_fail_index_none
with pytest.raises(TypeError) as ex: DatabricksVectorSearch(None) assert 'index must be of type VectorSearchIndex.' in str(ex.value)
@pytest.mark.requires('databricks', 'databricks.vector_search') def test_init_fail_index_none() ->None: with pytest.raises(TypeError) as ex: DatabricksVectorSearch(None) assert 'index must be of type VectorSearchIndex.' in str(ex.value)
null
distance_metric
return request.param
@pytest.fixture(params=['L1', 'L2', 'max', 'cos']) def distance_metric(request: FixtureRequest) ->str: return request.param
null
test_couchbase_import
"""Test that the Couchbase document loader can be imported.""" from langchain_community.document_loaders import CouchbaseLoader
def test_couchbase_import() ->None: """Test that the Couchbase document loader can be imported.""" from langchain_community.document_loaders import CouchbaseLoader
Test that the Couchbase document loader can be imported.
_load_llm_bash_chain
from langchain_experimental.llm_bash.base import LLMBashChain llm_chain = None if 'llm_chain' in config: llm_chain_config = config.pop('llm_chain') llm_chain = load_chain_from_config(llm_chain_config) elif 'llm_chain_path' in config: llm_chain = load_chain(config.pop('llm_chain_path')) elif 'llm' in config: llm_config = config.pop('llm') llm = load_llm_from_config(llm_config) elif 'llm_path' in config: llm = load_llm(config.pop('llm_path')) else: raise ValueError('One of `llm_chain` or `llm_chain_path` must be present.') if 'prompt' in config: prompt_config = config.pop('prompt') prompt = load_prompt_from_config(prompt_config) elif 'prompt_path' in config: prompt = load_prompt(config.pop('prompt_path')) if llm_chain: return LLMBashChain(llm_chain=llm_chain, prompt=prompt, **config) else: return LLMBashChain(llm=llm, prompt=prompt, **config)
def _load_llm_bash_chain(config: dict, **kwargs: Any) ->Any: from langchain_experimental.llm_bash.base import LLMBashChain llm_chain = None if 'llm_chain' in config: llm_chain_config = config.pop('llm_chain') llm_chain = load_chain_from_config(llm_chain_config) elif 'llm_chain_path' in config: llm_chain = load_chain(config.pop('llm_chain_path')) elif 'llm' in config: llm_config = config.pop('llm') llm = load_llm_from_config(llm_config) elif 'llm_path' in config: llm = load_llm(config.pop('llm_path')) else: raise ValueError( 'One of `llm_chain` or `llm_chain_path` must be present.') if 'prompt' in config: prompt_config = config.pop('prompt') prompt = load_prompt_from_config(prompt_config) elif 'prompt_path' in config: prompt = load_prompt(config.pop('prompt_path')) if llm_chain: return LLMBashChain(llm_chain=llm_chain, prompt=prompt, **config) else: return LLMBashChain(llm=llm, prompt=prompt, **config)
null
test_run_llm_all_formats
llm = FakeLLM() _run_llm(llm, inputs, mock.MagicMock())
@pytest.mark.parametrize('inputs', _VALID_PROMPTS + _VALID_MESSAGES) def test_run_llm_all_formats(inputs: Dict[str, Any]) ->None: llm = FakeLLM() _run_llm(llm, inputs, mock.MagicMock())
null
on_llm_new_token
self._llm_token_stream += _convert_newlines(token) self._llm_token_writer_idx = self._container.markdown(self. _llm_token_stream, index=self._llm_token_writer_idx)
def on_llm_new_token(self, token: str, **kwargs: Any) ->None: self._llm_token_stream += _convert_newlines(token) self._llm_token_writer_idx = self._container.markdown(self. _llm_token_stream, index=self._llm_token_writer_idx)
null
output_keys
"""Return the output keys. :meta private: """ _output_keys = [self.output_key] return _output_keys
@property def output_keys(self) ->List[str]: """Return the output keys. :meta private: """ _output_keys = [self.output_key] return _output_keys
Return the output keys. :meta private:
__init__
"""Initialize with DashVector collection.""" try: import dashvector except ImportError: raise ValueError( 'Could not import dashvector python package. Please install it with `pip install dashvector`.' ) if not isinstance(collection, dashvector.Collection): raise ValueError( f'collection should be an instance of dashvector.Collection, bug got {type(collection)}' ) self._collection = collection self._embedding = embedding self._text_field = text_field
def __init__(self, collection: Any, embedding: Embeddings, text_field: str):
    """Initialize with DashVector collection."""
    try:
        import dashvector
    except ImportError:
        raise ValueError(
            'Could not import dashvector python package. Please install it with `pip install dashvector`.'
            )
    if not isinstance(collection, dashvector.Collection):
        raise ValueError(
            f'collection should be an instance of dashvector.Collection, but got {type(collection)}'
            )
    self._collection = collection
    self._embedding = embedding
    self._text_field = text_field
Initialize with DashVector collection.
test_chat_prompt_template_indexing
message1 = SystemMessage(content='foo') message2 = HumanMessage(content='bar') message3 = HumanMessage(content='baz') template = ChatPromptTemplate.from_messages([message1, message2, message3]) assert template[0] == message1 assert template[1] == message2 slice_template = template[1:] assert slice_template[0] == message2 assert len(slice_template) == 2
def test_chat_prompt_template_indexing() ->None: message1 = SystemMessage(content='foo') message2 = HumanMessage(content='bar') message3 = HumanMessage(content='baz') template = ChatPromptTemplate.from_messages([message1, message2, message3]) assert template[0] == message1 assert template[1] == message2 slice_template = template[1:] assert slice_template[0] == message2 assert len(slice_template) == 2
null
_key_encoder
"""Encode a key.""" return namespace + str(_hash_string_to_uuid(key))
def _key_encoder(key: str, namespace: str) ->str: """Encode a key.""" return namespace + str(_hash_string_to_uuid(key))
Encode a key.
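A standalone sketch of the same idea; uuid5 here is an assumption standing in for the undisclosed _hash_string_to_uuid helper, which may hash differently.
# Deterministic, namespaced cache keys (hashing scheme assumed).
import uuid

def key_encoder(key: str, namespace: str) -> str:
    return namespace + str(uuid.uuid5(uuid.NAMESPACE_DNS, key))

print(key_encoder('my document text', 'text-embedding-ada-002/'))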
create_vector_extension
try: with Session(self._bind) as session: statement = sqlalchemy.text( 'BEGIN;SELECT pg_advisory_xact_lock(1573678846307946496);CREATE EXTENSION IF NOT EXISTS vector;COMMIT;' ) session.execute(statement) session.commit() except Exception as e: raise Exception(f'Failed to create vector extension: {e}') from e
def create_vector_extension(self) ->None: try: with Session(self._bind) as session: statement = sqlalchemy.text( 'BEGIN;SELECT pg_advisory_xact_lock(1573678846307946496);CREATE EXTENSION IF NOT EXISTS vector;COMMIT;' ) session.execute(statement) session.commit() except Exception as e: raise Exception(f'Failed to create vector extension: {e}') from e
null
test_empty_intermediate_steps
output = format_log_to_str([]) assert output == ''
def test_empty_intermediate_steps() ->None: output = format_log_to_str([]) assert output == ''
null
test_selector_add_example
"""Test NGramOverlapExampleSelector can add an example.""" new_example = {'input': 'Spot plays fetch.', 'output': 'foo4'} selector.add_example(new_example) sentence = 'Spot can run.' output = selector.select_examples({'input': sentence}) assert output == [EXAMPLES[2], EXAMPLES[0]] + [new_example] + [EXAMPLES[1]]
def test_selector_add_example(selector: NGramOverlapExampleSelector) ->None: """Test NGramOverlapExampleSelector can add an example.""" new_example = {'input': 'Spot plays fetch.', 'output': 'foo4'} selector.add_example(new_example) sentence = 'Spot can run.' output = selector.select_examples({'input': sentence}) assert output == [EXAMPLES[2], EXAMPLES[0]] + [new_example] + [EXAMPLES[1]]
Test NGramOverlapExampleSelector can add an example.
embed_documents
"""Call the base embeddings.""" return self.base_embeddings.embed_documents(texts)
def embed_documents(self, texts: List[str]) ->List[List[float]]: """Call the base embeddings.""" return self.base_embeddings.embed_documents(texts)
Call the base embeddings.
similarity_search_with_score_id
embedding_vector = self.embedding.embed_query(query) return self.similarity_search_with_score_id_by_vector(embedding= embedding_vector, k=k, filter=filter)
def similarity_search_with_score_id(self, query: str, k: int=4, filter: Optional[Dict[str, str]]=None) ->List[Tuple[Document, float, str]]: embedding_vector = self.embedding.embed_query(query) return self.similarity_search_with_score_id_by_vector(embedding= embedding_vector, k=k, filter=filter)
null
test_timescalevector_with_index
"""Test deleting functionality.""" texts = ['bar', 'baz'] docs = [Document(page_content=t, metadata={'a': 'b'}) for t in texts] docsearch = TimescaleVector.from_documents(documents=docs, collection_name= 'test_collection', embedding=FakeEmbeddingsWithAdaDimension(), service_url=SERVICE_URL, pre_delete_collection=True) texts = ['foo'] meta = [{'b': 'c'}] docsearch.add_texts(texts, meta) docsearch.create_index() output = docsearch.similarity_search('bar', k=10) assert len(output) == 3 docsearch.drop_index() docsearch.create_index(index_type=TimescaleVector.IndexType. TIMESCALE_VECTOR, max_alpha=1.0, num_neighbors=50) docsearch.drop_index() docsearch.create_index('tsv', max_alpha=1.0, num_neighbors=50) docsearch.drop_index() docsearch.create_index('ivfflat', num_lists=20, num_records=1000) docsearch.drop_index() docsearch.create_index('hnsw', m=16, ef_construction=64)
def test_timescalevector_with_index() ->None:
    """Test creating and dropping indexes of various types."""
    texts = ['bar', 'baz']
    docs = [Document(page_content=t, metadata={'a': 'b'}) for t in texts]
    docsearch = TimescaleVector.from_documents(documents=docs, collection_name='test_collection', embedding=FakeEmbeddingsWithAdaDimension(), service_url=SERVICE_URL, pre_delete_collection=True)
    texts = ['foo']
    meta = [{'b': 'c'}]
    docsearch.add_texts(texts, meta)
    docsearch.create_index()
    output = docsearch.similarity_search('bar', k=10)
    assert len(output) == 3
    docsearch.drop_index()
    docsearch.create_index(index_type=TimescaleVector.IndexType.TIMESCALE_VECTOR, max_alpha=1.0, num_neighbors=50)
    docsearch.drop_index()
    docsearch.create_index('tsv', max_alpha=1.0, num_neighbors=50)
    docsearch.drop_index()
    docsearch.create_index('ivfflat', num_lists=20, num_records=1000)
    docsearch.drop_index()
    docsearch.create_index('hnsw', m=16, ef_construction=64)
Test creating and dropping indexes of various types.
_import_symblai_nebula
from langchain_community.llms.symblai_nebula import Nebula return Nebula
def _import_symblai_nebula() ->Any: from langchain_community.llms.symblai_nebula import Nebula return Nebula
null
validate_return_direct_tool
"""Validate that tools are compatible with agent.""" agent = values['agent'] tools = values['tools'] if isinstance(agent, BaseMultiActionAgent): for tool in tools: if tool.return_direct: raise ValueError( 'Tools that have `return_direct=True` are not allowed in multi-action agents' ) return values
@root_validator() def validate_return_direct_tool(cls, values: Dict) ->Dict: """Validate that tools are compatible with agent.""" agent = values['agent'] tools = values['tools'] if isinstance(agent, BaseMultiActionAgent): for tool in tools: if tool.return_direct: raise ValueError( 'Tools that have `return_direct=True` are not allowed in multi-action agents' ) return values
Validate that tools are compatible with agent.
get_unique_config_specs
"""Get the unique config specs from a sequence of config specs.""" grouped = groupby(sorted(specs, key=lambda s: (s.id, *(s.dependencies or [] ))), lambda s: s.id) unique: List[ConfigurableFieldSpec] = [] for id, dupes in grouped: first = next(dupes) others = list(dupes) if len(others) == 0: unique.append(first) elif all(o == first for o in others): unique.append(first) else: raise ValueError( f'RunnableSequence contains conflicting config specsfor {id}: {[first] + others}' ) return unique
def get_unique_config_specs(specs: Iterable[ConfigurableFieldSpec]) ->List[ConfigurableFieldSpec]:
    """Get the unique config specs from a sequence of config specs."""
    grouped = groupby(sorted(specs, key=lambda s: (s.id, *(s.dependencies or []))), lambda s: s.id)
    unique: List[ConfigurableFieldSpec] = []
    for id, dupes in grouped:
        first = next(dupes)
        others = list(dupes)
        if len(others) == 0:
            unique.append(first)
        elif all(o == first for o in others):
            unique.append(first)
        else:
            raise ValueError(
                f'RunnableSequence contains conflicting config specs for {id}: {[first] + others}'
                )
    return unique
Get the unique config specs from a sequence of config specs.
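A hedged illustration of the deduplication behaviour, assuming the ConfigurableFieldSpec constructor and the langchain_core import path: identical duplicates collapse to one spec, while conflicting definitions raise.
# Import path and constructor usage are assumptions; the behaviour shown
# follows directly from the function body above.
from langchain_core.runnables.utils import ConfigurableFieldSpec, get_unique_config_specs

same_a = ConfigurableFieldSpec(id='llm_temperature', annotation=float, default=0.0)
same_b = ConfigurableFieldSpec(id='llm_temperature', annotation=float, default=0.0)
assert len(get_unique_config_specs([same_a, same_b])) == 1

conflict = ConfigurableFieldSpec(id='llm_temperature', annotation=float, default=0.7)
try:
    get_unique_config_specs([same_a, conflict])
except ValueError as err:
    print(err)  # conflicting config specs for llm_temperature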
test_anonymize
"""Test anonymizing a name in a simple sentence""" from langchain_experimental.data_anonymizer import PresidioAnonymizer text = 'Hello, my name is John Doe.' anonymizer = PresidioAnonymizer(analyzed_fields=analyzed_fields) anonymized_text = anonymizer.anonymize(text) assert ('John Doe' in anonymized_text) == should_contain
@pytest.mark.requires('presidio_analyzer', 'presidio_anonymizer', 'faker') @pytest.mark.parametrize('analyzed_fields,should_contain', [(['PERSON'], False), (['PHONE_NUMBER'], True), (None, False)]) def test_anonymize(analyzed_fields: List[str], should_contain: bool) ->None: """Test anonymizing a name in a simple sentence""" from langchain_experimental.data_anonymizer import PresidioAnonymizer text = 'Hello, my name is John Doe.' anonymizer = PresidioAnonymizer(analyzed_fields=analyzed_fields) anonymized_text = anonymizer.anonymize(text) assert ('John Doe' in anonymized_text) == should_contain
Test anonymizing a name in a simple sentence
test_pgvector_with_custom_connection
"""Test construction using a custom connection.""" texts = ['foo', 'bar', 'baz'] engine = sqlalchemy.create_engine(CONNECTION_STRING) with engine.connect() as connection: docsearch = PGVector.from_texts(texts=texts, collection_name= 'test_collection', embedding=FakeEmbeddingsWithAdaDimension(), connection_string=CONNECTION_STRING, pre_delete_collection=True, connection=connection) output = docsearch.similarity_search('foo', k=1) assert output == [Document(page_content='foo')]
def test_pgvector_with_custom_connection() ->None: """Test construction using a custom connection.""" texts = ['foo', 'bar', 'baz'] engine = sqlalchemy.create_engine(CONNECTION_STRING) with engine.connect() as connection: docsearch = PGVector.from_texts(texts=texts, collection_name= 'test_collection', embedding=FakeEmbeddingsWithAdaDimension(), connection_string=CONNECTION_STRING, pre_delete_collection=True, connection=connection) output = docsearch.similarity_search('foo', k=1) assert output == [Document(page_content='foo')]
Test construction using a custom connection.
_chat_stream_with_aggregation
final_chunk: Optional[ChatGenerationChunk] = None for stream_resp in self._create_chat_stream(messages, stop, **kwargs): if stream_resp: chunk = _chat_stream_response_to_chat_generation_chunk(stream_resp) if final_chunk is None: final_chunk = chunk else: final_chunk += chunk if run_manager: run_manager.on_llm_new_token(chunk.text, verbose=verbose) if final_chunk is None: raise ValueError('No data received from Ollama stream.') return final_chunk
def _chat_stream_with_aggregation(self, messages: List[BaseMessage], stop: Optional[List[str]]=None, run_manager: Optional[ CallbackManagerForLLMRun]=None, verbose: bool=False, **kwargs: Any ) ->ChatGenerationChunk: final_chunk: Optional[ChatGenerationChunk] = None for stream_resp in self._create_chat_stream(messages, stop, **kwargs): if stream_resp: chunk = _chat_stream_response_to_chat_generation_chunk(stream_resp) if final_chunk is None: final_chunk = chunk else: final_chunk += chunk if run_manager: run_manager.on_llm_new_token(chunk.text, verbose=verbose) if final_chunk is None: raise ValueError('No data received from Ollama stream.') return final_chunk
null
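A small sketch of the chunk-aggregation pattern the method relies on, assuming langchain_core's ChatGenerationChunk and AIMessageChunk; the streamed parts are hard-coded stand-ins for Ollama responses.
# Chunks support "+" and concatenate their message content, which is what
# the final_chunk accumulation loop above depends on.
from langchain_core.messages import AIMessageChunk
from langchain_core.outputs import ChatGenerationChunk

fake_stream = [ChatGenerationChunk(message=AIMessageChunk(content=part)) for part in ['Hel', 'lo', '!']]
final_chunk = None
for chunk in fake_stream:
    final_chunk = chunk if final_chunk is None else final_chunk + chunk
print(final_chunk.text)  # Hello!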
from_existing_index
""" Get instance of an existing Neo4j vector index. This method will return the instance of the store without inserting any new embeddings. Neo4j credentials are required in the form of `url`, `username`, and `password` and optional `database` parameters along with the `index_name` definition. """ if search_type == SearchType.HYBRID and not keyword_index_name: raise ValueError( 'keyword_index name has to be specified when using hybrid search option' ) store = cls(embedding=embedding, index_name=index_name, keyword_index_name= keyword_index_name, search_type=search_type, **kwargs) embedding_dimension = store.retrieve_existing_index() if not embedding_dimension: raise ValueError( 'The specified vector index name does not exist. Make sure to check if you spelled it correctly' ) if not store.embedding_dimension == embedding_dimension: raise ValueError( f"""The provided embedding function and vector index dimensions do not match. Embedding function dimension: {store.embedding_dimension} Vector index dimension: {embedding_dimension}""" ) if search_type == SearchType.HYBRID: fts_node_label = store.retrieve_existing_fts_index() if not fts_node_label: raise ValueError( 'The specified keyword index name does not exist. Make sure to check if you spelled it correctly' ) elif not fts_node_label == store.node_label: raise ValueError( "Vector and keyword index don't index the same node label") return store
@classmethod def from_existing_index(cls: Type[Neo4jVector], embedding: Embeddings, index_name: str, search_type: SearchType=DEFAULT_SEARCH_TYPE, keyword_index_name: Optional[str]=None, **kwargs: Any) ->Neo4jVector: """ Get instance of an existing Neo4j vector index. This method will return the instance of the store without inserting any new embeddings. Neo4j credentials are required in the form of `url`, `username`, and `password` and optional `database` parameters along with the `index_name` definition. """ if search_type == SearchType.HYBRID and not keyword_index_name: raise ValueError( 'keyword_index name has to be specified when using hybrid search option' ) store = cls(embedding=embedding, index_name=index_name, keyword_index_name=keyword_index_name, search_type=search_type, ** kwargs) embedding_dimension = store.retrieve_existing_index() if not embedding_dimension: raise ValueError( 'The specified vector index name does not exist. Make sure to check if you spelled it correctly' ) if not store.embedding_dimension == embedding_dimension: raise ValueError( f"""The provided embedding function and vector index dimensions do not match. Embedding function dimension: {store.embedding_dimension} Vector index dimension: {embedding_dimension}""" ) if search_type == SearchType.HYBRID: fts_node_label = store.retrieve_existing_fts_index() if not fts_node_label: raise ValueError( 'The specified keyword index name does not exist. Make sure to check if you spelled it correctly' ) elif not fts_node_label == store.node_label: raise ValueError( "Vector and keyword index don't index the same node label") return store
Get instance of an existing Neo4j vector index. This method will return the instance of the store without inserting any new embeddings. Neo4j credentials are required in the form of `url`, `username`, and `password` and optional `database` parameters along with the `index_name` definition.
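A hedged connection sketch for the classmethod above; the URL, credentials, index names, and the embeddings class are placeholders for whatever the target deployment actually uses.
# Placeholders throughout: use real Neo4j credentials, existing index names,
# and the same Embeddings implementation that built the index.
from langchain_community.vectorstores import Neo4jVector
from langchain_community.vectorstores.neo4j_vector import SearchType
from langchain_openai import OpenAIEmbeddings

store = Neo4jVector.from_existing_index(
    embedding=OpenAIEmbeddings(),
    url='bolt://localhost:7687',
    username='neo4j',
    password='password',
    index_name='vector',
    search_type=SearchType.HYBRID,
    keyword_index_name='keyword',
)
print(store.similarity_search('hello world', k=3))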