Dataset columns (all string-valued; observed value-length ranges):
method_name: 1 to 78 characters
method_body: 3 to 9.66k characters
full_code: 31 to 10.7k characters
docstring: 4 to 4.74k characters
_llm_type
return 'chat_glm'
@property def _llm_type(self) ->str: return 'chat_glm'
null
test_search_call
"""Test that call gives the correct answer from search.""" search = GoogleSerperAPIWrapper() output = search.run("What was Obama's first name?") assert 'Barack Hussein Obama II' in output
def test_search_call() ->None: """Test that call gives the correct answer from search.""" search = GoogleSerperAPIWrapper() output = search.run("What was Obama's first name?") assert 'Barack Hussein Obama II' in output
Test that call gives the correct answer from search.
_get_python_function_required_args
"""Get the required arguments for a Python function.""" spec = inspect.getfullargspec(function) required = spec.args[:-len(spec.defaults)] if spec.defaults else spec.args required += [k for k in spec.kwonlyargs if k not in (spec.kwonlydefaults or {}) ] is_class = type(function) is type if is_class and required[0] == 'self': required = required[1:] return required
def _get_python_function_required_args(function: Callable) ->List[str]: """Get the required arguments for a Python function.""" spec = inspect.getfullargspec(function) required = spec.args[:-len(spec.defaults)] if spec.defaults else spec.args required += [k for k in spec.kwonlyargs if k not in (spec. kwonlydefaults or {})] is_class = type(function) is type if is_class and required[0] == 'self': required = required[1:] return required
Get the required arguments for a Python function.
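A minimal, self-contained sketch of the same required-argument logic using only the standard library; the fetch() function below is hypothetical and exists only to show which parameters survive once defaults and keyword-only defaults are excluded.

import inspect
from typing import Callable, List

def required_args(function: Callable) -> List[str]:
    # Positional parameters minus those covered by defaults, plus keyword-only
    # parameters without a default -- mirroring the logic in the record above.
    spec = inspect.getfullargspec(function)
    required = spec.args[:-len(spec.defaults)] if spec.defaults else spec.args
    required += [k for k in spec.kwonlyargs if k not in (spec.kwonlydefaults or {})]
    return required

def fetch(url: str, timeout: float = 10.0, *, retries: int, verbose: bool = False) -> None:
    """Hypothetical function used only for illustration."""

print(required_args(fetch))  # ['url', 'retries']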
__init__
"""Initialize the IUGU resource. Args: resource: The name of the resource to fetch. api_token: The IUGU API token to use. """ self.resource = resource api_token = api_token or get_from_env('api_token', 'IUGU_API_TOKEN') self.headers = {'Authorization': f'Bearer {api_token}'}
def __init__(self, resource: str, api_token: Optional[str]=None) ->None: """Initialize the IUGU resource. Args: resource: The name of the resource to fetch. api_token: The IUGU API token to use. """ self.resource = resource api_token = api_token or get_from_env('api_token', 'IUGU_API_TOKEN') self.headers = {'Authorization': f'Bearer {api_token}'}
Initialize the IUGU resource. Args: resource: The name of the resource to fetch. api_token: The IUGU API token to use.
test_with_include_parameter
"""Test end to end construction and include parameter.""" texts = ['hello bagel', 'this is langchain'] docsearch = Bagel.from_texts(cluster_name='testing', texts=texts) output = docsearch.get(include=['embeddings']) assert output['embeddings'] is not None output = docsearch.get() assert output['embeddings'] is None docsearch.delete_cluster()
def test_with_include_parameter() ->None: """Test end to end construction and include parameter.""" texts = ['hello bagel', 'this is langchain'] docsearch = Bagel.from_texts(cluster_name='testing', texts=texts) output = docsearch.get(include=['embeddings']) assert output['embeddings'] is not None output = docsearch.get() assert output['embeddings'] is None docsearch.delete_cluster()
Test end to end construction and include parameter.
mset
"""Set the values for the given keys.""" encoded_pairs = [(self.key_encoder(key), self.value_serializer(value)) for key, value in key_value_pairs] self.store.mset(encoded_pairs)
def mset(self, key_value_pairs: Sequence[Tuple[K, V]]) ->None: """Set the values for the given keys.""" encoded_pairs = [(self.key_encoder(key), self.value_serializer(value)) for key, value in key_value_pairs] self.store.mset(encoded_pairs)
Set the values for the given keys.
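The mset() above follows a simple wrapper pattern: encode each key and serialize each value, then delegate to an underlying store. A dependency-free sketch of that pattern follows; InMemoryKV and the namespace prefix are illustrative assumptions, not part of the original code.

import json
from typing import Any, Dict, Sequence, Tuple

class InMemoryKV:
    """Hypothetical inner store: a plain dict keyed by encoded strings."""

    def __init__(self) -> None:
        self._data: Dict[str, bytes] = {}

    def mset(self, pairs: Sequence[Tuple[str, bytes]]) -> None:
        self._data.update(pairs)

class EncoderBackedStoreSketch:
    """Encodes keys and serializes values before writing to the inner store."""

    def __init__(self, store: InMemoryKV) -> None:
        self.store = store
        self.key_encoder = lambda key: f"ns/{key}"                         # prefix keys with a namespace
        self.value_serializer = lambda value: json.dumps(value).encode()   # values as JSON bytes

    def mset(self, key_value_pairs: Sequence[Tuple[str, Any]]) -> None:
        encoded_pairs = [(self.key_encoder(k), self.value_serializer(v)) for k, v in key_value_pairs]
        self.store.mset(encoded_pairs)

store = EncoderBackedStoreSketch(InMemoryKV())
store.mset([("a", {"x": 1}), ("b", {"y": 2})])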
test_all_imports
assert set(__all__) == set(EXPECTED_ALL)
def test_all_imports() ->None: assert set(__all__) == set(EXPECTED_ALL)
null
test_fireworks_streaming
"""Test stream completion.""" generator = llm.stream("Who's the best quarterback in the NFL?") assert isinstance(generator, Generator) for token in generator: assert isinstance(token, str)
@pytest.mark.scheduled def test_fireworks_streaming(llm: Fireworks) ->None: """Test stream completion.""" generator = llm.stream("Who's the best quarterback in the NFL?") assert isinstance(generator, Generator) for token in generator: assert isinstance(token, str)
Test stream completion.
run
"""Run Places search and get k number of places that exists that match.""" search_results = self.google_map_client.places(query)['results'] num_to_return = len(search_results) places = [] if num_to_return == 0: return 'Google Places did not find any places that match the description' num_to_return = num_to_return if self.top_k_results is None else min( num_to_return, self.top_k_results) for i in range(num_to_return): result = search_results[i] details = self.fetch_place_details(result['place_id']) if details is not None: places.append(details) return '\n'.join([f'{i + 1}. {item}' for i, item in enumerate(places)])
def run(self, query: str) ->str: """Run Places search and get k number of places that exists that match.""" search_results = self.google_map_client.places(query)['results'] num_to_return = len(search_results) places = [] if num_to_return == 0: return ( 'Google Places did not find any places that match the description') num_to_return = num_to_return if self.top_k_results is None else min( num_to_return, self.top_k_results) for i in range(num_to_return): result = search_results[i] details = self.fetch_place_details(result['place_id']) if details is not None: places.append(details) return '\n'.join([f'{i + 1}. {item}' for i, item in enumerate(places)])
Run Places search and get k number of places that exists that match.
_auth
"""Authenticates the OneDrive API client Returns: The authenticated Account object. """ try: from O365 import Account, FileSystemTokenBackend except ImportError: raise ImportError( 'O365 package not found, please install it with `pip install o365`') if self.auth_with_token: token_storage = _O365TokenStorage() token_path = token_storage.token_path token_backend = FileSystemTokenBackend(token_path=token_path.parent, token_filename=token_path.name) account = Account(credentials=(self.settings.client_id, self.settings. client_secret.get_secret_value()), scopes=self._scopes, token_backend=token_backend, **{'raise_http_errors': False}) else: token_backend = FileSystemTokenBackend(token_path=Path.home() / '.credentials') account = Account(credentials=(self.settings.client_id, self.settings. client_secret.get_secret_value()), scopes=self._scopes, token_backend=token_backend, **{'raise_http_errors': False}) account.authenticate() return account
def _auth(self) ->Account: """Authenticates the OneDrive API client Returns: The authenticated Account object. """ try: from O365 import Account, FileSystemTokenBackend except ImportError: raise ImportError( 'O365 package not found, please install it with `pip install o365`' ) if self.auth_with_token: token_storage = _O365TokenStorage() token_path = token_storage.token_path token_backend = FileSystemTokenBackend(token_path=token_path.parent, token_filename=token_path.name) account = Account(credentials=(self.settings.client_id, self. settings.client_secret.get_secret_value()), scopes=self._scopes, token_backend=token_backend, **{'raise_http_errors': False}) else: token_backend = FileSystemTokenBackend(token_path=Path.home() / '.credentials') account = Account(credentials=(self.settings.client_id, self. settings.client_secret.get_secret_value()), scopes=self._scopes, token_backend=token_backend, **{'raise_http_errors': False}) account.authenticate() return account
Authenticates the OneDrive API client Returns: The authenticated Account object.
_import_arcee
from langchain_community.llms.arcee import Arcee return Arcee
def _import_arcee() ->Any: from langchain_community.llms.arcee import Arcee return Arcee
null
_load_rapidfuzz
""" Load the RapidFuzz library. Raises: ImportError: If the rapidfuzz library is not installed. Returns: Any: The rapidfuzz.distance module. """ try: import rapidfuzz except ImportError: raise ImportError( 'Please install the rapidfuzz library to use the FuzzyMatchStringEvaluator.Please install it with `pip install rapidfuzz`.' ) return rapidfuzz.distance
def _load_rapidfuzz() ->Any: """ Load the RapidFuzz library. Raises: ImportError: If the rapidfuzz library is not installed. Returns: Any: The rapidfuzz.distance module. """ try: import rapidfuzz except ImportError: raise ImportError( 'Please install the rapidfuzz library to use the FuzzyMatchStringEvaluator.Please install it with `pip install rapidfuzz`.' ) return rapidfuzz.distance
Load the RapidFuzz library. Raises: ImportError: If the rapidfuzz library is not installed. Returns: Any: The rapidfuzz.distance module.
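If rapidfuzz is installed, the module returned above exposes the usual edit-distance metrics. A small usage sketch, assuming `pip install rapidfuzz`; the specific metric and strings are illustrative.

from rapidfuzz import distance

# Levenshtein edit distance and its normalized similarity between two strings.
print(distance.Levenshtein.distance("kitten", "sitting"))               # 3
print(distance.Levenshtein.normalized_similarity("kitten", "sitting"))  # roughly 0.571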
_create_engine
return sqlalchemy.create_engine(url=self.connection_string, **self.engine_args)
def _create_engine(self) ->sqlalchemy.engine.Engine: return sqlalchemy.create_engine(url=self.connection_string, **self.engine_args)
null
test_simple_memory
"""Test SimpleMemory.""" memory = SimpleMemory(memories={'baz': 'foo'}) output = memory.load_memory_variables({}) assert output == {'baz': 'foo'} assert ['baz'] == memory.memory_variables
def test_simple_memory() ->None: """Test SimpleMemory.""" memory = SimpleMemory(memories={'baz': 'foo'}) output = memory.load_memory_variables({}) assert output == {'baz': 'foo'} assert ['baz'] == memory.memory_variables
Test SimpleMemory.
_call
if self.streaming: completion = '' for chunk in self._stream(prompt, stop, run_manager, **kwargs): completion += chunk.text return completion params = self._convert_prompt_msg_params(prompt, **kwargs) response = self.client.chat(params) return response.get('choice', {}).get('message', {}).get('content', '')
def _call(self, prompt: str, stop: Optional[List[str]]=None, run_manager: Optional[CallbackManagerForLLMRun]=None, **kwargs: Any) ->str: if self.streaming: completion = '' for chunk in self._stream(prompt, stop, run_manager, **kwargs): completion += chunk.text return completion params = self._convert_prompt_msg_params(prompt, **kwargs) response = self.client.chat(params) return response.get('choice', {}).get('message', {}).get('content', '')
null
from_texts
""" Return VectorStore initialized from texts and embeddings. Postgres connection string is required "Either pass it as a parameter or set the PGVECTOR_CONNECTION_STRING environment variable. """ embeddings = embedding.embed_documents(list(texts)) return cls.__from(texts, embeddings, embedding, metadatas=metadatas, ids= ids, collection_name=collection_name, distance_strategy= distance_strategy, pre_delete_collection=pre_delete_collection, **kwargs)
@classmethod def from_texts(cls: Type[PGVector], texts: List[str], embedding: Embeddings, metadatas: Optional[List[dict]]=None, collection_name: str= _LANGCHAIN_DEFAULT_COLLECTION_NAME, distance_strategy: DistanceStrategy =DEFAULT_DISTANCE_STRATEGY, ids: Optional[List[str]]=None, pre_delete_collection: bool=False, **kwargs: Any) ->PGVector: """ Return VectorStore initialized from texts and embeddings. Postgres connection string is required "Either pass it as a parameter or set the PGVECTOR_CONNECTION_STRING environment variable. """ embeddings = embedding.embed_documents(list(texts)) return cls.__from(texts, embeddings, embedding, metadatas=metadatas, ids=ids, collection_name=collection_name, distance_strategy= distance_strategy, pre_delete_collection=pre_delete_collection, ** kwargs)
Return VectorStore initialized from texts and embeddings. Postgres connection string is required "Either pass it as a parameter or set the PGVECTOR_CONNECTION_STRING environment variable.
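A hedged usage sketch for the classmethod above. The connection string, collection name, and choice of embedding model are illustrative assumptions; as the docstring notes, the Postgres URL can instead be supplied via the PGVECTOR_CONNECTION_STRING environment variable.

# Illustrative only: assumes langchain-community plus a Postgres server with the
# pgvector extension; any Embeddings implementation can stand in for OpenAIEmbeddings.
from langchain_community.embeddings import OpenAIEmbeddings
from langchain_community.vectorstores.pgvector import PGVector

store = PGVector.from_texts(
    texts=["hello pgvector", "this is langchain"],
    embedding=OpenAIEmbeddings(),
    collection_name="demo_collection",
    connection_string="postgresql+psycopg2://user:pass@localhost:5432/vectordb",
)
docs = store.similarity_search("hello", k=1)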
test_importable_all
for path in glob.glob('../experimental/langchain_experimental/*'): relative_path = Path(path).parts[-1] if relative_path.endswith('.typed'): continue module_name = relative_path.split('.')[0] module = importlib.import_module('langchain_experimental.' + module_name) all_ = getattr(module, '__all__', []) for cls_ in all_: getattr(module, cls_)
def test_importable_all() ->None: for path in glob.glob('../experimental/langchain_experimental/*'): relative_path = Path(path).parts[-1] if relative_path.endswith('.typed'): continue module_name = relative_path.split('.')[0] module = importlib.import_module('langchain_experimental.' + module_name) all_ = getattr(module, '__all__', []) for cls_ in all_: getattr(module, cls_)
null
test_hybrid_score_normalization
"""Test if we can get two 1.0 documents with RRF""" text_embeddings = FakeEmbeddingsWithOsDimension().embed_documents(texts) text_embedding_pairs = list(zip(['foo'], text_embeddings)) docsearch = Neo4jVector.from_embeddings(text_embeddings= text_embedding_pairs, embedding=FakeEmbeddingsWithOsDimension(), url= url, username=username, password=password, pre_delete_collection=True, search_type=SearchType.HYBRID) rrf_query = _get_search_index_query(SearchType.HYBRID).rstrip( 'WITH node, max(score) AS score ORDER BY score DESC LIMIT $k').replace( 'UNION', 'UNION ALL') + 'RETURN node.text AS text, score LIMIT 2' output = docsearch.query(rrf_query, params={'index': 'vector', 'k': 1, 'embedding': FakeEmbeddingsWithOsDimension().embed_query('foo'), 'query': 'foo', 'keyword_index': 'keyword'}) assert output == [{'text': 'foo', 'score': 1.0}, {'text': 'foo', 'score': 1.0}] drop_vector_indexes(docsearch)
def test_hybrid_score_normalization() ->None: """Test if we can get two 1.0 documents with RRF""" text_embeddings = FakeEmbeddingsWithOsDimension().embed_documents(texts) text_embedding_pairs = list(zip(['foo'], text_embeddings)) docsearch = Neo4jVector.from_embeddings(text_embeddings= text_embedding_pairs, embedding=FakeEmbeddingsWithOsDimension(), url=url, username=username, password=password, pre_delete_collection=True, search_type=SearchType.HYBRID) rrf_query = _get_search_index_query(SearchType.HYBRID).rstrip( 'WITH node, max(score) AS score ORDER BY score DESC LIMIT $k').replace( 'UNION', 'UNION ALL') + 'RETURN node.text AS text, score LIMIT 2' output = docsearch.query(rrf_query, params={'index': 'vector', 'k': 1, 'embedding': FakeEmbeddingsWithOsDimension().embed_query('foo'), 'query': 'foo', 'keyword_index': 'keyword'}) assert output == [{'text': 'foo', 'score': 1.0}, {'text': 'foo', 'score': 1.0}] drop_vector_indexes(docsearch)
Test if we can get two 1.0 documents with RRF
_validate_prompt
if DOCUMENTS_KEY not in prompt.input_variables: raise ValueError( f'Prompt must accept {DOCUMENTS_KEY} as an input variable. Received prompt with input variables: {prompt.input_variables}' )
def _validate_prompt(prompt: BasePromptTemplate) ->None: if DOCUMENTS_KEY not in prompt.input_variables: raise ValueError( f'Prompt must accept {DOCUMENTS_KEY} as an input variable. Received prompt with input variables: {prompt.input_variables}' )
null
_import_requests_tool_RequestsGetTool
from langchain_community.tools.requests.tool import RequestsGetTool return RequestsGetTool
def _import_requests_tool_RequestsGetTool() ->Any: from langchain_community.tools.requests.tool import RequestsGetTool return RequestsGetTool
null
delete
"""DELETE the URL and return the text.""" return requests.delete(url, headers=self.headers, auth=self.auth, **kwargs)
def delete(self, url: str, **kwargs: Any) ->requests.Response: """DELETE the URL and return the text.""" return requests.delete(url, headers=self.headers, auth=self.auth, **kwargs)
DELETE the URL and return the text.
_create_action_request
data = self._create_action_payload(instructions, params, preview_only) return Request('POST', self._create_action_url(action_id), json=data)
def _create_action_request(self, action_id: str, instructions: str, params: Optional[Dict]=None, preview_only=False) ->Request: data = self._create_action_payload(instructions, params, preview_only) return Request('POST', self._create_action_url(action_id), json=data)
null
test_large_batches
documents = ['foo bar' for _ in range(0, 251)] model_uscentral1 = VertexAIEmbeddings(location='us-central1') model_asianortheast1 = VertexAIEmbeddings(location='asia-northeast1') model_uscentral1.embed_documents(documents) model_asianortheast1.embed_documents(documents) assert model_uscentral1.instance['batch_size'] >= 250 assert model_asianortheast1.instance['batch_size'] < 50
def test_large_batches() ->None: documents = ['foo bar' for _ in range(0, 251)] model_uscentral1 = VertexAIEmbeddings(location='us-central1') model_asianortheast1 = VertexAIEmbeddings(location='asia-northeast1') model_uscentral1.embed_documents(documents) model_asianortheast1.embed_documents(documents) assert model_uscentral1.instance['batch_size'] >= 250 assert model_asianortheast1.instance['batch_size'] < 50
null
test_character_text_splitter_long
"""Test splitting by character count on long words.""" text = 'foo bar baz a a' splitter = CharacterTextSplitter(separator=' ', chunk_size=3, chunk_overlap=1) output = splitter.split_text(text) expected_output = ['foo', 'bar', 'baz', 'a a'] assert output == expected_output
def test_character_text_splitter_long() ->None: """Test splitting by character count on long words.""" text = 'foo bar baz a a' splitter = CharacterTextSplitter(separator=' ', chunk_size=3, chunk_overlap=1) output = splitter.split_text(text) expected_output = ['foo', 'bar', 'baz', 'a a'] assert output == expected_output
Test splitting by character count on long words.
evaluation_name
return 'Contextual Accuracy'
@property def evaluation_name(self) ->str: return 'Contextual Accuracy'
null
test_amazontextract_loader_failures
two_page_pdf = str(Path(__file__).parent.parent / 'examples/multi-page-forms-sample-2-page.pdf') loader = AmazonTextractPDFLoader(two_page_pdf) with pytest.raises(ValueError): loader.load()
@pytest.mark.skip(reason='Requires AWS credentials to run') def test_amazontextract_loader_failures() ->None: two_page_pdf = str(Path(__file__).parent.parent / 'examples/multi-page-forms-sample-2-page.pdf') loader = AmazonTextractPDFLoader(two_page_pdf) with pytest.raises(ValueError): loader.load()
null
_create_message_dicts
params = self._client_params if stop is not None: if 'stop' in params: raise ValueError('`stop` found in both the input and default params.') params['stop'] = stop message_dicts = [_convert_message_to_dict(m) for m in messages] return message_dicts, params
def _create_message_dicts(self, messages: List[BaseMessage], stop: Optional[List[str]]) ->Tuple[List[Dict[str, Any]], Dict[str, Any]]: params = self._client_params if stop is not None: if 'stop' in params: raise ValueError('`stop` found in both the input and default params.') params['stop'] = stop message_dicts = [_convert_message_to_dict(m) for m in messages] return message_dicts, params
null
_format_func
self._validate_func(func) return f'${func.value}'
def _format_func(self, func: Union[Operator, Comparator]) ->str: self._validate_func(func) return f'${func.value}'
null
test_aleph_alpha_call
"""Test valid call to cohere.""" llm = AlephAlpha(maximum_tokens=10) output = llm('Say foo:') assert isinstance(output, str)
def test_aleph_alpha_call() ->None: """Test valid call to cohere.""" llm = AlephAlpha(maximum_tokens=10) output = llm('Say foo:') assert isinstance(output, str)
Test valid call to cohere.
index_exists
"""Verifies if the specified index name during instance construction exists on the collection Returns: Returns True on success and False if no such index exists on the collection """ cursor = self._collection.list_indexes() index_name = self._index_name for res in cursor: current_index_name = res.pop('name') if current_index_name == index_name: return True return False
def index_exists(self) ->bool: """Verifies if the specified index name during instance construction exists on the collection Returns: Returns True on success and False if no such index exists on the collection """ cursor = self._collection.list_indexes() index_name = self._index_name for res in cursor: current_index_name = res.pop('name') if current_index_name == index_name: return True return False
Verifies if the specified index name during instance construction exists on the collection Returns: Returns True on success and False if no such index exists on the collection
lazy_parse
"""Lazily parse the blob.""" import pypdf with blob.as_bytes_io() as pdf_file_obj: pdf_reader = pypdf.PdfReader(pdf_file_obj, password=self.password) yield from [Document(page_content=page.extract_text() + self. _extract_images_from_page(page), metadata={'source': blob.source, 'page': page_number}) for page_number, page in enumerate(pdf_reader .pages)]
def lazy_parse(self, blob: Blob) ->Iterator[Document]: """Lazily parse the blob.""" import pypdf with blob.as_bytes_io() as pdf_file_obj: pdf_reader = pypdf.PdfReader(pdf_file_obj, password=self.password) yield from [Document(page_content=page.extract_text() + self. _extract_images_from_page(page), metadata={'source': blob. source, 'page': page_number}) for page_number, page in enumerate(pdf_reader.pages)]
Lazily parse the blob.
test_gpt_router_call_incorrect_model
"""Test invalid modelName""" anthropic_claude = GPTRouterModel(name='model_does_not_exist', provider_name='anthropic') chat = GPTRouter(models_priority_list=[anthropic_claude]) message = HumanMessage(content='Hello World') with pytest.raises(Exception): chat([message])
def test_gpt_router_call_incorrect_model() ->None: """Test invalid modelName""" anthropic_claude = GPTRouterModel(name='model_does_not_exist', provider_name='anthropic') chat = GPTRouter(models_priority_list=[anthropic_claude]) message = HumanMessage(content='Hello World') with pytest.raises(Exception): chat([message])
Test invalid modelName
embeddings
"""Access the query embedding object if available.""" logger.debug( f'{Embeddings.__name__} is not implemented for {self.__class__.__name__}') return None
@property def embeddings(self) ->Optional[Embeddings]: """Access the query embedding object if available.""" logger.debug( f'{Embeddings.__name__} is not implemented for {self.__class__.__name__}' ) return None
Access the query embedding object if available.
_call
"""Call to Banana endpoint.""" try: from banana_dev import Client except ImportError: raise ImportError( 'Could not import banana-dev python package. Please install it with `pip install banana-dev`.' ) params = self.model_kwargs or {} params = {**params, **kwargs} api_key = self.banana_api_key model_key = self.model_key model_url_slug = self.model_url_slug model_inputs = {'prompt': prompt, **params} model = Client(api_key=api_key, model_key=model_key, url= f'https://{model_url_slug}.run.banana.dev') response, meta = model.call('/', model_inputs) try: text = response['outputs'] except (KeyError, TypeError): raise ValueError( """Response should be of schema: {'outputs': 'text'}. To fix this: - fork the source repo of the Banana model - modify app.py to return the above schema - deploy that as a custom repo""" ) if stop is not None: text = enforce_stop_tokens(text, stop) return text
def _call(self, prompt: str, stop: Optional[List[str]]=None, run_manager: Optional[CallbackManagerForLLMRun]=None, **kwargs: Any) ->str: """Call to Banana endpoint.""" try: from banana_dev import Client except ImportError: raise ImportError( 'Could not import banana-dev python package. Please install it with `pip install banana-dev`.' ) params = self.model_kwargs or {} params = {**params, **kwargs} api_key = self.banana_api_key model_key = self.model_key model_url_slug = self.model_url_slug model_inputs = {'prompt': prompt, **params} model = Client(api_key=api_key, model_key=model_key, url= f'https://{model_url_slug}.run.banana.dev') response, meta = model.call('/', model_inputs) try: text = response['outputs'] except (KeyError, TypeError): raise ValueError( """Response should be of schema: {'outputs': 'text'}. To fix this: - fork the source repo of the Banana model - modify app.py to return the above schema - deploy that as a custom repo""" ) if stop is not None: text = enforce_stop_tokens(text, stop) return text
Call to Banana endpoint.
_stream
self._load_model(self.model_name) invocation_params = self._get_invocation_params(**kwargs, prompt=[[prompt]]) stop_words = stop if stop is not None else self.stop inputs = self._generate_inputs(stream=True, **invocation_params) outputs = self._generate_outputs() result_queue = self._invoke_triton(self.model_name, inputs, outputs, stop_words ) for token in result_queue: yield GenerationChunk(text=token) if run_manager: run_manager.on_llm_new_token(token) self.client.stop_stream()
def _stream(self, prompt: str, stop: Optional[List[str]]=None, run_manager: Optional[CallbackManagerForLLMRun]=None, **kwargs: Any) ->Iterator[ GenerationChunk]: self._load_model(self.model_name) invocation_params = self._get_invocation_params(**kwargs, prompt=[[prompt]] ) stop_words = stop if stop is not None else self.stop inputs = self._generate_inputs(stream=True, **invocation_params) outputs = self._generate_outputs() result_queue = self._invoke_triton(self.model_name, inputs, outputs, stop_words) for token in result_queue: yield GenerationChunk(text=token) if run_manager: run_manager.on_llm_new_token(token) self.client.stop_stream()
null
_run
try: result = self.client.chat_postMessage(channel=channel, text=message) output = 'Message sent: ' + str(result) return output except Exception as e: return 'Error creating conversation: {}'.format(e)
def _run(self, message: str, channel: str, run_manager: Optional[ CallbackManagerForToolRun]=None) ->str: try: result = self.client.chat_postMessage(channel=channel, text=message) output = 'Message sent: ' + str(result) return output except Exception as e: return 'Error creating conversation: {}'.format(e)
null
_stream_response_to_generation_chunk
"""Convert a stream response to a generation chunk.""" if not stream_response['results']: return GenerationChunk(text='') return GenerationChunk(text=stream_response['results'][0]['generated_text'], generation_info=dict(finish_reason=stream_response['results'][0].get( 'stop_reason', None), llm_output={'generated_token_count': stream_response['results'][0].get('generated_token_count', None), 'model_id': self.model_id, 'deployment_id': self.deployment_id}))
def _stream_response_to_generation_chunk(self, stream_response: Dict[str, Any] ) ->GenerationChunk: """Convert a stream response to a generation chunk.""" if not stream_response['results']: return GenerationChunk(text='') return GenerationChunk(text=stream_response['results'][0][ 'generated_text'], generation_info=dict(finish_reason= stream_response['results'][0].get('stop_reason', None), llm_output= {'generated_token_count': stream_response['results'][0].get( 'generated_token_count', None), 'model_id': self.model_id, 'deployment_id': self.deployment_id}))
Convert a stream response to a generation chunk.
__getattr__
if name == 'AI21': return _import_ai21() elif name == 'AlephAlpha': return _import_aleph_alpha() elif name == 'AmazonAPIGateway': return _import_amazon_api_gateway() elif name == 'Anthropic': return _import_anthropic() elif name == 'Anyscale': return _import_anyscale() elif name == 'Aphrodite': return _import_aphrodite() elif name == 'Arcee': return _import_arcee() elif name == 'Aviary': return _import_aviary() elif name == 'AzureMLOnlineEndpoint': return _import_azureml_endpoint() elif name == 'QianfanLLMEndpoint': return _import_baidu_qianfan_endpoint() elif name == 'Banana': return _import_bananadev() elif name == 'Baseten': return _import_baseten() elif name == 'Beam': return _import_beam() elif name == 'Bedrock': return _import_bedrock() elif name == 'NIBittensorLLM': return _import_bittensor() elif name == 'CerebriumAI': return _import_cerebriumai() elif name == 'ChatGLM': return _import_chatglm() elif name == 'Clarifai': return _import_clarifai() elif name == 'Cohere': return _import_cohere() elif name == 'CTransformers': return _import_ctransformers() elif name == 'CTranslate2': return _import_ctranslate2() elif name == 'Databricks': return _import_databricks() elif name == 'DeepInfra': return _import_deepinfra() elif name == 'DeepSparse': return _import_deepsparse() elif name == 'EdenAI': return _import_edenai() elif name == 'FakeListLLM': return _import_fake() elif name == 'Fireworks': return _import_fireworks() elif name == 'ForefrontAI': return _import_forefrontai() elif name == 'GigaChat': return _import_gigachat() elif name == 'GooglePalm': return _import_google_palm() elif name == 'GooseAI': return _import_gooseai() elif name == 'GPT4All': return _import_gpt4all() elif name == 'GradientLLM': return _import_gradient_ai() elif name == 'HuggingFaceEndpoint': return _import_huggingface_endpoint() elif name == 'HuggingFaceHub': return _import_huggingface_hub() elif name == 'HuggingFacePipeline': return _import_huggingface_pipeline() elif name == 'HuggingFaceTextGenInference': return _import_huggingface_text_gen_inference() elif name == 'HumanInputLLM': return _import_human() elif name == 'JavelinAIGateway': return _import_javelin_ai_gateway() elif name == 'KoboldApiLLM': return _import_koboldai() elif name == 'LlamaCpp': return _import_llamacpp() elif name == 'ManifestWrapper': return _import_manifest() elif name == 'Minimax': return _import_minimax() elif name == 'Mlflow': return _import_mlflow() elif name == 'MlflowAIGateway': return _import_mlflow_ai_gateway() elif name == 'Modal': return _import_modal() elif name == 'MosaicML': return _import_mosaicml() elif name == 'NLPCloud': return _import_nlpcloud() elif name == 'OCIModelDeploymentTGI': return _import_oci_md_tgi() elif name == 'OCIModelDeploymentVLLM': return _import_oci_md_vllm() elif name == 'OctoAIEndpoint': return _import_octoai_endpoint() elif name == 'Ollama': return _import_ollama() elif name == 'OpaquePrompts': return _import_opaqueprompts() elif name == 'AzureOpenAI': return _import_azure_openai() elif name == 'OpenAI': return _import_openai() elif name == 'OpenAIChat': return _import_openai_chat() elif name == 'OpenLLM': return _import_openllm() elif name == 'OpenLM': return _import_openlm() elif name == 'PaiEasEndpoint': return _import_pai_eas_endpoint() elif name == 'Petals': return _import_petals() elif name == 'PipelineAI': return _import_pipelineai() elif name == 'Predibase': return _import_predibase() elif name == 'PredictionGuard': return _import_predictionguard() elif name == 'PromptLayerOpenAI': return 
_import_promptlayer() elif name == 'PromptLayerOpenAIChat': return _import_promptlayer_chat() elif name == 'Replicate': return _import_replicate() elif name == 'RWKV': return _import_rwkv() elif name == 'SagemakerEndpoint': return _import_sagemaker_endpoint() elif name == 'SelfHostedPipeline': return _import_self_hosted() elif name == 'SelfHostedHuggingFaceLLM': return _import_self_hosted_hugging_face() elif name == 'StochasticAI': return _import_stochasticai() elif name == 'Nebula': return _import_symblai_nebula() elif name == 'TextGen': return _import_textgen() elif name == 'TitanTakeoff': return _import_titan_takeoff() elif name == 'TitanTakeoffPro': return _import_titan_takeoff_pro() elif name == 'Together': return _import_together() elif name == 'Tongyi': return _import_tongyi() elif name == 'VertexAI': return _import_vertex() elif name == 'VertexAIModelGarden': return _import_vertex_model_garden() elif name == 'VLLM': return _import_vllm() elif name == 'VLLMOpenAI': return _import_vllm_openai() elif name == 'WatsonxLLM': return _import_watsonxllm() elif name == 'Writer': return _import_writer() elif name == 'Xinference': return _import_xinference() elif name == 'YandexGPT': return _import_yandex_gpt() elif name == 'VolcEngineMaasLLM': return _import_volcengine_maas() elif name == 'type_to_cls_dict': type_to_cls_dict: Dict[str, Type[BaseLLM]] = {k: v() for k, v in get_type_to_cls_dict().items()} return type_to_cls_dict else: raise AttributeError(f'Could not find: {name}')
def __getattr__(name: str) ->Any: if name == 'AI21': return _import_ai21() elif name == 'AlephAlpha': return _import_aleph_alpha() elif name == 'AmazonAPIGateway': return _import_amazon_api_gateway() elif name == 'Anthropic': return _import_anthropic() elif name == 'Anyscale': return _import_anyscale() elif name == 'Aphrodite': return _import_aphrodite() elif name == 'Arcee': return _import_arcee() elif name == 'Aviary': return _import_aviary() elif name == 'AzureMLOnlineEndpoint': return _import_azureml_endpoint() elif name == 'QianfanLLMEndpoint': return _import_baidu_qianfan_endpoint() elif name == 'Banana': return _import_bananadev() elif name == 'Baseten': return _import_baseten() elif name == 'Beam': return _import_beam() elif name == 'Bedrock': return _import_bedrock() elif name == 'NIBittensorLLM': return _import_bittensor() elif name == 'CerebriumAI': return _import_cerebriumai() elif name == 'ChatGLM': return _import_chatglm() elif name == 'Clarifai': return _import_clarifai() elif name == 'Cohere': return _import_cohere() elif name == 'CTransformers': return _import_ctransformers() elif name == 'CTranslate2': return _import_ctranslate2() elif name == 'Databricks': return _import_databricks() elif name == 'DeepInfra': return _import_deepinfra() elif name == 'DeepSparse': return _import_deepsparse() elif name == 'EdenAI': return _import_edenai() elif name == 'FakeListLLM': return _import_fake() elif name == 'Fireworks': return _import_fireworks() elif name == 'ForefrontAI': return _import_forefrontai() elif name == 'GigaChat': return _import_gigachat() elif name == 'GooglePalm': return _import_google_palm() elif name == 'GooseAI': return _import_gooseai() elif name == 'GPT4All': return _import_gpt4all() elif name == 'GradientLLM': return _import_gradient_ai() elif name == 'HuggingFaceEndpoint': return _import_huggingface_endpoint() elif name == 'HuggingFaceHub': return _import_huggingface_hub() elif name == 'HuggingFacePipeline': return _import_huggingface_pipeline() elif name == 'HuggingFaceTextGenInference': return _import_huggingface_text_gen_inference() elif name == 'HumanInputLLM': return _import_human() elif name == 'JavelinAIGateway': return _import_javelin_ai_gateway() elif name == 'KoboldApiLLM': return _import_koboldai() elif name == 'LlamaCpp': return _import_llamacpp() elif name == 'ManifestWrapper': return _import_manifest() elif name == 'Minimax': return _import_minimax() elif name == 'Mlflow': return _import_mlflow() elif name == 'MlflowAIGateway': return _import_mlflow_ai_gateway() elif name == 'Modal': return _import_modal() elif name == 'MosaicML': return _import_mosaicml() elif name == 'NLPCloud': return _import_nlpcloud() elif name == 'OCIModelDeploymentTGI': return _import_oci_md_tgi() elif name == 'OCIModelDeploymentVLLM': return _import_oci_md_vllm() elif name == 'OctoAIEndpoint': return _import_octoai_endpoint() elif name == 'Ollama': return _import_ollama() elif name == 'OpaquePrompts': return _import_opaqueprompts() elif name == 'AzureOpenAI': return _import_azure_openai() elif name == 'OpenAI': return _import_openai() elif name == 'OpenAIChat': return _import_openai_chat() elif name == 'OpenLLM': return _import_openllm() elif name == 'OpenLM': return _import_openlm() elif name == 'PaiEasEndpoint': return _import_pai_eas_endpoint() elif name == 'Petals': return _import_petals() elif name == 'PipelineAI': return _import_pipelineai() elif name == 'Predibase': return _import_predibase() elif name == 'PredictionGuard': return _import_predictionguard() elif 
name == 'PromptLayerOpenAI': return _import_promptlayer() elif name == 'PromptLayerOpenAIChat': return _import_promptlayer_chat() elif name == 'Replicate': return _import_replicate() elif name == 'RWKV': return _import_rwkv() elif name == 'SagemakerEndpoint': return _import_sagemaker_endpoint() elif name == 'SelfHostedPipeline': return _import_self_hosted() elif name == 'SelfHostedHuggingFaceLLM': return _import_self_hosted_hugging_face() elif name == 'StochasticAI': return _import_stochasticai() elif name == 'Nebula': return _import_symblai_nebula() elif name == 'TextGen': return _import_textgen() elif name == 'TitanTakeoff': return _import_titan_takeoff() elif name == 'TitanTakeoffPro': return _import_titan_takeoff_pro() elif name == 'Together': return _import_together() elif name == 'Tongyi': return _import_tongyi() elif name == 'VertexAI': return _import_vertex() elif name == 'VertexAIModelGarden': return _import_vertex_model_garden() elif name == 'VLLM': return _import_vllm() elif name == 'VLLMOpenAI': return _import_vllm_openai() elif name == 'WatsonxLLM': return _import_watsonxllm() elif name == 'Writer': return _import_writer() elif name == 'Xinference': return _import_xinference() elif name == 'YandexGPT': return _import_yandex_gpt() elif name == 'VolcEngineMaasLLM': return _import_volcengine_maas() elif name == 'type_to_cls_dict': type_to_cls_dict: Dict[str, Type[BaseLLM]] = {k: v() for k, v in get_type_to_cls_dict().items()} return type_to_cls_dict else: raise AttributeError(f'Could not find: {name}')
null
__validate_distance_strategy
if distance_strategy not in [DistanceStrategy.COSINE, DistanceStrategy.MAX_INNER_PRODUCT, DistanceStrategy.MAX_INNER_PRODUCT]: raise ValueError(f'Distance strategy {distance_strategy} not implemented.')
@staticmethod def __validate_distance_strategy(distance_strategy: DistanceStrategy) ->None: if distance_strategy not in [DistanceStrategy.COSINE, DistanceStrategy.MAX_INNER_PRODUCT, DistanceStrategy.MAX_INNER_PRODUCT]: raise ValueError(f'Distance strategy {distance_strategy} not implemented.')
null
query
if similarity is DistanceStrategy.COSINE: similarityAlgo = ( f"cosineSimilarity(params.query_vector, '{vector_query_field}') + 1.0") elif similarity is DistanceStrategy.EUCLIDEAN_DISTANCE: similarityAlgo = ( f"1 / (1 + l2norm(params.query_vector, '{vector_query_field}'))") elif similarity is DistanceStrategy.DOT_PRODUCT: similarityAlgo = f""" double value = dotProduct(params.query_vector, '{vector_query_field}'); return sigmoid(1, Math.E, -value); """ else: raise ValueError(f'Similarity {similarity} not supported.') queryBool: Dict = {'match_all': {}} if filter: queryBool = {'bool': {'filter': filter}} return {'query': {'script_score': {'query': queryBool, 'script': {'source': similarityAlgo, 'params': {'query_vector': query_vector}}}}}
def query(self, query_vector: Union[List[float], None], query: Union[str, None], k: int, fetch_k: int, vector_query_field: str, text_field: str, filter: Union[List[dict], None], similarity: Union[DistanceStrategy, None] ) ->Dict: if similarity is DistanceStrategy.COSINE: similarityAlgo = ( f"cosineSimilarity(params.query_vector, '{vector_query_field}') + 1.0" ) elif similarity is DistanceStrategy.EUCLIDEAN_DISTANCE: similarityAlgo = ( f"1 / (1 + l2norm(params.query_vector, '{vector_query_field}'))") elif similarity is DistanceStrategy.DOT_PRODUCT: similarityAlgo = f""" double value = dotProduct(params.query_vector, '{vector_query_field}'); return sigmoid(1, Math.E, -value); """ else: raise ValueError(f'Similarity {similarity} not supported.') queryBool: Dict = {'match_all': {}} if filter: queryBool = {'bool': {'filter': filter}} return {'query': {'script_score': {'query': queryBool, 'script': { 'source': similarityAlgo, 'params': {'query_vector': query_vector}}}}}
null
plan
"""Given input, decide what to do."""
@abstractmethod def plan(self, inputs: dict, callbacks: Callbacks=None, **kwargs: Any) ->Plan: """Given input, decide what to do."""
Given input, decide what to do.
__init__
super().__init__(**kwargs) self.tot_controller.c = self.c
def __init__(self, **kwargs: Any): super().__init__(**kwargs) self.tot_controller.c = self.c
null
_embed_query
"""Embed query text. Used to provide backward compatibility with `embedding_function` argument. Args: query: Query text. Returns: List of floats representing the query embedding. """ if self.embeddings is not None: embedding = self.embeddings.embed_query(query) elif self._embeddings_function is not None: embedding = self._embeddings_function(query) else: raise ValueError('Neither of embeddings or embedding_function is set') return embedding.tolist() if hasattr(embedding, 'tolist') else embedding
def _embed_query(self, query: str) ->List[float]: """Embed query text. Used to provide backward compatibility with `embedding_function` argument. Args: query: Query text. Returns: List of floats representing the query embedding. """ if self.embeddings is not None: embedding = self.embeddings.embed_query(query) elif self._embeddings_function is not None: embedding = self._embeddings_function(query) else: raise ValueError('Neither of embeddings or embedding_function is set') return embedding.tolist() if hasattr(embedding, 'tolist') else embedding
Embed query text. Used to provide backward compatibility with `embedding_function` argument. Args: query: Query text. Returns: List of floats representing the query embedding.
test_json_schema_evaluator_evaluation_name
assert json_schema_evaluator.evaluation_name == 'json_schema_validation'
@pytest.mark.requires('jsonschema') def test_json_schema_evaluator_evaluation_name(json_schema_evaluator: JsonSchemaEvaluator) ->None: assert json_schema_evaluator.evaluation_name == 'json_schema_validation'
null
test_multiple_messages
"""Tests multiple messages works.""" chat = VolcEngineMaasChat() message = HumanMessage(content='Hi, how are you?') response = chat.generate([[message], [message]]) assert isinstance(response, LLMResult) assert len(response.generations) == 2 for generations in response.generations: assert len(generations) == 1 for generation in generations: assert isinstance(generation, ChatGeneration) assert isinstance(generation.text, str) assert generation.text == generation.message.content
def test_multiple_messages() ->None: """Tests multiple messages works.""" chat = VolcEngineMaasChat() message = HumanMessage(content='Hi, how are you?') response = chat.generate([[message], [message]]) assert isinstance(response, LLMResult) assert len(response.generations) == 2 for generations in response.generations: assert len(generations) == 1 for generation in generations: assert isinstance(generation, ChatGeneration) assert isinstance(generation.text, str) assert generation.text == generation.message.content
Tests multiple messages works.
_get_tracer_project
run_tree = get_run_tree_context() return getattr(run_tree, 'session_name', getattr(tracing_v2_callback_var.get(), 'project', str(ls_utils.get_tracer_project())))
def _get_tracer_project() ->str: run_tree = get_run_tree_context() return getattr(run_tree, 'session_name', getattr(tracing_v2_callback_var.get(), 'project', str(ls_utils.get_tracer_project())))
null
test_custom_index_add_documents
"""This test checks the construction of a custom ElasticSearch index using the 'add_documents'.""" from elasticsearch import Elasticsearch index_name = f'custom_index_{uuid.uuid4().hex}' elastic_vector_search = ElasticVectorSearch(embedding=embedding_openai, elasticsearch_url=elasticsearch_url, index_name=index_name) es = Elasticsearch(hosts=elasticsearch_url) elastic_vector_search.add_documents(documents) index_names = es.indices.get(index='_all').keys() assert index_name in index_names search_result = elastic_vector_search.similarity_search('sharks') assert len(search_result) != 0
@pytest.mark.vcr(ignore_localhost=True) def test_custom_index_add_documents(self, documents: List[Document], embedding_openai: OpenAIEmbeddings, elasticsearch_url: str) ->None: """This test checks the construction of a custom ElasticSearch index using the 'add_documents'.""" from elasticsearch import Elasticsearch index_name = f'custom_index_{uuid.uuid4().hex}' elastic_vector_search = ElasticVectorSearch(embedding=embedding_openai, elasticsearch_url=elasticsearch_url, index_name=index_name) es = Elasticsearch(hosts=elasticsearch_url) elastic_vector_search.add_documents(documents) index_names = es.indices.get(index='_all').keys() assert index_name in index_names search_result = elastic_vector_search.similarity_search('sharks') assert len(search_result) != 0
This test checks the construction of a custom ElasticSearch index using the 'add_documents'.
construct_instance
try: import qdrant_client except ImportError: raise ValueError( 'Could not import qdrant-client python package. Please install it with `pip install qdrant-client`.' ) from grpc import RpcError from qdrant_client.http import models as rest from qdrant_client.http.exceptions import UnexpectedResponse partial_embeddings = embedding.embed_documents(texts[:1]) vector_size = len(partial_embeddings[0]) collection_name = collection_name or uuid.uuid4().hex distance_func = distance_func.upper() client, async_client = cls._generate_clients(location=location, url=url, port=port, grpc_port=grpc_port, prefer_grpc=prefer_grpc, https=https, api_key=api_key, prefix=prefix, timeout=timeout, host=host, path=path, **kwargs) try: if force_recreate: raise ValueError collection_info = client.get_collection(collection_name=collection_name) current_vector_config = collection_info.config.params.vectors if isinstance(current_vector_config, dict) and vector_name is not None: if vector_name not in current_vector_config: raise QdrantException( f"Existing Qdrant collection {collection_name} does not contain vector named {vector_name}. Did you mean one of the existing vectors: {', '.join(current_vector_config.keys())}? If you want to recreate the collection, set `force_recreate` parameter to `True`." ) current_vector_config = current_vector_config.get(vector_name) elif isinstance(current_vector_config, dict) and vector_name is None: raise QdrantException( f"Existing Qdrant collection {collection_name} uses named vectors. If you want to reuse it, please set `vector_name` to any of the existing named vectors: {', '.join(current_vector_config.keys())}.If you want to recreate the collection, set `force_recreate` parameter to `True`." ) elif not isinstance(current_vector_config, dict ) and vector_name is not None: raise QdrantException( f"Existing Qdrant collection {collection_name} doesn't use named vectors. If you want to reuse it, please set `vector_name` to `None`. If you want to recreate the collection, set `force_recreate` parameter to `True`." ) if current_vector_config.size != vector_size: raise QdrantException( f'Existing Qdrant collection is configured for vectors with {current_vector_config.size} dimensions. Selected embeddings are {vector_size}-dimensional. If you want to recreate the collection, set `force_recreate` parameter to `True`.' ) current_distance_func = current_vector_config.distance.name.upper() if current_distance_func != distance_func: raise QdrantException( f'Existing Qdrant collection is configured for {current_distance_func} similarity, but requested {distance_func}. Please set `distance_func` parameter to `{current_distance_func}` if you want to reuse it. If you want to recreate the collection, set `force_recreate` parameter to `True`.' ) except (UnexpectedResponse, RpcError, ValueError): vectors_config = rest.VectorParams(size=vector_size, distance=rest. 
Distance[distance_func], on_disk=on_disk) if vector_name is not None: vectors_config = {vector_name: vectors_config} client.recreate_collection(collection_name=collection_name, vectors_config=vectors_config, shard_number=shard_number, replication_factor=replication_factor, write_consistency_factor= write_consistency_factor, on_disk_payload=on_disk_payload, hnsw_config=hnsw_config, optimizers_config=optimizers_config, wal_config=wal_config, quantization_config=quantization_config, init_from=init_from, timeout=timeout) qdrant = cls(client=client, collection_name=collection_name, embeddings= embedding, content_payload_key=content_payload_key, metadata_payload_key=metadata_payload_key, distance_strategy= distance_func, vector_name=vector_name, async_client=async_client) return qdrant
@classmethod def construct_instance(cls: Type[Qdrant], texts: List[str], embedding: Embeddings, location: Optional[str]=None, url: Optional[str]=None, port: Optional[int]=6333, grpc_port: int=6334, prefer_grpc: bool=False, https: Optional[bool]=None, api_key: Optional[str]=None, prefix: Optional[str] =None, timeout: Optional[float]=None, host: Optional[str]=None, path: Optional[str]=None, collection_name: Optional[str]=None, distance_func: str='Cosine', content_payload_key: str=CONTENT_KEY, metadata_payload_key: str=METADATA_KEY, vector_name: Optional[str]= VECTOR_NAME, shard_number: Optional[int]=None, replication_factor: Optional[int]=None, write_consistency_factor: Optional[int]=None, on_disk_payload: Optional[bool]=None, hnsw_config: Optional[ common_types.HnswConfigDiff]=None, optimizers_config: Optional[ common_types.OptimizersConfigDiff]=None, wal_config: Optional[ common_types.WalConfigDiff]=None, quantization_config: Optional[ common_types.QuantizationConfig]=None, init_from: Optional[common_types .InitFrom]=None, on_disk: Optional[bool]=None, force_recreate: bool= False, **kwargs: Any) ->Qdrant: try: import qdrant_client except ImportError: raise ValueError( 'Could not import qdrant-client python package. Please install it with `pip install qdrant-client`.' ) from grpc import RpcError from qdrant_client.http import models as rest from qdrant_client.http.exceptions import UnexpectedResponse partial_embeddings = embedding.embed_documents(texts[:1]) vector_size = len(partial_embeddings[0]) collection_name = collection_name or uuid.uuid4().hex distance_func = distance_func.upper() client, async_client = cls._generate_clients(location=location, url=url, port=port, grpc_port=grpc_port, prefer_grpc=prefer_grpc, https= https, api_key=api_key, prefix=prefix, timeout=timeout, host=host, path=path, **kwargs) try: if force_recreate: raise ValueError collection_info = client.get_collection(collection_name=collection_name ) current_vector_config = collection_info.config.params.vectors if isinstance(current_vector_config, dict) and vector_name is not None: if vector_name not in current_vector_config: raise QdrantException( f"Existing Qdrant collection {collection_name} does not contain vector named {vector_name}. Did you mean one of the existing vectors: {', '.join(current_vector_config.keys())}? If you want to recreate the collection, set `force_recreate` parameter to `True`." ) current_vector_config = current_vector_config.get(vector_name) elif isinstance(current_vector_config, dict) and vector_name is None: raise QdrantException( f"Existing Qdrant collection {collection_name} uses named vectors. If you want to reuse it, please set `vector_name` to any of the existing named vectors: {', '.join(current_vector_config.keys())}.If you want to recreate the collection, set `force_recreate` parameter to `True`." ) elif not isinstance(current_vector_config, dict ) and vector_name is not None: raise QdrantException( f"Existing Qdrant collection {collection_name} doesn't use named vectors. If you want to reuse it, please set `vector_name` to `None`. If you want to recreate the collection, set `force_recreate` parameter to `True`." ) if current_vector_config.size != vector_size: raise QdrantException( f'Existing Qdrant collection is configured for vectors with {current_vector_config.size} dimensions. Selected embeddings are {vector_size}-dimensional. If you want to recreate the collection, set `force_recreate` parameter to `True`.' 
) current_distance_func = current_vector_config.distance.name.upper() if current_distance_func != distance_func: raise QdrantException( f'Existing Qdrant collection is configured for {current_distance_func} similarity, but requested {distance_func}. Please set `distance_func` parameter to `{current_distance_func}` if you want to reuse it. If you want to recreate the collection, set `force_recreate` parameter to `True`.' ) except (UnexpectedResponse, RpcError, ValueError): vectors_config = rest.VectorParams(size=vector_size, distance=rest. Distance[distance_func], on_disk=on_disk) if vector_name is not None: vectors_config = {vector_name: vectors_config} client.recreate_collection(collection_name=collection_name, vectors_config=vectors_config, shard_number=shard_number, replication_factor=replication_factor, write_consistency_factor =write_consistency_factor, on_disk_payload=on_disk_payload, hnsw_config=hnsw_config, optimizers_config=optimizers_config, wal_config=wal_config, quantization_config=quantization_config, init_from=init_from, timeout=timeout) qdrant = cls(client=client, collection_name=collection_name, embeddings =embedding, content_payload_key=content_payload_key, metadata_payload_key=metadata_payload_key, distance_strategy= distance_func, vector_name=vector_name, async_client=async_client) return qdrant
null
exception
"""Add an Exception element to the container and return its index.""" kwargs = {'exception': exception} new_dg = self._get_dg(index).exception(**kwargs) record = ChildRecord(ChildType.EXCEPTION, kwargs, new_dg) return self._add_record(record, index)
def exception(self, exception: BaseException, *, index: Optional[int]=None) ->int: """Add an Exception element to the container and return its index.""" kwargs = {'exception': exception} new_dg = self._get_dg(index).exception(**kwargs) record = ChildRecord(ChildType.EXCEPTION, kwargs, new_dg) return self._add_record(record, index)
Add an Exception element to the container and return its index.
_call
if self.sequential_responses: return self._get_next_response_in_sequence if self.queries is not None: return self.queries[prompt] if stop is None: return 'foo' else: return 'bar'
def _call(self, prompt: str, stop: Optional[List[str]]=None, run_manager: Optional[CallbackManagerForLLMRun]=None, **kwargs: Any) ->str: if self.sequential_responses: return self._get_next_response_in_sequence if self.queries is not None: return self.queries[prompt] if stop is None: return 'foo' else: return 'bar'
null
__init__
super().__init__(mapper=mapper, **kwargs)
def __init__(self, mapper: RunnableParallel[Dict[str, Any]], **kwargs: Any) ->None: super().__init__(mapper=mapper, **kwargs)
null
test_maximal_marginal_relevance
query_embedding = np.array([1, 0]) embedding_list = [[3 ** 0.5, 1], [1, 1], [1, 2 + 3 ** 0.5]] expected = [0, 2] actual = maximal_marginal_relevance(query_embedding, embedding_list, lambda_mult=25 / 71, k=2) assert expected == actual expected = [0, 1] actual = maximal_marginal_relevance(query_embedding, embedding_list, lambda_mult=27 / 71, k=2) assert expected == actual
def test_maximal_marginal_relevance() ->None: query_embedding = np.array([1, 0]) embedding_list = [[3 ** 0.5, 1], [1, 1], [1, 2 + 3 ** 0.5]] expected = [0, 2] actual = maximal_marginal_relevance(query_embedding, embedding_list, lambda_mult=25 / 71, k=2) assert expected == actual expected = [0, 1] actual = maximal_marginal_relevance(query_embedding, embedding_list, lambda_mult=27 / 71, k=2) assert expected == actual
null
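For reference, the quantity this test exercises is the standard maximal marginal relevance criterion (Carbonell and Goldstein, 1998): at each step the next document maximizes query relevance traded off against redundancy with the documents already selected,

\mathrm{MMR} = \arg\max_{D_i \in R \setminus S} \Bigl[ \lambda \,\mathrm{sim}(D_i, Q) - (1 - \lambda) \max_{D_j \in S} \mathrm{sim}(D_i, D_j) \Bigr]

With the embeddings used here the first pick is always the most query-similar vector, and the two lambda_mult values (25/71 and 27/71) sit just below and just above the threshold at which the second pick flips from the most diverse candidate (index 2) to the next most query-similar one (index 1).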
_get_schema_type_for_array
from openapi_pydantic import Reference, Schema items = schema.items if isinstance(items, Schema): schema_type = APIProperty._cast_schema_list_type(items) elif isinstance(items, Reference): ref_name = items.ref.split('/')[-1] schema_type = ref_name else: raise ValueError(f'Unsupported array items: {items}') if isinstance(schema_type, str): schema_type = schema_type, return schema_type
@staticmethod def _get_schema_type_for_array(schema: Schema) ->Optional[Union[str, Tuple[str, ...]]]: from openapi_pydantic import Reference, Schema items = schema.items if isinstance(items, Schema): schema_type = APIProperty._cast_schema_list_type(items) elif isinstance(items, Reference): ref_name = items.ref.split('/')[-1] schema_type = ref_name else: raise ValueError(f'Unsupported array items: {items}') if isinstance(schema_type, str): schema_type = schema_type, return schema_type
null
test_google_palm_embedding_query
"""Test Google PaLM embeddings.""" document = 'foo bar' embedding = GooglePalmEmbeddings() output = embedding.embed_query(document) assert len(output) == 768
def test_google_palm_embedding_query() ->None: """Test Google PaLM embeddings.""" document = 'foo bar' embedding = GooglePalmEmbeddings() output = embedding.embed_query(document) assert len(output) == 768
Test Google PaLM embeddings.
test_singlestoredb_euclidean_distance
"""Test adding a new document""" table_name = 'test_singlestoredb_euclidean_distance' drop(table_name) docsearch = SingleStoreDB.from_texts(texts, FakeEmbeddings(), distance_strategy=DistanceStrategy.EUCLIDEAN_DISTANCE, table_name= table_name, host=TEST_SINGLESTOREDB_URL) docsearch.add_texts(['foo']) output = docsearch.similarity_search('foo', k=2) assert output == TEST_RESULT drop(table_name)
@pytest.mark.skipif(not singlestoredb_installed, reason= 'singlestoredb not installed') def test_singlestoredb_euclidean_distance(texts: List[str]) ->None: """Test adding a new document""" table_name = 'test_singlestoredb_euclidean_distance' drop(table_name) docsearch = SingleStoreDB.from_texts(texts, FakeEmbeddings(), distance_strategy=DistanceStrategy.EUCLIDEAN_DISTANCE, table_name= table_name, host=TEST_SINGLESTOREDB_URL) docsearch.add_texts(['foo']) output = docsearch.similarity_search('foo', k=2) assert output == TEST_RESULT drop(table_name)
Test adding a new document
test_unstructured_email_loader_with_attachments
file_path = Path(__file__).parent.parent / 'examples/fake-email-attachment.eml' loader = UnstructuredEmailLoader(str(file_path), mode='elements', process_attachments=True) docs = loader.load() assert docs[-1].page_content == 'Hey this is a fake attachment!' assert docs[-1].metadata['filename'] == 'fake-attachment.txt' assert docs[-1].metadata['source'].endswith('fake-email-attachment.eml')
def test_unstructured_email_loader_with_attachments() ->None:
    file_path = Path(__file__).parent.parent / 'examples/fake-email-attachment.eml'
    loader = UnstructuredEmailLoader(str(file_path), mode='elements', process_attachments=True)
    docs = loader.load()
    assert docs[-1].page_content == 'Hey this is a fake attachment!'
    assert docs[-1].metadata['filename'] == 'fake-attachment.txt'
    assert docs[-1].metadata['source'].endswith('fake-email-attachment.eml')
null
test_load
os.environ['MS_GRAPH_CLIENT_ID'] = 'CLIENT_ID' os.environ['MS_GRAPH_CLIENT_SECRET'] = 'CLIENT_SECRET' mocker.patch('requests.get', return_value=mocker.MagicMock(json=lambda : { 'value': []}, links=None)) loader = OneNoteLoader(notebook_name='test_notebook', section_name= 'test_section', page_title='test_title', access_token='access_token') documents = loader.load() assert documents == [] mocker.patch( 'langchain_community.document_loaders.onenote.OneNoteLoader._get_page_content' , return_value= '<html><head><title>Test Title</title></head><body><p>Test Content</p></body></html>' ) loader = OneNoteLoader(object_ids=['test_id'], access_token='access_token') documents = loader.load() assert documents == [Document(page_content="""Test Title Test Content""", metadata={'title': 'Test Title'})]
@pytest.mark.requires('bs4')
def test_load(mocker: MockerFixture) ->None:
    os.environ['MS_GRAPH_CLIENT_ID'] = 'CLIENT_ID'
    os.environ['MS_GRAPH_CLIENT_SECRET'] = 'CLIENT_SECRET'
    mocker.patch('requests.get', return_value=mocker.MagicMock(json=lambda: {'value': []}, links=None))
    loader = OneNoteLoader(notebook_name='test_notebook', section_name='test_section', page_title='test_title', access_token='access_token')
    documents = loader.load()
    assert documents == []
    mocker.patch('langchain_community.document_loaders.onenote.OneNoteLoader._get_page_content', return_value='<html><head><title>Test Title</title></head><body><p>Test Content</p></body></html>')
    loader = OneNoteLoader(object_ids=['test_id'], access_token='access_token')
    documents = loader.load()
    assert documents == [Document(page_content='Test Title\nTest Content', metadata={'title': 'Test Title'})]
null
__init__
super().__init__() self.directory_path = Path(path) if isinstance(path, str) else path
def __init__(self, path: Union[str, Path]) ->None:
    super().__init__()
    self.directory_path = Path(path) if isinstance(path, str) else path
null
with_retry
"""Create a new Runnable that retries the original runnable on exceptions. Args: retry_if_exception_type: A tuple of exception types to retry on wait_exponential_jitter: Whether to add jitter to the wait time between retries stop_after_attempt: The maximum number of attempts to make before giving up Returns: A new Runnable that retries the original runnable on exceptions. """ from langchain_core.runnables.retry import RunnableRetry return RunnableRetry(bound=self, kwargs={}, config={}, retry_exception_types=retry_if_exception_type, wait_exponential_jitter= wait_exponential_jitter, max_attempt_number=stop_after_attempt)
def with_retry(self, *, retry_if_exception_type: Tuple[Type[BaseException], ...]=(Exception,), wait_exponential_jitter: bool=True, stop_after_attempt: int=3) ->Runnable[Input, Output]:
    """Create a new Runnable that retries the original runnable on exceptions.

    Args:
        retry_if_exception_type: A tuple of exception types to retry on
        wait_exponential_jitter: Whether to add jitter to the wait time
            between retries
        stop_after_attempt: The maximum number of attempts to make before giving up

    Returns:
        A new Runnable that retries the original runnable on exceptions.
    """
    from langchain_core.runnables.retry import RunnableRetry
    return RunnableRetry(bound=self, kwargs={}, config={}, retry_exception_types=retry_if_exception_type, wait_exponential_jitter=wait_exponential_jitter, max_attempt_number=stop_after_attempt)
Create a new Runnable that retries the original runnable on exceptions. Args: retry_if_exception_type: A tuple of exception types to retry on wait_exponential_jitter: Whether to add jitter to the wait time between retries stop_after_attempt: The maximum number of attempts to make before giving up Returns: A new Runnable that retries the original runnable on exceptions.
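A hedged usage sketch of the retry wrapper described above; the flaky function and the retry settings are invented for illustration and assume langchain_core is installed.

from langchain_core.runnables import RunnableLambda

attempts = {'n': 0}

def flaky(x: int) ->int:
    # fails on the first call, succeeds afterwards
    attempts['n'] += 1
    if attempts['n'] < 2:
        raise ValueError('transient failure')
    return x * 2

chain = RunnableLambda(flaky).with_retry(retry_if_exception_type=(ValueError,), wait_exponential_jitter=False, stop_after_attempt=3)
print(chain.invoke(5))  # 10, after one retried attempt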
search_results_with_query_embedding
return_count = 2 return [gen_mock_zep_document(collection_name='test_collection', embedding_dimensions=VECTOR_DIMS) for _ in range(return_count) ], gen_vector()
@pytest.fixture
def search_results_with_query_embedding() ->Tuple[List['ZepDocument'], List[float]]:
    return_count = 2
    return [gen_mock_zep_document(collection_name='test_collection', embedding_dimensions=VECTOR_DIMS) for _ in range(return_count)], gen_vector()
null
create
""" create the vector store on the backend database Args: metadata_str (str): columns and their types Returns: True if successful; False if not successful """ podstore = self._pod + '.' + self._store """ source column is required. v:text column is required. """ q = 'create store ' q += podstore q += f' ({self._vector_index} vector({self._vector_dimension},' q += f" '{self._vector_type}')," q += f' source char(256), v:text char({text_size}),' q += metadata_str + ')' self.run(q)
def create(self, metadata_str: str, text_size: int) ->None:
    """
    create the vector store on the backend database
    Args:
        metadata_str (str): columns and their types
    Returns:
        True if successful; False if not successful
    """
    podstore = self._pod + '.' + self._store
    """
    source column is required.
    v:text column is required.
    """
    q = 'create store '
    q += podstore
    q += f' ({self._vector_index} vector({self._vector_dimension},'
    q += f" '{self._vector_type}'),"
    q += f' source char(256), v:text char({text_size}),'
    q += metadata_str + ')'
    self.run(q)
create the vector store on the backend database Args: metadata_str (str): columns and their types Returns: True if successful; False if not successful
_default_knn_query
knn: Dict = {'field': self.vector_query_field, 'k': k, 'num_candidates': num_candidates} if query_vector and not model_id: knn['query_vector'] = query_vector elif query and model_id: knn['query_vector_builder'] = {'text_embedding': {'model_id': model_id, 'model_text': query}} else: raise ValueError( 'Either `query_vector` or `model_id` must be provided, but not both.') return knn
def _default_knn_query(self, query_vector: Optional[List[float]]=None, query: Optional[str]=None, model_id: Optional[str]=None, k: Optional[int]=10, num_candidates: Optional[int]=10) ->Dict:
    knn: Dict = {'field': self.vector_query_field, 'k': k, 'num_candidates': num_candidates}
    if query_vector and not model_id:
        knn['query_vector'] = query_vector
    elif query and model_id:
        knn['query_vector_builder'] = {'text_embedding': {'model_id': model_id, 'model_text': query}}
    else:
        raise ValueError('Either `query_vector` or `model_id` must be provided, but not both.')
    return knn
null
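For orientation (not part of the record), the two shapes of kNN clause this helper can emit, shown as literal Python dicts; the field name and example values are made up, and the keys follow the Elasticsearch kNN search DSL.

# when a pre-computed query vector is supplied
{'field': 'vector', 'k': 10, 'num_candidates': 10, 'query_vector': [0.1, 0.2, 0.3]}

# when a deployed embedding model should embed the query text server-side
{'field': 'vector', 'k': 10, 'num_candidates': 10, 'query_vector_builder': {'text_embedding': {'model_id': 'my-model', 'model_text': 'hello'}}}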
cosine_similarity
"""Row-wise cosine similarity between two equal-width matrices.""" if len(X) == 0 or len(Y) == 0: return np.array([]) X = np.array(X) Y = np.array(Y) if X.shape[1] != Y.shape[1]: raise ValueError( f'Number of columns in X and Y must be the same. X has shape {X.shape} and Y has shape {Y.shape}.' ) try: import simsimd as simd X = np.array(X, dtype=np.float32) Y = np.array(Y, dtype=np.float32) Z = 1 - simd.cdist(X, Y, metric='cosine') if isinstance(Z, float): return np.array([Z]) return Z except ImportError: logger.info( 'Unable to import simsimd, defaulting to NumPy implementation. If you want to use simsimd please install with `pip install simsimd`.' ) X_norm = np.linalg.norm(X, axis=1) Y_norm = np.linalg.norm(Y, axis=1) with np.errstate(divide='ignore', invalid='ignore'): similarity = np.dot(X, Y.T) / np.outer(X_norm, Y_norm) similarity[np.isnan(similarity) | np.isinf(similarity)] = 0.0 return similarity
def cosine_similarity(X: Matrix, Y: Matrix) ->np.ndarray:
    """Row-wise cosine similarity between two equal-width matrices."""
    if len(X) == 0 or len(Y) == 0:
        return np.array([])
    X = np.array(X)
    Y = np.array(Y)
    if X.shape[1] != Y.shape[1]:
        raise ValueError(f'Number of columns in X and Y must be the same. X has shape {X.shape} and Y has shape {Y.shape}.')
    try:
        import simsimd as simd
        X = np.array(X, dtype=np.float32)
        Y = np.array(Y, dtype=np.float32)
        Z = 1 - simd.cdist(X, Y, metric='cosine')
        if isinstance(Z, float):
            return np.array([Z])
        return Z
    except ImportError:
        logger.info('Unable to import simsimd, defaulting to NumPy implementation. If you want to use simsimd please install with `pip install simsimd`.')
        X_norm = np.linalg.norm(X, axis=1)
        Y_norm = np.linalg.norm(Y, axis=1)
        with np.errstate(divide='ignore', invalid='ignore'):
            similarity = np.dot(X, Y.T) / np.outer(X_norm, Y_norm)
            similarity[np.isnan(similarity) | np.isinf(similarity)] = 0.0
        return similarity
Row-wise cosine similarity between two equal-width matrices.
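A small, hedged usage sketch of the NumPy fallback path above; the inputs are toy vectors chosen so the expected result is easy to verify by hand.

import numpy as np

X = [[1.0, 0.0], [0.0, 1.0]]
Y = [[1.0, 0.0]]
# row-wise cosine similarity: the first row of X matches Y exactly (1.0),
# the second row is orthogonal to it (0.0)
sims = np.dot(np.array(X), np.array(Y).T) / np.outer(np.linalg.norm(X, axis=1), np.linalg.norm(Y, axis=1))
print(sims)  # [[1.], [0.]]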
test_minimax_call_successful
"""Test valid call to minimax.""" llm = Minimax() output = llm( 'A chain is a serial assembly of connected pieces, called links, typically made of metal, with an overall character similar to that of a rope in that it is flexible and curved in compression but linear, rigid, and load-bearing in tension. A chain may consist of two or more links.' ) assert isinstance(output, str)
def test_minimax_call_successful() ->None:
    """Test valid call to minimax."""
    llm = Minimax()
    output = llm('A chain is a serial assembly of connected pieces, called links, typically made of metal, with an overall character similar to that of a rope in that it is flexible and curved in compression but linear, rigid, and load-bearing in tension. A chain may consist of two or more links.')
    assert isinstance(output, str)
Test valid call to minimax.
create_demo_server_configurable
return create_demo_server(config_keys=['configurable'])
def create_demo_server_configurable():
    return create_demo_server(config_keys=['configurable'])
null
raise_deprecation
if 'llm' in values: warnings.warn( 'Directly instantiating an LLMCheckerChain with an llm is deprecated. Please instantiate with question_to_checked_assertions_chain or using the from_llm class method.' ) if 'question_to_checked_assertions_chain' not in values and values['llm' ] is not None: question_to_checked_assertions_chain = ( _load_question_to_checked_assertions_chain(values['llm'], values.get('create_draft_answer_prompt', CREATE_DRAFT_ANSWER_PROMPT), values.get( 'list_assertions_prompt', LIST_ASSERTIONS_PROMPT), values.get( 'check_assertions_prompt', CHECK_ASSERTIONS_PROMPT), values.get ('revised_answer_prompt', REVISED_ANSWER_PROMPT))) values['question_to_checked_assertions_chain' ] = question_to_checked_assertions_chain return values
@root_validator(pre=True)
def raise_deprecation(cls, values: Dict) ->Dict:
    if 'llm' in values:
        warnings.warn('Directly instantiating an LLMCheckerChain with an llm is deprecated. Please instantiate with question_to_checked_assertions_chain or using the from_llm class method.')
        if 'question_to_checked_assertions_chain' not in values and values['llm'] is not None:
            question_to_checked_assertions_chain = _load_question_to_checked_assertions_chain(values['llm'], values.get('create_draft_answer_prompt', CREATE_DRAFT_ANSWER_PROMPT), values.get('list_assertions_prompt', LIST_ASSERTIONS_PROMPT), values.get('check_assertions_prompt', CHECK_ASSERTIONS_PROMPT), values.get('revised_answer_prompt', REVISED_ANSWER_PROMPT))
            values['question_to_checked_assertions_chain'] = question_to_checked_assertions_chain
    return values
null
from_texts
index = create_index(texts, embeddings) return cls(embeddings=embeddings, index=index, texts=texts, metadatas= metadatas, **kwargs)
@classmethod
def from_texts(cls, texts: List[str], embeddings: Embeddings, metadatas: Optional[List[dict]]=None, **kwargs: Any) ->SVMRetriever:
    index = create_index(texts, embeddings)
    return cls(embeddings=embeddings, index=index, texts=texts, metadatas=metadatas, **kwargs)
null
_llm_type
"""Return type of llm.""" return 'fake'
@property
def _llm_type(self) ->str:
    """Return type of llm."""
    return 'fake'
Return type of llm.
parse_issues
""" Extracts title and number from each Issue and puts them in a dictionary Parameters: issues(List[Issue]): A list of Github Issue objects Returns: List[dict]: A dictionary of issue titles and numbers """ parsed = [] for issue in issues: title = issue.title number = issue.number opened_by = issue.user.login if issue.user else None issue_dict = {'title': title, 'number': number} if opened_by is not None: issue_dict['opened_by'] = opened_by parsed.append(issue_dict) return parsed
def parse_issues(self, issues: List[Issue]) ->List[dict]:
    """
    Extracts title and number from each Issue and puts them in a dictionary
    Parameters:
        issues(List[Issue]): A list of Github Issue objects
    Returns:
        List[dict]: A dictionary of issue titles and numbers
    """
    parsed = []
    for issue in issues:
        title = issue.title
        number = issue.number
        opened_by = issue.user.login if issue.user else None
        issue_dict = {'title': title, 'number': number}
        if opened_by is not None:
            issue_dict['opened_by'] = opened_by
        parsed.append(issue_dict)
    return parsed
Extracts title and number from each Issue and puts them in a dictionary Parameters: issues(List[Issue]): A list of Github Issue objects Returns: List[dict]: A dictionary of issue titles and numbers
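An illustrative input/output pair for the parser above; the issue titles, numbers, and login are hypothetical.

# given two GitHub issues, one with a known author and one without,
# parse_issues would return something like:
[{'title': 'Fix flaky integration test', 'number': 1234, 'opened_by': 'octocat'}, {'title': 'Update README badges', 'number': 1235}]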
img_prompt_func
""" GPT-4V prompt for image analysis. :param data_dict: A dict with images and a user-provided question. :param num_images: Number of images to include in the prompt. :return: A list containing message objects for each image and the text prompt. """ messages = [] if data_dict['context']['images']: for image in data_dict['context']['images'][:num_images]: image_message = {'type': 'image_url', 'image_url': f'data:image/jpeg;base64,{image}'} messages.append(image_message) text_message = {'type': 'text', 'text': """You are a helpful assistant that gives a description of food pictures. Give a detailed summary of the image. Give reccomendations for similar foods to try. """ } messages.append(text_message) return [HumanMessage(content=messages)]
def img_prompt_func(data_dict, num_images=1):
    """
    GPT-4V prompt for image analysis.
    :param data_dict: A dict with images and a user-provided question.
    :param num_images: Number of images to include in the prompt.
    :return: A list containing message objects for each image and the text prompt.
    """
    messages = []
    if data_dict['context']['images']:
        for image in data_dict['context']['images'][:num_images]:
            image_message = {'type': 'image_url', 'image_url': f'data:image/jpeg;base64,{image}'}
            messages.append(image_message)
    text_message = {'type': 'text', 'text': """You are a helpful assistant that gives a description of food pictures. Give a detailed summary of the image. Give reccomendations for similar foods to try. """}
    messages.append(text_message)
    return [HumanMessage(content=messages)]
GPT-4V prompt for image analysis. :param data_dict: A dict with images and a user-provided question. :param num_images: Number of images to include in the prompt. :return: A list containing message objects for each image and the text prompt.
add_texts
"""Insert text data into TencentVectorDB.""" texts = list(texts) try: embeddings = self.embedding_func.embed_documents(texts) except NotImplementedError: embeddings = [self.embedding_func.embed_query(x) for x in texts] if len(embeddings) == 0: logger.debug('Nothing to insert, skipping.') return [] pks: list[str] = [] total_count = len(embeddings) for start in range(0, total_count, batch_size): docs = [] end = min(start + batch_size, total_count) for id in range(start, end, 1): metadata = '{}' if metadatas is not None: metadata = json.dumps(metadatas[id]) doc = self.document.Document(id='{}-{}-{}'.format(time.time_ns(), hash(texts[id]), id), vector=embeddings[id], text=texts[id], metadata=metadata) docs.append(doc) pks.append(str(id)) self.collection.upsert(docs, timeout) return pks
def add_texts(self, texts: Iterable[str], metadatas: Optional[List[dict]]=None, timeout: Optional[int]=None, batch_size: int=1000, **kwargs: Any) ->List[str]:
    """Insert text data into TencentVectorDB."""
    texts = list(texts)
    try:
        embeddings = self.embedding_func.embed_documents(texts)
    except NotImplementedError:
        embeddings = [self.embedding_func.embed_query(x) for x in texts]
    if len(embeddings) == 0:
        logger.debug('Nothing to insert, skipping.')
        return []
    pks: list[str] = []
    total_count = len(embeddings)
    for start in range(0, total_count, batch_size):
        docs = []
        end = min(start + batch_size, total_count)
        for id in range(start, end, 1):
            metadata = '{}'
            if metadatas is not None:
                metadata = json.dumps(metadatas[id])
            doc = self.document.Document(id='{}-{}-{}'.format(time.time_ns(), hash(texts[id]), id), vector=embeddings[id], text=texts[id], metadata=metadata)
            docs.append(doc)
            pks.append(str(id))
        self.collection.upsert(docs, timeout)
    return pks
Insert text data into TencentVectorDB.
_log_trace_from_run
"""Logs a LangChain Run to W*B as a W&B Trace.""" self._ensure_run() root_span = self.run_processor.process_span(run) model_dict = self.run_processor.process_model(run) if root_span is None: return model_trace = self._trace_tree.WBTraceTree(root_span=root_span, model_dict= model_dict) if self._wandb.run is not None: self._wandb.run.log({'langchain_trace': model_trace})
def _log_trace_from_run(self, run: Run) ->None:
    """Logs a LangChain Run to W*B as a W&B Trace."""
    self._ensure_run()
    root_span = self.run_processor.process_span(run)
    model_dict = self.run_processor.process_model(run)
    if root_span is None:
        return
    model_trace = self._trace_tree.WBTraceTree(root_span=root_span, model_dict=model_dict)
    if self._wandb.run is not None:
        self._wandb.run.log({'langchain_trace': model_trace})
Logs a LangChain Run to W*B as a W&B Trace.
from_chain_type
"""Load chain from chain type.""" _chain_type_kwargs = chain_type_kwargs or {} combine_documents_chain = load_qa_chain(llm, chain_type=chain_type, ** _chain_type_kwargs) return cls(combine_documents_chain=combine_documents_chain, **kwargs)
@classmethod
def from_chain_type(cls, llm: BaseLanguageModel, chain_type: str='stuff', chain_type_kwargs: Optional[dict]=None, **kwargs: Any) ->BaseRetrievalQA:
    """Load chain from chain type."""
    _chain_type_kwargs = chain_type_kwargs or {}
    combine_documents_chain = load_qa_chain(llm, chain_type=chain_type, **_chain_type_kwargs)
    return cls(combine_documents_chain=combine_documents_chain, **kwargs)
Load chain from chain type.
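A hedged sketch of calling this constructor on a concrete subclass such as RetrievalQA; the llm and vectorstore objects are assumed to exist already and are not built here.

# `llm` is any LangChain language model, `vectorstore` any LangChain vector store
qa_chain = RetrievalQA.from_chain_type(llm, chain_type='stuff', chain_type_kwargs={'verbose': True}, retriever=vectorstore.as_retriever())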
prep_prompts
"""Prepare prompts from inputs.""" stop = None if len(input_list) == 0: return [], stop if 'stop' in input_list[0]: stop = input_list[0]['stop'] prompts = [] for inputs in input_list: selected_inputs = {k: inputs[k] for k in self.prompt.input_variables} prompt = self.prompt.format_prompt(**selected_inputs) _colored_text = get_colored_text(prompt.to_string(), 'green') _text = 'Prompt after formatting:\n' + _colored_text if run_manager: run_manager.on_text(_text, end='\n', verbose=self.verbose) if 'stop' in inputs and inputs['stop'] != stop: raise ValueError( 'If `stop` is present in any inputs, should be present in all.') prompts.append(prompt) return prompts, stop
def prep_prompts(self, input_list: List[Dict[str, Any]], run_manager: Optional[CallbackManagerForChainRun]=None) ->Tuple[List[PromptValue], Optional[List[str]]]:
    """Prepare prompts from inputs."""
    stop = None
    if len(input_list) == 0:
        return [], stop
    if 'stop' in input_list[0]:
        stop = input_list[0]['stop']
    prompts = []
    for inputs in input_list:
        selected_inputs = {k: inputs[k] for k in self.prompt.input_variables}
        prompt = self.prompt.format_prompt(**selected_inputs)
        _colored_text = get_colored_text(prompt.to_string(), 'green')
        _text = 'Prompt after formatting:\n' + _colored_text
        if run_manager:
            run_manager.on_text(_text, end='\n', verbose=self.verbose)
        if 'stop' in inputs and inputs['stop'] != stop:
            raise ValueError('If `stop` is present in any inputs, should be present in all.')
        prompts.append(prompt)
    return prompts, stop
Prepare prompts from inputs.
query_with_sources
"""Query the vectorstore and get back sources.""" llm = llm or OpenAI(temperature=0) retriever_kwargs = retriever_kwargs or {} chain = RetrievalQAWithSourcesChain.from_chain_type(llm, retriever=self. vectorstore.as_retriever(**retriever_kwargs), **kwargs) return chain({chain.question_key: question})
def query_with_sources(self, question: str, llm: Optional[BaseLanguageModel]=None, retriever_kwargs: Optional[Dict[str, Any]]=None, **kwargs: Any) ->dict:
    """Query the vectorstore and get back sources."""
    llm = llm or OpenAI(temperature=0)
    retriever_kwargs = retriever_kwargs or {}
    chain = RetrievalQAWithSourcesChain.from_chain_type(llm, retriever=self.vectorstore.as_retriever(**retriever_kwargs), **kwargs)
    return chain({chain.question_key: question})
Query the vectorstore and get back sources.
test_vertexai_instantiation
if model_name: model = ChatVertexAI(model_name=model_name) else: model = ChatVertexAI() assert model._llm_type == 'vertexai' try: assert model.model_name == model.client._model_id except AttributeError: assert model.model_name == model.client._model_name.split('/')[-1]
@pytest.mark.parametrize('model_name', model_names_to_test)
def test_vertexai_instantiation(model_name: str) ->None:
    if model_name:
        model = ChatVertexAI(model_name=model_name)
    else:
        model = ChatVertexAI()
    assert model._llm_type == 'vertexai'
    try:
        assert model.model_name == model.client._model_id
    except AttributeError:
        assert model.model_name == model.client._model_name.split('/')[-1]
null
deprecated
"""Decorator to mark a function, a class, or a property as deprecated. When deprecating a classmethod, a staticmethod, or a property, the ``@deprecated`` decorator should go *under* ``@classmethod`` and ``@staticmethod`` (i.e., `deprecated` should directly decorate the underlying callable), but *over* ``@property``. When deprecating a class ``C`` intended to be used as a base class in a multiple inheritance hierarchy, ``C`` *must* define an ``__init__`` method (if ``C`` instead inherited its ``__init__`` from its own base class, then ``@deprecated`` would mess up ``__init__`` inheritance when installing its own (deprecation-emitting) ``C.__init__``). Parameters are the same as for `warn_deprecated`, except that *obj_type* defaults to 'class' if decorating a class, 'attribute' if decorating a property, and 'function' otherwise. Arguments: since : str The release at which this API became deprecated. message : str, optional Override the default deprecation message. The %(since)s, %(name)s, %(alternative)s, %(obj_type)s, %(addendum)s, and %(removal)s format specifiers will be replaced by the values of the respective arguments passed to this function. name : str, optional The name of the deprecated object. alternative : str, optional An alternative API that the user may use in place of the deprecated API. The deprecation warning will tell the user about this alternative if provided. pending : bool, optional If True, uses a PendingDeprecationWarning instead of a DeprecationWarning. Cannot be used together with removal. obj_type : str, optional The object type being deprecated. addendum : str, optional Additional text appended directly to the final message. removal : str, optional The expected removal version. With the default (an empty string), a removal version is automatically computed from since. Set to other Falsy values to not schedule a removal date. Cannot be used together with pending. Examples -------- .. 
code-block:: python @deprecated('1.4.0') def the_function_to_deprecate(): pass """ def deprecate(obj: T, *, _obj_type: str=obj_type, _name: str=name, _message: str=message, _alternative: str=alternative, _pending: bool=pending, _addendum: str=addendum) ->T: """Implementation of the decorator returned by `deprecated`.""" if isinstance(obj, type): if not _obj_type: _obj_type = 'class' wrapped = obj.__init__ _name = _name or obj.__name__ old_doc = obj.__doc__ def finalize(wrapper: Callable[..., Any], new_doc: str) ->T: """Finalize the deprecation of a class.""" try: obj.__doc__ = new_doc except AttributeError: pass obj.__init__ = functools.wraps(obj.__init__)(wrapper) return obj elif isinstance(obj, property): if not _obj_type: _obj_type = 'attribute' wrapped = None _name = _name or obj.fget.__name__ old_doc = obj.__doc__ class _deprecated_property(type(obj)): """A deprecated property.""" def __get__(self, instance, owner=None): if instance is not None or owner is not None: emit_warning() return super().__get__(instance, owner) def __set__(self, instance, value): if instance is not None: emit_warning() return super().__set__(instance, value) def __delete__(self, instance): if instance is not None: emit_warning() return super().__delete__(instance) def __set_name__(self, owner, set_name): nonlocal _name if _name == '<lambda>': _name = set_name def finalize(_: Any, new_doc: str) ->Any: """Finalize the property.""" return _deprecated_property(fget=obj.fget, fset=obj.fset, fdel= obj.fdel, doc=new_doc) else: if not _obj_type: _obj_type = 'function' wrapped = obj _name = _name or obj.__name__ old_doc = wrapped.__doc__ def finalize(wrapper: Callable[..., Any], new_doc: str) ->T: """Wrap the wrapped function using the wrapper and update the docstring. Args: wrapper: The wrapper function. new_doc: The new docstring. Returns: The wrapped function. """ wrapper = functools.wraps(wrapped)(wrapper) wrapper.__doc__ = new_doc return wrapper def emit_warning() ->None: """Emit the warning.""" warn_deprecated(since, message=_message, name=_name, alternative= _alternative, pending=_pending, obj_type=_obj_type, addendum= _addendum, removal=removal) def warning_emitting_wrapper(*args: Any, **kwargs: Any) ->Any: """Wrapper for the original wrapped callable that emits a warning. Args: *args: The positional arguments to the function. **kwargs: The keyword arguments to the function. Returns: The return value of the function being wrapped. """ emit_warning() return wrapped(*args, **kwargs) old_doc = inspect.cleandoc(old_doc or '').strip('\n') if not old_doc: new_doc = '[*Deprecated*]' else: new_doc = f'[*Deprecated*] {old_doc}' notes_header = '\nNotes\n-----' components = [message, f'Use {alternative} instead.' if alternative else '', addendum] details = ' '.join([component.strip() for component in components if component]) new_doc += f"""[*Deprecated*] {old_doc} {notes_header if notes_header not in old_doc else ''} .. deprecated:: {since} {details}""" return finalize(warning_emitting_wrapper, new_doc) return deprecate
def deprecated(since: str, *, message: str='', name: str='', alternative: str='', pending: bool=False, obj_type: str='', addendum: str='', removal: str='') ->Callable[[T], T]: """Decorator to mark a function, a class, or a property as deprecated. When deprecating a classmethod, a staticmethod, or a property, the ``@deprecated`` decorator should go *under* ``@classmethod`` and ``@staticmethod`` (i.e., `deprecated` should directly decorate the underlying callable), but *over* ``@property``. When deprecating a class ``C`` intended to be used as a base class in a multiple inheritance hierarchy, ``C`` *must* define an ``__init__`` method (if ``C`` instead inherited its ``__init__`` from its own base class, then ``@deprecated`` would mess up ``__init__`` inheritance when installing its own (deprecation-emitting) ``C.__init__``). Parameters are the same as for `warn_deprecated`, except that *obj_type* defaults to 'class' if decorating a class, 'attribute' if decorating a property, and 'function' otherwise. Arguments: since : str The release at which this API became deprecated. message : str, optional Override the default deprecation message. The %(since)s, %(name)s, %(alternative)s, %(obj_type)s, %(addendum)s, and %(removal)s format specifiers will be replaced by the values of the respective arguments passed to this function. name : str, optional The name of the deprecated object. alternative : str, optional An alternative API that the user may use in place of the deprecated API. The deprecation warning will tell the user about this alternative if provided. pending : bool, optional If True, uses a PendingDeprecationWarning instead of a DeprecationWarning. Cannot be used together with removal. obj_type : str, optional The object type being deprecated. addendum : str, optional Additional text appended directly to the final message. removal : str, optional The expected removal version. With the default (an empty string), a removal version is automatically computed from since. Set to other Falsy values to not schedule a removal date. Cannot be used together with pending. Examples -------- .. 
code-block:: python @deprecated('1.4.0') def the_function_to_deprecate(): pass """ def deprecate(obj: T, *, _obj_type: str=obj_type, _name: str=name, _message: str=message, _alternative: str=alternative, _pending: bool=pending, _addendum: str=addendum) ->T: """Implementation of the decorator returned by `deprecated`.""" if isinstance(obj, type): if not _obj_type: _obj_type = 'class' wrapped = obj.__init__ _name = _name or obj.__name__ old_doc = obj.__doc__ def finalize(wrapper: Callable[..., Any], new_doc: str) ->T: """Finalize the deprecation of a class.""" try: obj.__doc__ = new_doc except AttributeError: pass obj.__init__ = functools.wraps(obj.__init__)(wrapper) return obj elif isinstance(obj, property): if not _obj_type: _obj_type = 'attribute' wrapped = None _name = _name or obj.fget.__name__ old_doc = obj.__doc__ class _deprecated_property(type(obj)): """A deprecated property.""" def __get__(self, instance, owner=None): if instance is not None or owner is not None: emit_warning() return super().__get__(instance, owner) def __set__(self, instance, value): if instance is not None: emit_warning() return super().__set__(instance, value) def __delete__(self, instance): if instance is not None: emit_warning() return super().__delete__(instance) def __set_name__(self, owner, set_name): nonlocal _name if _name == '<lambda>': _name = set_name def finalize(_: Any, new_doc: str) ->Any: """Finalize the property.""" return _deprecated_property(fget=obj.fget, fset=obj.fset, fdel=obj.fdel, doc=new_doc) else: if not _obj_type: _obj_type = 'function' wrapped = obj _name = _name or obj.__name__ old_doc = wrapped.__doc__ def finalize(wrapper: Callable[..., Any], new_doc: str) ->T: """Wrap the wrapped function using the wrapper and update the docstring. Args: wrapper: The wrapper function. new_doc: The new docstring. Returns: The wrapped function. """ wrapper = functools.wraps(wrapped)(wrapper) wrapper.__doc__ = new_doc return wrapper def emit_warning() ->None: """Emit the warning.""" warn_deprecated(since, message=_message, name=_name, alternative=_alternative, pending=_pending, obj_type= _obj_type, addendum=_addendum, removal=removal) def warning_emitting_wrapper(*args: Any, **kwargs: Any) ->Any: """Wrapper for the original wrapped callable that emits a warning. Args: *args: The positional arguments to the function. **kwargs: The keyword arguments to the function. Returns: The return value of the function being wrapped. """ emit_warning() return wrapped(*args, **kwargs) old_doc = inspect.cleandoc(old_doc or '').strip('\n') if not old_doc: new_doc = '[*Deprecated*]' else: new_doc = f'[*Deprecated*] {old_doc}' notes_header = '\nNotes\n-----' components = [message, f'Use {alternative} instead.' if alternative else '', addendum] details = ' '.join([component.strip() for component in components if component]) new_doc += f"""[*Deprecated*] {old_doc} {notes_header if notes_header not in old_doc else ''} .. deprecated:: {since} {details}""" return finalize(warning_emitting_wrapper, new_doc) return deprecate
Decorator to mark a function, a class, or a property as deprecated. When deprecating a classmethod, a staticmethod, or a property, the ``@deprecated`` decorator should go *under* ``@classmethod`` and ``@staticmethod`` (i.e., `deprecated` should directly decorate the underlying callable), but *over* ``@property``. When deprecating a class ``C`` intended to be used as a base class in a multiple inheritance hierarchy, ``C`` *must* define an ``__init__`` method (if ``C`` instead inherited its ``__init__`` from its own base class, then ``@deprecated`` would mess up ``__init__`` inheritance when installing its own (deprecation-emitting) ``C.__init__``). Parameters are the same as for `warn_deprecated`, except that *obj_type* defaults to 'class' if decorating a class, 'attribute' if decorating a property, and 'function' otherwise. Arguments: since : str The release at which this API became deprecated. message : str, optional Override the default deprecation message. The %(since)s, %(name)s, %(alternative)s, %(obj_type)s, %(addendum)s, and %(removal)s format specifiers will be replaced by the values of the respective arguments passed to this function. name : str, optional The name of the deprecated object. alternative : str, optional An alternative API that the user may use in place of the deprecated API. The deprecation warning will tell the user about this alternative if provided. pending : bool, optional If True, uses a PendingDeprecationWarning instead of a DeprecationWarning. Cannot be used together with removal. obj_type : str, optional The object type being deprecated. addendum : str, optional Additional text appended directly to the final message. removal : str, optional The expected removal version. With the default (an empty string), a removal version is automatically computed from since. Set to other Falsy values to not schedule a removal date. Cannot be used together with pending. Examples -------- .. code-block:: python @deprecated('1.4.0') def the_function_to_deprecate(): pass
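To make the decorator-ordering rule described above concrete, a short hypothetical sketch (class and member names invented): @deprecated goes under @classmethod or @staticmethod but over @property.

class Client:
    @classmethod
    @deprecated('0.2.0', alternative='Client.create')
    def from_defaults(cls) ->'Client':
        # deprecated alternate constructor; calling it emits a warning
        return cls()

    @deprecated('0.2.0', removal='0.3.0')
    @property
    def legacy_timeout(self) ->int:
        # deprecated attribute; reading it emits a warning
        return 30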
get_triples
"""Get all triples in the graph.""" return [(u, v, d['relation']) for u, v, d in self._graph.edges(data=True)]
def get_triples(self) ->List[Tuple[str, str, str]]:
    """Get all triples in the graph."""
    return [(u, v, d['relation']) for u, v, d in self._graph.edges(data=True)]
Get all triples in the graph.
_make_id
return f'{_hash(prompt)}#{_hash(llm_string)}'
@staticmethod
def _make_id(prompt: str, llm_string: str) ->str:
    return f'{_hash(prompt)}#{_hash(llm_string)}'
null
_call
"""Call out to Anthropic's completion endpoint. Args: prompt: The prompt to pass into the model. stop: Optional list of stop words to use when generating. Returns: The string generated by the model. Example: .. code-block:: python prompt = "What are the biggest risks facing humanity?" prompt = f"\\n\\nHuman: {prompt}\\n\\nAssistant:" response = model(prompt) """ if self.streaming: completion = '' for chunk in self._stream(prompt=prompt, stop=stop, run_manager= run_manager, **kwargs): completion += chunk.text return completion stop = self._get_anthropic_stop(stop) params = {**self._default_params, **kwargs} response = self.client.completions.create(prompt=self._wrap_prompt(prompt), stop_sequences=stop, **params) return response.completion
def _call(self, prompt: str, stop: Optional[List[str]]=None, run_manager: Optional[CallbackManagerForLLMRun]=None, **kwargs: Any) ->str:
    """Call out to Anthropic's completion endpoint.

    Args:
        prompt: The prompt to pass into the model.
        stop: Optional list of stop words to use when generating.

    Returns:
        The string generated by the model.

    Example:
        .. code-block:: python

            prompt = "What are the biggest risks facing humanity?"
            prompt = f"\\n\\nHuman: {prompt}\\n\\nAssistant:"
            response = model(prompt)
    """
    if self.streaming:
        completion = ''
        for chunk in self._stream(prompt=prompt, stop=stop, run_manager=run_manager, **kwargs):
            completion += chunk.text
        return completion
    stop = self._get_anthropic_stop(stop)
    params = {**self._default_params, **kwargs}
    response = self.client.completions.create(prompt=self._wrap_prompt(prompt), stop_sequences=stop, **params)
    return response.completion
Call out to Anthropic's completion endpoint. Args: prompt: The prompt to pass into the model. stop: Optional list of stop words to use when generating. Returns: The string generated by the model. Example: .. code-block:: python prompt = "What are the biggest risks facing humanity?" prompt = f"\n\nHuman: {prompt}\n\nAssistant:" response = model(prompt)
_get_diffbot_data
"""Get Diffbot file from Diffbot REST API.""" diffbot_url = self._diffbot_api_url('article') params = {'token': self.api_token, 'url': url} response = requests.get(diffbot_url, params=params, timeout=10) return response.json() if response.ok else {}
def _get_diffbot_data(self, url: str) ->Any:
    """Get Diffbot file from Diffbot REST API."""
    diffbot_url = self._diffbot_api_url('article')
    params = {'token': self.api_token, 'url': url}
    response = requests.get(diffbot_url, params=params, timeout=10)
    return response.json() if response.ok else {}
Get Diffbot file from Diffbot REST API.
get_lc_namespace
"""Get the namespace of the langchain object.""" return ['langchain', 'schema', 'output']
@classmethod
def get_lc_namespace(cls) ->List[str]:
    """Get the namespace of the langchain object."""
    return ['langchain', 'schema', 'output']
Get the namespace of the langchain object.
parse
"""Returns the input text with no changes.""" return text
def parse(self, text: str) ->str:
    """Returns the input text with no changes."""
    return text
Returns the input text with no changes.
on_llm_start
self.on_llm_start_common()
def on_llm_start(self, *args: Any, **kwargs: Any) ->Any:
    self.on_llm_start_common()
null
test_timescalevector
"""Test end to end construction and search.""" texts = ['foo', 'bar', 'baz'] docsearch = TimescaleVector.from_texts(texts=texts, collection_name= 'test_collection', embedding=FakeEmbeddingsWithAdaDimension(), service_url=SERVICE_URL, pre_delete_collection=True) output = docsearch.similarity_search('foo', k=1) assert output == [Document(page_content='foo')]
def test_timescalevector() ->None:
    """Test end to end construction and search."""
    texts = ['foo', 'bar', 'baz']
    docsearch = TimescaleVector.from_texts(texts=texts, collection_name='test_collection', embedding=FakeEmbeddingsWithAdaDimension(), service_url=SERVICE_URL, pre_delete_collection=True)
    output = docsearch.similarity_search('foo', k=1)
    assert output == [Document(page_content='foo')]
Test end to end construction and search.
test_parse_json_with_python_dict
parsed = parse_json_markdown(JSON_WITH_PYTHON_DICT) assert parsed == {'action': 'Final Answer', 'action_input': {'foo': 'bar', 'bar': 'foo'}}
def test_parse_json_with_python_dict() ->None:
    parsed = parse_json_markdown(JSON_WITH_PYTHON_DICT)
    assert parsed == {'action': 'Final Answer', 'action_input': {'foo': 'bar', 'bar': 'foo'}}
null
parse
"""Parse text into agent action/finish."""
@abstractmethod
def parse(self, text: str) ->Union[AgentAction, AgentFinish]:
    """Parse text into agent action/finish."""
Parse text into agent action/finish.
test_embed_query_different_lengths
"""Test embedding queries of different lengths.""" model = GoogleGenerativeAIEmbeddings(model=_MODEL) result = model.embed_query(query) assert len(result) == 768
@pytest.mark.parametrize('query', ['Hi', 'This is a longer query string to test the embedding functionality of the model against the pickle rick?'])
def test_embed_query_different_lengths(query: str) ->None:
    """Test embedding queries of different lengths."""
    model = GoogleGenerativeAIEmbeddings(model=_MODEL)
    result = model.embed_query(query)
    assert len(result) == 768
Test embedding queries of different lengths.
load
"""Load data into document objects.""" return list(self.lazy_load())
def load(self) ->List[Document]:
    """Load data into document objects."""
    return list(self.lazy_load())
Load data into document objects.
test_bedrock_streaming
"""Test streaming tokens from OpenAI.""" for token in chat.stream("I'm Pickle Rick"): assert isinstance(token.content, str)
@pytest.mark.scheduled
def test_bedrock_streaming(chat: BedrockChat) ->None:
    """Test streaming tokens from OpenAI."""
    for token in chat.stream("I'm Pickle Rick"):
        assert isinstance(token.content, str)
Test streaming tokens from OpenAI.
test_run_kwargs
"""Test run method with kwargs.""" chain = FakeChain(the_input_keys=['foo', 'bar']) output = chain.run(foo='bar', bar='foo') assert output == 'baz'
def test_run_kwargs() ->None:
    """Test run method with kwargs."""
    chain = FakeChain(the_input_keys=['foo', 'bar'])
    output = chain.run(foo='bar', bar='foo')
    assert output == 'baz'
Test run method with kwargs.
test_partial_with_chat_prompts
prompt_a = ChatPromptTemplate(input_variables=['foo'], messages=[ MessagesPlaceholder(variable_name='foo')]) prompt_b = ChatPromptTemplate.from_template('jim {bar}') pipeline_prompt = PipelinePromptTemplate(final_prompt=prompt_a, pipeline_prompts=[('foo', prompt_b)]) assert pipeline_prompt.input_variables == ['bar'] output = pipeline_prompt.format_prompt(bar='okay') assert output.to_messages()[0].content == 'jim okay'
def test_partial_with_chat_prompts() ->None:
    prompt_a = ChatPromptTemplate(input_variables=['foo'], messages=[MessagesPlaceholder(variable_name='foo')])
    prompt_b = ChatPromptTemplate.from_template('jim {bar}')
    pipeline_prompt = PipelinePromptTemplate(final_prompt=prompt_a, pipeline_prompts=[('foo', prompt_b)])
    assert pipeline_prompt.input_variables == ['bar']
    output = pipeline_prompt.format_prompt(bar='okay')
    assert output.to_messages()[0].content == 'jim okay'
null
test_elasticsearch_with_relevance_score
"""Test to make sure the relevance score is scaled to 0-1.""" texts = ['foo', 'bar', 'baz'] metadatas = [{'page': str(i)} for i in range(len(texts))] embeddings = FakeEmbeddings() docsearch = ElasticsearchStore.from_texts(index_name=index_name, texts= texts, embedding=embeddings, metadatas=metadatas, ** elasticsearch_connection) embedded_query = embeddings.embed_query('foo') output = docsearch.similarity_search_by_vector_with_relevance_scores(embedding =embedded_query, k=1) assert output == [(Document(page_content='foo', metadata={'page': '0'}), 1.0)]
def test_elasticsearch_with_relevance_score(self, elasticsearch_connection: dict, index_name: str) ->None:
    """Test to make sure the relevance score is scaled to 0-1."""
    texts = ['foo', 'bar', 'baz']
    metadatas = [{'page': str(i)} for i in range(len(texts))]
    embeddings = FakeEmbeddings()
    docsearch = ElasticsearchStore.from_texts(index_name=index_name, texts=texts, embedding=embeddings, metadatas=metadatas, **elasticsearch_connection)
    embedded_query = embeddings.embed_query('foo')
    output = docsearch.similarity_search_by_vector_with_relevance_scores(embedding=embedded_query, k=1)
    assert output == [(Document(page_content='foo', metadata={'page': '0'}), 1.0)]
Test to make sure the relevance score is scaled to 0-1.
test_shell_input_validation
shell_input = ShellInput(commands=test_commands) assert isinstance(shell_input.commands, list) assert len(shell_input.commands) == 2 with warnings.catch_warnings(record=True) as w: ShellInput(commands=test_commands) assert len(w) == 1 assert str(w[-1].message ) == 'The shell tool has no safeguards by default. Use at your own risk.'
def test_shell_input_validation() ->None:
    shell_input = ShellInput(commands=test_commands)
    assert isinstance(shell_input.commands, list)
    assert len(shell_input.commands) == 2
    with warnings.catch_warnings(record=True) as w:
        ShellInput(commands=test_commands)
        assert len(w) == 1
        assert str(w[-1].message) == 'The shell tool has no safeguards by default. Use at your own risk.'
null
replace_file
try: content = source.read_text() except UnicodeDecodeError: return new_content = find_and_replace(content, replacements) if new_content != content: source.write_text(new_content)
def replace_file(source: Path, replacements: Dict[str, str]) ->None:
    try:
        content = source.read_text()
    except UnicodeDecodeError:
        return
    new_content = find_and_replace(content, replacements)
    if new_content != content:
        source.write_text(new_content)
null
lazy_parse
"""Load documents from a blob.""" mimetype = blob.mimetype if mimetype is None: raise ValueError(f'{blob} does not have a mimetype.') if mimetype in self.handlers: handler = self.handlers[mimetype] yield from handler.lazy_parse(blob) elif self.fallback_parser is not None: yield from self.fallback_parser.lazy_parse(blob) else: raise ValueError(f'Unsupported mime type: {mimetype}')
def lazy_parse(self, blob: Blob) ->Iterator[Document]:
    """Load documents from a blob."""
    mimetype = blob.mimetype
    if mimetype is None:
        raise ValueError(f'{blob} does not have a mimetype.')
    if mimetype in self.handlers:
        handler = self.handlers[mimetype]
        yield from handler.lazy_parse(blob)
    elif self.fallback_parser is not None:
        yield from self.fallback_parser.lazy_parse(blob)
    else:
        raise ValueError(f'Unsupported mime type: {mimetype}')
Load documents from a blob.
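A hedged sketch of how a mimetype-dispatching parser with handlers/fallback_parser attributes like the one above is typically wired; the concrete parser classes and import paths named here are assumptions and may differ by installation.

from langchain_community.document_loaders.blob_loaders import Blob
from langchain_community.document_loaders.parsers.generic import MimeTypeBasedParser
from langchain_community.document_loaders.parsers.txt import TextParser

parser = MimeTypeBasedParser(handlers={'text/plain': TextParser()}, fallback_parser=None)
blob = Blob.from_data(b'hello world', mime_type='text/plain')
docs = list(parser.lazy_parse(blob))  # one Document whose page_content is 'hello world'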
test_find_all_links_single
htmls = ["href='foobar.com'", 'href="foobar.com"', '<div><a class="blah" href="foobar.com">hullo</a></div>'] actual = [find_all_links(html) for html in htmls] assert actual == [['foobar.com']] * 3
def test_find_all_links_single() ->None:
    htmls = ["href='foobar.com'", 'href="foobar.com"', '<div><a class="blah" href="foobar.com">hullo</a></div>']
    actual = [find_all_links(html) for html in htmls]
    assert actual == [['foobar.com']] * 3
null
test_singlestoredb_new_vector
"""Test adding a new document""" table_name = 'test_singlestoredb_new_vector' drop(table_name) docsearch = SingleStoreDB.from_texts(texts, NormilizedFakeEmbeddings(), table_name=table_name, host=TEST_SINGLESTOREDB_URL) docsearch.add_texts(['foo']) output = docsearch.similarity_search('foo', k=2) assert output == TEST_RESULT drop(table_name)
@pytest.mark.skipif(not singlestoredb_installed, reason='singlestoredb not installed')
def test_singlestoredb_new_vector(texts: List[str]) ->None:
    """Test adding a new document"""
    table_name = 'test_singlestoredb_new_vector'
    drop(table_name)
    docsearch = SingleStoreDB.from_texts(texts, NormilizedFakeEmbeddings(), table_name=table_name, host=TEST_SINGLESTOREDB_URL)
    docsearch.add_texts(['foo'])
    output = docsearch.similarity_search('foo', k=2)
    assert output == TEST_RESULT
    drop(table_name)
Test adding a new document
test_transform_empty_html
bs_transformer = BeautifulSoupTransformer() empty_html = '<html></html>' documents = [Document(page_content=empty_html)] docs_transformed = bs_transformer.transform_documents(documents) assert docs_transformed[0].page_content == ''
@pytest.mark.requires('bs4')
def test_transform_empty_html() ->None:
    bs_transformer = BeautifulSoupTransformer()
    empty_html = '<html></html>'
    documents = [Document(page_content=empty_html)]
    docs_transformed = bs_transformer.transform_documents(documents)
    assert docs_transformed[0].page_content == ''
null
worker
old_stdout = sys.stdout sys.stdout = mystdout = StringIO() try: exec(command, globals, locals) sys.stdout = old_stdout queue.put(mystdout.getvalue()) except Exception as e: sys.stdout = old_stdout queue.put(repr(e))
@classmethod
def worker(cls, command: str, globals: Optional[Dict], locals: Optional[Dict], queue: multiprocessing.Queue) ->None:
    old_stdout = sys.stdout
    sys.stdout = mystdout = StringIO()
    try:
        exec(command, globals, locals)
        sys.stdout = old_stdout
        queue.put(mystdout.getvalue())
    except Exception as e:
        sys.stdout = old_stdout
        queue.put(repr(e))
null
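A hedged sketch of how such a worker classmethod is typically driven from the parent process; the owning class is called PythonREPL here purely for illustration.

import multiprocessing

queue: multiprocessing.Queue = multiprocessing.Queue()
# PythonREPL is a stand-in name for whichever class defines worker()
proc = multiprocessing.Process(target=PythonREPL.worker, args=('print(1 + 1)', {}, {}, queue))
proc.start()
proc.join()
print(queue.get())  # '2\n' on success, or the repr of the raised exception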