Dataset columns and value lengths:
method_name: string, 1 to 78 characters
method_body: string, 3 to 9.66k characters
full_code: string, 31 to 10.7k characters
docstring: string, 4 to 4.74k characters (null when the method has no docstring)
on_agent_finish_common
self.agent_ends += 1 self.ends += 1
def on_agent_finish_common(self) ->None: self.agent_ends += 1 self.ends += 1
null
_import_requests_tool_RequestsPatchTool
from langchain_community.tools.requests.tool import RequestsPatchTool return RequestsPatchTool
def _import_requests_tool_RequestsPatchTool() ->Any: from langchain_community.tools.requests.tool import RequestsPatchTool return RequestsPatchTool
null
test_clone_different_repo
""" Test that trying to clone a different repository into a directory already containing a clone raises a ValueError. """ clone_url = init_repo(tmpdir, 'remote_repo') repo_path = tmpdir.mkdir('local_repo').strpath loader = GitLoader(repo_path=repo_path, clone_url=clone_url) documents = loader.load() assert len(documents) == 1 other_clone_url = init_repo(tmpdir, 'other_remote_repo') other_loader = GitLoader(repo_path=repo_path, clone_url=other_clone_url) with pytest.raises(ValueError): other_loader.load()
@pytest.mark.requires('git') def test_clone_different_repo(tmpdir: py.path.local) ->None: """ Test that trying to clone a different repository into a directory already containing a clone raises a ValueError. """ clone_url = init_repo(tmpdir, 'remote_repo') repo_path = tmpdir.mkdir('local_repo').strpath loader = GitLoader(repo_path=repo_path, clone_url=clone_url) documents = loader.load() assert len(documents) == 1 other_clone_url = init_repo(tmpdir, 'other_remote_repo') other_loader = GitLoader(repo_path=repo_path, clone_url=other_clone_url) with pytest.raises(ValueError): other_loader.load()
Test that trying to clone a different repository into a directory already containing a clone raises a ValueError.
test_init_fail_no_text_column
index = mock_index(index_details) with pytest.raises(ValueError) as ex: DatabricksVectorSearch(index, embedding=DEFAULT_EMBEDDING_MODEL) assert '`text_column` is required for this index.' in str(ex.value)
@pytest.mark.requires('databricks', 'databricks.vector_search') @pytest.mark.parametrize('index_details', [ DELTA_SYNC_INDEX_SELF_MANAGED_EMBEDDINGS, DIRECT_ACCESS_INDEX]) def test_init_fail_no_text_column(index_details: dict) ->None: index = mock_index(index_details) with pytest.raises(ValueError) as ex: DatabricksVectorSearch(index, embedding=DEFAULT_EMBEDDING_MODEL) assert '`text_column` is required for this index.' in str(ex.value)
null
save_context
"""Save the context of this model run to memory.""" mem = outputs.get(self.add_memory_key) now = outputs.get(self.now_key) if mem: self.add_memory(mem, now=now)
def save_context(self, inputs: Dict[str, Any], outputs: Dict[str, Any]) ->None: """Save the context of this model run to memory.""" mem = outputs.get(self.add_memory_key) now = outputs.get(self.now_key) if mem: self.add_memory(mem, now=now)
Save the context of this model run to memory.
test_bedrock_batch
"""Test batch tokens from BedrockChat.""" result = chat.batch(["I'm Pickle Rick", "I'm not Pickle Rick"]) for token in result: assert isinstance(token.content, str)
@pytest.mark.scheduled def test_bedrock_batch(chat: BedrockChat) ->None: """Test batch tokens from BedrockChat.""" result = chat.batch(["I'm Pickle Rick", "I'm not Pickle Rick"]) for token in result: assert isinstance(token.content, str)
Test batch tokens from BedrockChat.
on_retriever_start_common
self.starts += 1 self.retriever_starts += 1
def on_retriever_start_common(self) ->None: self.starts += 1 self.retriever_starts += 1
null
config_specs
with _enums_for_spec_lock: if (which_enum := _enums_for_spec.get(self.which)): pass else: which_enum = StrEnum(self.which.name or self.which.id, ((v, v) for v in list(self.alternatives.keys()) + [self.default_key])) _enums_for_spec[self.which] = cast(Type[StrEnum], which_enum) return get_unique_config_specs([ConfigurableFieldSpec(id=self.which.id, name=self.which.name, description=self.which.description, annotation=which_enum, default=self.default_key, is_shared=self.which.is_shared)] + ([prefix_config_spec(s, f'{self.which.id}=={self.default_key}') for s in self.default.config_specs] if self.prefix_keys else self.default.config_specs) + [(prefix_config_spec(s, f'{self.which.id}=={alt_key}') if self.prefix_keys else s) for alt_key, alt in self.alternatives.items() if isinstance(alt, RunnableSerializable) for s in alt.config_specs])
@property def config_specs(self) ->List[ConfigurableFieldSpec]: with _enums_for_spec_lock: if (which_enum := _enums_for_spec.get(self.which)): pass else: which_enum = StrEnum(self.which.name or self.which.id, ((v, v) for v in list(self.alternatives.keys()) + [self.default_key])) _enums_for_spec[self.which] = cast(Type[StrEnum], which_enum) return get_unique_config_specs([ConfigurableFieldSpec(id=self.which.id, name=self.which.name, description=self.which.description, annotation=which_enum, default=self.default_key, is_shared=self.which.is_shared)] + ([prefix_config_spec(s, f'{self.which.id}=={self.default_key}') for s in self.default.config_specs] if self.prefix_keys else self.default.config_specs) + [(prefix_config_spec(s, f'{self.which.id}=={alt_key}') if self.prefix_keys else s) for alt_key, alt in self.alternatives.items() if isinstance(alt, RunnableSerializable) for s in alt.config_specs])
null
test_extra_kwargs
chat = ChatBaichuan(temperature=0.88, top_p=0.7) assert chat.temperature == 0.88 assert chat.top_p == 0.7
def test_extra_kwargs() ->None: chat = ChatBaichuan(temperature=0.88, top_p=0.7) assert chat.temperature == 0.88 assert chat.top_p == 0.7
null
_create_stream
if self.stop is not None and stop is not None: raise ValueError('`stop` found in both the input and default params.') elif self.stop is not None: stop = self.stop elif stop is None: stop = [] params = self._default_params if 'model' in kwargs: params['model'] = kwargs['model'] if 'options' in kwargs: params['options'] = kwargs['options'] else: params['options'] = {**params['options'], 'stop': stop, **kwargs} if payload.get('messages'): request_payload = {'messages': payload.get('messages', []), **params} else: request_payload = {'prompt': payload.get('prompt'), 'images': payload.get('images', []), **params} response = requests.post(url=api_url, headers={'Content-Type': 'application/json'}, json=request_payload, stream=True, timeout=self.timeout) response.encoding = 'utf-8' if response.status_code != 200: if response.status_code == 404: raise OllamaEndpointNotFoundError( f'Ollama call failed with status code 404. Maybe your model is not found and you should pull the model with `ollama pull {self.model}`.' ) else: optional_detail = response.json().get('error') raise ValueError( f'Ollama call failed with status code {response.status_code}. Details: {optional_detail}' ) return response.iter_lines(decode_unicode=True)
def _create_stream(self, api_url: str, payload: Any, stop: Optional[List[str]]=None, **kwargs: Any) ->Iterator[str]: if self.stop is not None and stop is not None: raise ValueError('`stop` found in both the input and default params.') elif self.stop is not None: stop = self.stop elif stop is None: stop = [] params = self._default_params if 'model' in kwargs: params['model'] = kwargs['model'] if 'options' in kwargs: params['options'] = kwargs['options'] else: params['options'] = {**params['options'], 'stop': stop, **kwargs} if payload.get('messages'): request_payload = {'messages': payload.get('messages', []), **params} else: request_payload = {'prompt': payload.get('prompt'), 'images': payload.get('images', []), **params} response = requests.post(url=api_url, headers={'Content-Type': 'application/json'}, json=request_payload, stream=True, timeout=self.timeout) response.encoding = 'utf-8' if response.status_code != 200: if response.status_code == 404: raise OllamaEndpointNotFoundError( f'Ollama call failed with status code 404. Maybe your model is not found and you should pull the model with `ollama pull {self.model}`.' ) else: optional_detail = response.json().get('error') raise ValueError( f'Ollama call failed with status code {response.status_code}. Details: {optional_detail}' ) return response.iter_lines(decode_unicode=True)
null
test_memory_with_message_store
"""Test the memory with a message store.""" message_history = UpstashRedisChatMessageHistory(url=URL, token=TOKEN, ttl= 10, session_id='my-test-session') memory = ConversationBufferMemory(memory_key='baz', chat_memory= message_history, return_messages=True) memory.chat_memory.add_ai_message('This is me, the AI') memory.chat_memory.add_user_message('This is me, the human') messages = memory.chat_memory.messages messages_json = json.dumps([message_to_dict(msg) for msg in messages]) assert 'This is me, the AI' in messages_json assert 'This is me, the human' in messages_json memory.chat_memory.clear()
@pytest.mark.requires('upstash_redis') def test_memory_with_message_store() ->None: """Test the memory with a message store.""" message_history = UpstashRedisChatMessageHistory(url=URL, token=TOKEN, ttl=10, session_id='my-test-session') memory = ConversationBufferMemory(memory_key='baz', chat_memory= message_history, return_messages=True) memory.chat_memory.add_ai_message('This is me, the AI') memory.chat_memory.add_user_message('This is me, the human') messages = memory.chat_memory.messages messages_json = json.dumps([message_to_dict(msg) for msg in messages]) assert 'This is me, the AI' in messages_json assert 'This is me, the human' in messages_json memory.chat_memory.clear()
Test the memory with a message store.
test_all_imports
assert set(__all__) == set(EXPECTED_ALL)
def test_all_imports() ->None: assert set(__all__) == set(EXPECTED_ALL)
null
drop
""" Helper function: Drop data """ get_named_result(self.connection, f'DROP TABLE IF EXISTS {self.config.database}.{self.config.table}')
def drop(self) ->None: """ Helper function: Drop data """ get_named_result(self.connection, f'DROP TABLE IF EXISTS {self.config.database}.{self.config.table}')
Helper function: Drop data
test_bilibili_loader
"""Test Bilibili Loader.""" loader = BiliBiliLoader(['https://www.bilibili.com/video/BV1xt411o7Xu/', 'https://www.bilibili.com/video/av330407025/']) docs = loader.load() assert len(docs) == 2 assert len(docs[0].page_content) > 0 assert docs[1].metadata['owner']['mid'] == 398095160 assert docs[1].page_content == '' assert docs[1].metadata['owner']['mid'] == 398095160
def test_bilibili_loader() ->None: """Test Bilibili Loader.""" loader = BiliBiliLoader(['https://www.bilibili.com/video/BV1xt411o7Xu/', 'https://www.bilibili.com/video/av330407025/']) docs = loader.load() assert len(docs) == 2 assert len(docs[0].page_content) > 0 assert docs[1].metadata['owner']['mid'] == 398095160 assert docs[1].page_content == '' assert docs[1].metadata['owner']['mid'] == 398095160
Test Bilibili Loader.
_call
try: from predibase import PredibaseClient pc = PredibaseClient(token=self.predibase_api_key.get_secret_value()) except ImportError as e: raise ImportError( 'Could not import Predibase Python package. Please install it with `pip install predibase`.' ) from e except ValueError as e: raise ValueError('Your API key is not correct. Please try again') from e results = pc.prompt(prompt, model_name=self.model) return results[0].response
def _call(self, prompt: str, stop: Optional[List[str]]=None, run_manager: Optional[CallbackManagerForLLMRun]=None, **kwargs: Any) ->str: try: from predibase import PredibaseClient pc = PredibaseClient(token=self.predibase_api_key.get_secret_value()) except ImportError as e: raise ImportError( 'Could not import Predibase Python package. Please install it with `pip install predibase`.' ) from e except ValueError as e: raise ValueError('Your API key is not correct. Please try again' ) from e results = pc.prompt(prompt, model_name=self.model) return results[0].response
null
truncate_run_iterative
"""Utility to truncate a list of runs dictionaries to only keep the specified keys in each run. :param runs: The list of runs to truncate. :param keep_keys: The keys to keep in each run. :return: The truncated list of runs. """ def truncate_single(run: Dict[str, Any]) ->Dict[str, Any]: """Utility to truncate a single run dictionary to only keep the specified keys. :param run: The run dictionary to truncate. :return: The truncated run dictionary """ new_dict = {} for key in run: if key in keep_keys: new_dict[key] = run.get(key) return new_dict return list(map(truncate_single, runs))
def truncate_run_iterative(self, runs: List[Dict[str, Any]], keep_keys: Tuple[str, ...]=()) ->List[Dict[str, Any]]: """Utility to truncate a list of runs dictionaries to only keep the specified keys in each run. :param runs: The list of runs to truncate. :param keep_keys: The keys to keep in each run. :return: The truncated list of runs. """ def truncate_single(run: Dict[str, Any]) ->Dict[str, Any]: """Utility to truncate a single run dictionary to only keep the specified keys. :param run: The run dictionary to truncate. :return: The truncated run dictionary """ new_dict = {} for key in run: if key in keep_keys: new_dict[key] = run.get(key) return new_dict return list(map(truncate_single, runs))
Utility to truncate a list of runs dictionaries to only keep the specified keys in each run. :param runs: The list of runs to truncate. :param keep_keys: The keys to keep in each run. :return: The truncated list of runs.
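A brief usage sketch of the truncation behavior described above. It is illustrative only: `tracer` is a hypothetical object exposing the truncate_run_iterative method shown above, and the run dictionaries are made-up examples.

# Hypothetical usage; `tracer` stands in for any object exposing truncate_run_iterative.
runs = [
    {'id': '1', 'name': 'chain_run', 'inputs': {'q': 'hi'}, 'error': None},
    {'id': '2', 'name': 'llm_run', 'outputs': {'text': 'ok'}},
]
truncated = tracer.truncate_run_iterative(runs, keep_keys=('id', 'name'))
# truncated == [{'id': '1', 'name': 'chain_run'}, {'id': '2', 'name': 'llm_run'}]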
add_vectors
return self._add_vectors(self._client, self.table_name, vectors, documents, ids, self.chunk_size)
def add_vectors(self, vectors: List[List[float]], documents: List[Document], ids: List[str]) ->List[str]: return self._add_vectors(self._client, self.table_name, vectors, documents, ids, self.chunk_size)
null
_raw_similarity_search_with_score
"""Return raw opensearch documents (dict) including vectors, scores most similar to query. By default, supports Approximate Search. Also supports Script Scoring and Painless Scripting. Args: query: Text to look up documents similar to. k: Number of Documents to return. Defaults to 4. Returns: List of dict with its scores most similar to the query. Optional Args: same as `similarity_search` """ embedding = self.embedding_function.embed_query(query) search_type = kwargs.get('search_type', 'approximate_search') vector_field = kwargs.get('vector_field', 'vector_field') index_name = kwargs.get('index_name', self.index_name) filter = kwargs.get('filter', {}) if self.is_aoss and search_type != 'approximate_search' and search_type != SCRIPT_SCORING_SEARCH: raise ValueError( 'Amazon OpenSearch Service Serverless only supports `approximate_search` and `script_scoring`' ) if search_type == 'approximate_search': boolean_filter = kwargs.get('boolean_filter', {}) subquery_clause = kwargs.get('subquery_clause', 'must') efficient_filter = kwargs.get('efficient_filter', {}) lucene_filter = kwargs.get('lucene_filter', {}) if boolean_filter != {} and efficient_filter != {}: raise ValueError( 'Both `boolean_filter` and `efficient_filter` are provided which is invalid' ) if lucene_filter != {} and efficient_filter != {}: raise ValueError( 'Both `lucene_filter` and `efficient_filter` are provided which is invalid. `lucene_filter` is deprecated' ) if lucene_filter != {} and boolean_filter != {}: raise ValueError( 'Both `lucene_filter` and `boolean_filter` are provided which is invalid. `lucene_filter` is deprecated' ) if efficient_filter == {} and boolean_filter == {} and lucene_filter == { } and filter != {}: if self.engine in ['faiss', 'lucene']: efficient_filter = filter else: boolean_filter = filter if boolean_filter != {}: search_query = _approximate_search_query_with_boolean_filter(embedding, boolean_filter, k=k, vector_field=vector_field, subquery_clause =subquery_clause) elif efficient_filter != {}: search_query = _approximate_search_query_with_efficient_filter( embedding, efficient_filter, k=k, vector_field=vector_field) elif lucene_filter != {}: warnings.warn( '`lucene_filter` is deprecated. Please use the keyword argument `efficient_filter`' ) search_query = _approximate_search_query_with_efficient_filter( embedding, lucene_filter, k=k, vector_field=vector_field) else: search_query = _default_approximate_search_query(embedding, k=k, vector_field=vector_field) elif search_type == SCRIPT_SCORING_SEARCH: space_type = kwargs.get('space_type', 'l2') pre_filter = kwargs.get('pre_filter', MATCH_ALL_QUERY) search_query = _default_script_query(embedding, k, space_type, pre_filter, vector_field) elif search_type == PAINLESS_SCRIPTING_SEARCH: space_type = kwargs.get('space_type', 'l2Squared') pre_filter = kwargs.get('pre_filter', MATCH_ALL_QUERY) search_query = _default_painless_scripting_query(embedding, k, space_type, pre_filter, vector_field) else: raise ValueError('Invalid `search_type` provided as an argument') response = self.client.search(index=index_name, body=search_query) return [hit for hit in response['hits']['hits']]
def _raw_similarity_search_with_score(self, query: str, k: int=4, **kwargs: Any ) ->List[dict]: """Return raw opensearch documents (dict) including vectors, scores most similar to query. By default, supports Approximate Search. Also supports Script Scoring and Painless Scripting. Args: query: Text to look up documents similar to. k: Number of Documents to return. Defaults to 4. Returns: List of dict with its scores most similar to the query. Optional Args: same as `similarity_search` """ embedding = self.embedding_function.embed_query(query) search_type = kwargs.get('search_type', 'approximate_search') vector_field = kwargs.get('vector_field', 'vector_field') index_name = kwargs.get('index_name', self.index_name) filter = kwargs.get('filter', {}) if (self.is_aoss and search_type != 'approximate_search' and search_type != SCRIPT_SCORING_SEARCH): raise ValueError( 'Amazon OpenSearch Service Serverless only supports `approximate_search` and `script_scoring`' ) if search_type == 'approximate_search': boolean_filter = kwargs.get('boolean_filter', {}) subquery_clause = kwargs.get('subquery_clause', 'must') efficient_filter = kwargs.get('efficient_filter', {}) lucene_filter = kwargs.get('lucene_filter', {}) if boolean_filter != {} and efficient_filter != {}: raise ValueError( 'Both `boolean_filter` and `efficient_filter` are provided which is invalid' ) if lucene_filter != {} and efficient_filter != {}: raise ValueError( 'Both `lucene_filter` and `efficient_filter` are provided which is invalid. `lucene_filter` is deprecated' ) if lucene_filter != {} and boolean_filter != {}: raise ValueError( 'Both `lucene_filter` and `boolean_filter` are provided which is invalid. `lucene_filter` is deprecated' ) if efficient_filter == {} and boolean_filter == { } and lucene_filter == {} and filter != {}: if self.engine in ['faiss', 'lucene']: efficient_filter = filter else: boolean_filter = filter if boolean_filter != {}: search_query = _approximate_search_query_with_boolean_filter( embedding, boolean_filter, k=k, vector_field=vector_field, subquery_clause=subquery_clause) elif efficient_filter != {}: search_query = _approximate_search_query_with_efficient_filter( embedding, efficient_filter, k=k, vector_field=vector_field) elif lucene_filter != {}: warnings.warn( '`lucene_filter` is deprecated. Please use the keyword argument `efficient_filter`' ) search_query = _approximate_search_query_with_efficient_filter( embedding, lucene_filter, k=k, vector_field=vector_field) else: search_query = _default_approximate_search_query(embedding, k=k, vector_field=vector_field) elif search_type == SCRIPT_SCORING_SEARCH: space_type = kwargs.get('space_type', 'l2') pre_filter = kwargs.get('pre_filter', MATCH_ALL_QUERY) search_query = _default_script_query(embedding, k, space_type, pre_filter, vector_field) elif search_type == PAINLESS_SCRIPTING_SEARCH: space_type = kwargs.get('space_type', 'l2Squared') pre_filter = kwargs.get('pre_filter', MATCH_ALL_QUERY) search_query = _default_painless_scripting_query(embedding, k, space_type, pre_filter, vector_field) else: raise ValueError('Invalid `search_type` provided as an argument') response = self.client.search(index=index_name, body=search_query) return [hit for hit in response['hits']['hits']]
Return raw opensearch documents (dict) including vectors, scores most similar to query. By default, supports Approximate Search. Also supports Script Scoring and Painless Scripting. Args: query: Text to look up documents similar to. k: Number of Documents to return. Defaults to 4. Returns: List of dict with its scores most similar to the query. Optional Args: same as `similarity_search`
_import_elastic_vector_search
from langchain_community.vectorstores.elastic_vector_search import ElasticVectorSearch return ElasticVectorSearch
def _import_elastic_vector_search() ->Any: from langchain_community.vectorstores.elastic_vector_search import ElasticVectorSearch return ElasticVectorSearch
null
_import_oci_md_tgi
from langchain_community.llms.oci_data_science_model_deployment_endpoint import OCIModelDeploymentTGI return OCIModelDeploymentTGI
def _import_oci_md_tgi() ->Any: from langchain_community.llms.oci_data_science_model_deployment_endpoint import OCIModelDeploymentTGI return OCIModelDeploymentTGI
null
test_vald_search
"""Test end to end construction and search.""" docsearch = _vald_from_texts() time.sleep(WAIT_TIME) output = docsearch.similarity_search('foo', k=3) assert output == [Document(page_content='foo'), Document(page_content='bar' ), Document(page_content='baz')]
def test_vald_search() ->None: """Test end to end construction and search.""" docsearch = _vald_from_texts() time.sleep(WAIT_TIME) output = docsearch.similarity_search('foo', k=3) assert output == [Document(page_content='foo'), Document(page_content= 'bar'), Document(page_content='baz')]
Test end to end construction and search.
observation_prefix
"""Prefix to append the observation with.""" return 'Observation: '
@property def observation_prefix(self) ->str: """Prefix to append the observation with.""" return 'Observation: '
Prefix to append the observation with.
test_load_jsonlines
file_path = '/workspaces/langchain/test.json' expected_docs = [Document(page_content='value1', metadata={'source': file_path, 'seq_num': 1}), Document(page_content='value2', metadata={'source': file_path, 'seq_num': 2})] mocker.patch('pathlib.Path.open', return_value=io.StringIO( """ {"text": "value1"} {"text": "value2"} """ )) loader = JSONLoader(file_path=file_path, jq_schema='.', content_key='text', json_lines=True) result = loader.load() assert result == expected_docs
def test_load_jsonlines(mocker: MockerFixture) ->None: file_path = '/workspaces/langchain/test.json' expected_docs = [Document(page_content='value1', metadata={'source': file_path, 'seq_num': 1}), Document(page_content='value2', metadata={'source': file_path, 'seq_num': 2})] mocker.patch('pathlib.Path.open', return_value=io.StringIO( """ {"text": "value1"} {"text": "value2"} """ )) loader = JSONLoader(file_path=file_path, jq_schema='.', content_key='text', json_lines=True) result = loader.load() assert result == expected_docs
null
_Await
self.write('(') self.write('await') if t.value: self.write(' ') self.dispatch(t.value) self.write(')')
def _Await(self, t): self.write('(') self.write('await') if t.value: self.write(' ') self.dispatch(t.value) self.write(')')
null
pause_to_reflect
"""Reflect on recent observations and generate 'insights'.""" if self.verbose: logger.info('Character is reflecting') new_insights = [] topics = self._get_topics_of_reflection() for topic in topics: insights = self._get_insights_on_topic(topic, now=now) for insight in insights: self.add_memory(insight, now=now) new_insights.extend(insights) return new_insights
def pause_to_reflect(self, now: Optional[datetime]=None) ->List[str]: """Reflect on recent observations and generate 'insights'.""" if self.verbose: logger.info('Character is reflecting') new_insights = [] topics = self._get_topics_of_reflection() for topic in topics: insights = self._get_insights_on_topic(topic, now=now) for insight in insights: self.add_memory(insight, now=now) new_insights.extend(insights) return new_insights
Reflect on recent observations and generate 'insights'.
completion_with_retry
"""Use tenacity to retry the completion call.""" retry_decorator = _create_retry_decorator(llm, run_manager=run_manager) @retry_decorator def _completion_with_retry(**kwargs: Any) ->Any: ordered_generation_requests = get_ordered_generation_requests( models_priority_list, **kwargs) return llm.client.generate(ordered_generation_requests= ordered_generation_requests, is_stream=kwargs.get('stream', False)) return _completion_with_retry(**kwargs)
def completion_with_retry(llm: GPTRouter, models_priority_list: List[ GPTRouterModel], run_manager: Optional[CallbackManagerForLLMRun]=None, **kwargs: Any) ->Union[GenerationResponse, Generator[ ChunkedGenerationResponse]]: """Use tenacity to retry the completion call.""" retry_decorator = _create_retry_decorator(llm, run_manager=run_manager) @retry_decorator def _completion_with_retry(**kwargs: Any) ->Any: ordered_generation_requests = get_ordered_generation_requests( models_priority_list, **kwargs) return llm.client.generate(ordered_generation_requests= ordered_generation_requests, is_stream=kwargs.get('stream', False)) return _completion_with_retry(**kwargs)
Use tenacity to retry the completion call.
add_message
"""Store a message in the cache. Args: message (BaseMessage): The message object to store. Raises: SdkException: Momento service or network error. Exception: Unexpected response. """ from momento.responses import CacheListPushBack item = json.dumps(message_to_dict(message)) push_response = self.cache_client.list_push_back(self.cache_name, self.key, item, ttl=self.ttl) if isinstance(push_response, CacheListPushBack.Success): return None elif isinstance(push_response, CacheListPushBack.Error): raise push_response.inner_exception else: raise Exception(f'Unexpected response: {push_response}')
def add_message(self, message: BaseMessage) ->None: """Store a message in the cache. Args: message (BaseMessage): The message object to store. Raises: SdkException: Momento service or network error. Exception: Unexpected response. """ from momento.responses import CacheListPushBack item = json.dumps(message_to_dict(message)) push_response = self.cache_client.list_push_back(self.cache_name, self.key, item, ttl=self.ttl) if isinstance(push_response, CacheListPushBack.Success): return None elif isinstance(push_response, CacheListPushBack.Error): raise push_response.inner_exception else: raise Exception(f'Unexpected response: {push_response}')
Store a message in the cache. Args: message (BaseMessage): The message object to store. Raises: SdkException: Momento service or network error. Exception: Unexpected response.
_run
schedule = self.account.schedule() calendar = schedule.get_default_calendar() event = calendar.new_event() event.body = body event.subject = subject event.start = dt.strptime(start_datetime, UTC_FORMAT) event.end = dt.strptime(end_datetime, UTC_FORMAT) for attendee in attendees: event.attendees.add(attendee) event.save() output = 'Event sent: ' + str(event) return output
def _run(self, body: str, attendees: List[str], subject: str, start_datetime: str, end_datetime: str, run_manager: Optional[ CallbackManagerForToolRun]=None) ->str: schedule = self.account.schedule() calendar = schedule.get_default_calendar() event = calendar.new_event() event.body = body event.subject = subject event.start = dt.strptime(start_datetime, UTC_FORMAT) event.end = dt.strptime(end_datetime, UTC_FORMAT) for attendee in attendees: event.attendees.add(attendee) event.save() output = 'Event sent: ' + str(event) return output
null
test_loadnotebook_eachnotehasexpectedmetadata
documents = EverNoteLoader(self.example_notebook_path( 'sample_notebook.enex'), False).load() metadata_note1 = documents[0].metadata assert 'title' in metadata_note1.keys() assert 'created' in metadata_note1.keys() assert 'updated' in metadata_note1.keys() assert 'note-attributes.author' in metadata_note1.keys() assert 'content' not in metadata_note1.keys() assert 'content-raw' not in metadata_note1.keys() assert 'resource' not in metadata_note1.keys() assert metadata_note1['title'] == 'Test' assert metadata_note1['note-attributes.author'] == 'Michael McGarry' assert isinstance(metadata_note1['created'], time.struct_time) assert isinstance(metadata_note1['updated'], time.struct_time) assert metadata_note1['created'].tm_year == 2023 assert metadata_note1['created'].tm_mon == 5 assert metadata_note1['created'].tm_mday == 11 assert metadata_note1['updated'].tm_year == 2024 assert metadata_note1['updated'].tm_mon == 7 assert metadata_note1['updated'].tm_mday == 14 metadata_note2 = documents[1].metadata assert 'title' in metadata_note2.keys() assert 'created' in metadata_note2.keys() assert 'updated' not in metadata_note2.keys() assert 'note-attributes.author' in metadata_note2.keys() assert 'note-attributes.source' in metadata_note2.keys() assert 'content' not in metadata_note2.keys() assert 'content-raw' not in metadata_note2.keys() assert 'resource' not in metadata_note2.keys() assert metadata_note2['title'] == 'Summer Training Program' assert metadata_note2['note-attributes.author'] == 'Mike McGarry' assert metadata_note2['note-attributes.source'] == 'mobile.iphone' assert isinstance(metadata_note2['created'], time.struct_time) assert metadata_note2['created'].tm_year == 2022 assert metadata_note2['created'].tm_mon == 12 assert metadata_note2['created'].tm_mday == 27
def test_loadnotebook_eachnotehasexpectedmetadata(self) ->None: documents = EverNoteLoader(self.example_notebook_path( 'sample_notebook.enex'), False).load() metadata_note1 = documents[0].metadata assert 'title' in metadata_note1.keys() assert 'created' in metadata_note1.keys() assert 'updated' in metadata_note1.keys() assert 'note-attributes.author' in metadata_note1.keys() assert 'content' not in metadata_note1.keys() assert 'content-raw' not in metadata_note1.keys() assert 'resource' not in metadata_note1.keys() assert metadata_note1['title'] == 'Test' assert metadata_note1['note-attributes.author'] == 'Michael McGarry' assert isinstance(metadata_note1['created'], time.struct_time) assert isinstance(metadata_note1['updated'], time.struct_time) assert metadata_note1['created'].tm_year == 2023 assert metadata_note1['created'].tm_mon == 5 assert metadata_note1['created'].tm_mday == 11 assert metadata_note1['updated'].tm_year == 2024 assert metadata_note1['updated'].tm_mon == 7 assert metadata_note1['updated'].tm_mday == 14 metadata_note2 = documents[1].metadata assert 'title' in metadata_note2.keys() assert 'created' in metadata_note2.keys() assert 'updated' not in metadata_note2.keys() assert 'note-attributes.author' in metadata_note2.keys() assert 'note-attributes.source' in metadata_note2.keys() assert 'content' not in metadata_note2.keys() assert 'content-raw' not in metadata_note2.keys() assert 'resource' not in metadata_note2.keys() assert metadata_note2['title'] == 'Summer Training Program' assert metadata_note2['note-attributes.author'] == 'Mike McGarry' assert metadata_note2['note-attributes.source'] == 'mobile.iphone' assert isinstance(metadata_note2['created'], time.struct_time) assert metadata_note2['created'].tm_year == 2022 assert metadata_note2['created'].tm_mon == 12 assert metadata_note2['created'].tm_mday == 27
null
load
p = Path(self.path) docs = [] items = p.rglob(self.glob) if self.recursive else p.glob(self.glob) for i in items: if i.is_file(): if self._is_visible(i.relative_to(p)) or self.load_hidden: try: loader = PyPDFLoader(str(i), extract_images=self.extract_images) sub_docs = loader.load() for doc in sub_docs: doc.metadata['source'] = str(i) docs.extend(sub_docs) except Exception as e: if self.silent_errors: logger.warning(e) else: raise e return docs
def load(self) ->List[Document]: p = Path(self.path) docs = [] items = p.rglob(self.glob) if self.recursive else p.glob(self.glob) for i in items: if i.is_file(): if self._is_visible(i.relative_to(p)) or self.load_hidden: try: loader = PyPDFLoader(str(i), extract_images=self.extract_images) sub_docs = loader.load() for doc in sub_docs: doc.metadata['source'] = str(i) docs.extend(sub_docs) except Exception as e: if self.silent_errors: logger.warning(e) else: raise e return docs
null
test_as_import_path
"""Test that the path is converted to a LangChain import path.""" assert path.PACKAGE_DIR == ROOT / 'langchain_core' assert path.as_import_path(HERE, relative_to=ROOT) == 'tests.unit_tests._api' assert path.as_import_path(__file__, relative_to=ROOT ) == 'tests.unit_tests._api.test_path' assert path.as_import_path(__file__, suffix='create_agent', relative_to=ROOT ) == 'tests.unit_tests._api.test_path.create_agent'
def test_as_import_path() ->None: """Test that the path is converted to a LangChain import path.""" assert path.PACKAGE_DIR == ROOT / 'langchain_core' assert path.as_import_path(HERE, relative_to=ROOT ) == 'tests.unit_tests._api' assert path.as_import_path(__file__, relative_to=ROOT ) == 'tests.unit_tests._api.test_path' assert path.as_import_path(__file__, suffix='create_agent', relative_to =ROOT) == 'tests.unit_tests._api.test_path.create_agent'
Test that the path is converted to a LangChain import path.
chat
return ChatFireworks(model_kwargs={'temperature': 0, 'max_tokens': 512})
@pytest.fixture def chat() ->ChatFireworks: return ChatFireworks(model_kwargs={'temperature': 0, 'max_tokens': 512})
null
from_texts
""" Create a BM25Retriever from a list of texts. Args: texts: A list of texts to vectorize. metadatas: A list of metadata dicts to associate with each text. bm25_params: Parameters to pass to the BM25 vectorizer. preprocess_func: A function to preprocess each text before vectorization. **kwargs: Any other arguments to pass to the retriever. Returns: A BM25Retriever instance. """ try: from rank_bm25 import BM25Okapi except ImportError: raise ImportError( 'Could not import rank_bm25, please install with `pip install rank_bm25`.' ) texts_processed = [preprocess_func(t) for t in texts] bm25_params = bm25_params or {} vectorizer = BM25Okapi(texts_processed, **bm25_params) metadatas = metadatas or ({} for _ in texts) docs = [Document(page_content=t, metadata=m) for t, m in zip(texts, metadatas)] return cls(vectorizer=vectorizer, docs=docs, preprocess_func= preprocess_func, **kwargs)
@classmethod def from_texts(cls, texts: Iterable[str], metadatas: Optional[Iterable[dict ]]=None, bm25_params: Optional[Dict[str, Any]]=None, preprocess_func: Callable[[str], List[str]]=default_preprocessing_func, **kwargs: Any ) ->BM25Retriever: """ Create a BM25Retriever from a list of texts. Args: texts: A list of texts to vectorize. metadatas: A list of metadata dicts to associate with each text. bm25_params: Parameters to pass to the BM25 vectorizer. preprocess_func: A function to preprocess each text before vectorization. **kwargs: Any other arguments to pass to the retriever. Returns: A BM25Retriever instance. """ try: from rank_bm25 import BM25Okapi except ImportError: raise ImportError( 'Could not import rank_bm25, please install with `pip install rank_bm25`.' ) texts_processed = [preprocess_func(t) for t in texts] bm25_params = bm25_params or {} vectorizer = BM25Okapi(texts_processed, **bm25_params) metadatas = metadatas or ({} for _ in texts) docs = [Document(page_content=t, metadata=m) for t, m in zip(texts, metadatas)] return cls(vectorizer=vectorizer, docs=docs, preprocess_func= preprocess_func, **kwargs)
Create a BM25Retriever from a list of texts. Args: texts: A list of texts to vectorize. metadatas: A list of metadata dicts to associate with each text. bm25_params: Parameters to pass to the BM25 vectorizer. preprocess_func: A function to preprocess each text before vectorization. **kwargs: Any other arguments to pass to the retriever. Returns: A BM25Retriever instance.
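A minimal usage sketch for the classmethod above. It assumes rank_bm25 is installed and that BM25Retriever is importable from langchain_community.retrievers (the import path is an assumption); the sample texts are illustrative.

# Sketch only: requires `pip install rank_bm25`; import path assumed.
from langchain_community.retrievers import BM25Retriever

retriever = BM25Retriever.from_texts(
    ['foo', 'bar', 'world hello foo bar'],
    metadatas=[{'source': str(i)} for i in range(3)],
)
docs = retriever.get_relevant_documents('foo')  # standard retriever call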
merge_content
"""Merge two message contents. Args: first_content: The first content. second_content: The second content. Returns: The merged content. """ if isinstance(first_content, str): if isinstance(second_content, str): return first_content + second_content else: return_list: List[Union[str, Dict]] = [first_content] return return_list + second_content elif isinstance(second_content, List): return first_content + second_content elif isinstance(first_content[-1], str): return first_content[:-1] + [first_content[-1] + second_content] else: return first_content + [second_content]
def merge_content(first_content: Union[str, List[Union[str, Dict]]], second_content: Union[str, List[Union[str, Dict]]]) ->Union[str, List[ Union[str, Dict]]]: """Merge two message contents. Args: first_content: The first content. second_content: The second content. Returns: The merged content. """ if isinstance(first_content, str): if isinstance(second_content, str): return first_content + second_content else: return_list: List[Union[str, Dict]] = [first_content] return return_list + second_content elif isinstance(second_content, List): return first_content + second_content elif isinstance(first_content[-1], str): return first_content[:-1] + [first_content[-1] + second_content] else: return first_content + [second_content]
Merge two message contents. Args: first_content: The first content. second_content: The second content. Returns: The merged content.
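A short sketch exercising merge_content as defined above; the inputs are illustrative and the expected results follow directly from the branches in the function.

# Usage sketch for merge_content (illustrative inputs).
merge_content('Hello, ', 'world')
# -> 'Hello, world' (two strings are concatenated)
merge_content('Hello', [{'type': 'image_url', 'image_url': 'https://example.com/x.png'}])
# -> ['Hello', {'type': 'image_url', 'image_url': 'https://example.com/x.png'}]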
_get_schema_type
if schema is None: return None schema_type: SCHEMA_TYPE = APIProperty._cast_schema_list_type(schema) if schema_type == 'array': schema_type = APIProperty._get_schema_type_for_array(schema) elif schema_type == 'object': raise NotImplementedError('Objects not yet supported') elif schema_type in PRIMITIVE_TYPES: if schema.enum: schema_type = APIProperty._get_schema_type_for_enum(parameter, schema) else: pass else: raise NotImplementedError(f'Unsupported type: {schema_type}') return schema_type
@staticmethod def _get_schema_type(parameter: Parameter, schema: Optional[Schema] ) ->SCHEMA_TYPE: if schema is None: return None schema_type: SCHEMA_TYPE = APIProperty._cast_schema_list_type(schema) if schema_type == 'array': schema_type = APIProperty._get_schema_type_for_array(schema) elif schema_type == 'object': raise NotImplementedError('Objects not yet supported') elif schema_type in PRIMITIVE_TYPES: if schema.enum: schema_type = APIProperty._get_schema_type_for_enum(parameter, schema) else: pass else: raise NotImplementedError(f'Unsupported type: {schema_type}') return schema_type
null
from_model_id
"""Construct the pipeline object from model_id and task.""" try: from transformers import AutoModelForCausalLM, AutoModelForSeq2SeqLM, AutoTokenizer from transformers import pipeline as hf_pipeline except ImportError: raise ValueError( 'Could not import transformers python package. Please install it with `pip install transformers`.' ) _model_kwargs = model_kwargs or {} tokenizer = AutoTokenizer.from_pretrained(model_id, **_model_kwargs) try: if task == 'text-generation': model = AutoModelForCausalLM.from_pretrained(model_id, **_model_kwargs) elif task in ('text2text-generation', 'summarization'): model = AutoModelForSeq2SeqLM.from_pretrained(model_id, **_model_kwargs ) else: raise ValueError( f'Got invalid task {task}, currently only {VALID_TASKS} are supported' ) except ImportError as e: raise ValueError( f'Could not load the {task} model due to missing dependencies.') from e if tokenizer.pad_token is None: tokenizer.pad_token_id = model.config.eos_token_id if (getattr(model, 'is_loaded_in_4bit', False) or getattr(model, 'is_loaded_in_8bit', False)) and device is not None: logger.warning( f'Setting the `device` argument to None from {device} to avoid the error caused by attempting to move the model that was already loaded on the GPU using the Accelerate module to the same or another device.' ) device = None if device is not None and importlib.util.find_spec('torch') is not None: import torch cuda_device_count = torch.cuda.device_count() if device < -1 or device >= cuda_device_count: raise ValueError( f'Got device=={device}, device is required to be within [-1, {cuda_device_count})' ) if device_map is not None and device < 0: device = None if device is not None and device < 0 and cuda_device_count > 0: logger.warning( 'Device has %d GPUs available. Provide device={deviceId} to `from_model_id` to use availableGPUs for execution. deviceId is -1 (default) for CPU and can be a positive integer associated with CUDA device id.' , cuda_device_count) if 'trust_remote_code' in _model_kwargs: _model_kwargs = {k: v for k, v in _model_kwargs.items() if k != 'trust_remote_code'} _pipeline_kwargs = pipeline_kwargs or {} pipeline = hf_pipeline(task=task, model=model, tokenizer=tokenizer, device= device, device_map=device_map, batch_size=batch_size, model_kwargs= _model_kwargs, **_pipeline_kwargs) if pipeline.task not in VALID_TASKS: raise ValueError( f'Got invalid task {pipeline.task}, currently only {VALID_TASKS} are supported' ) return cls(pipeline=pipeline, model_id=model_id, model_kwargs=_model_kwargs, pipeline_kwargs=_pipeline_kwargs, batch_size=batch_size, **kwargs)
@classmethod def from_model_id(cls, model_id: str, task: str, device: Optional[int]=-1, device_map: Optional[str]=None, model_kwargs: Optional[dict]=None, pipeline_kwargs: Optional[dict]=None, batch_size: int= DEFAULT_BATCH_SIZE, **kwargs: Any) ->HuggingFacePipeline: """Construct the pipeline object from model_id and task.""" try: from transformers import AutoModelForCausalLM, AutoModelForSeq2SeqLM, AutoTokenizer from transformers import pipeline as hf_pipeline except ImportError: raise ValueError( 'Could not import transformers python package. Please install it with `pip install transformers`.' ) _model_kwargs = model_kwargs or {} tokenizer = AutoTokenizer.from_pretrained(model_id, **_model_kwargs) try: if task == 'text-generation': model = AutoModelForCausalLM.from_pretrained(model_id, ** _model_kwargs) elif task in ('text2text-generation', 'summarization'): model = AutoModelForSeq2SeqLM.from_pretrained(model_id, ** _model_kwargs) else: raise ValueError( f'Got invalid task {task}, currently only {VALID_TASKS} are supported' ) except ImportError as e: raise ValueError( f'Could not load the {task} model due to missing dependencies.' ) from e if tokenizer.pad_token is None: tokenizer.pad_token_id = model.config.eos_token_id if (getattr(model, 'is_loaded_in_4bit', False) or getattr(model, 'is_loaded_in_8bit', False)) and device is not None: logger.warning( f'Setting the `device` argument to None from {device} to avoid the error caused by attempting to move the model that was already loaded on the GPU using the Accelerate module to the same or another device.' ) device = None if device is not None and importlib.util.find_spec('torch') is not None: import torch cuda_device_count = torch.cuda.device_count() if device < -1 or device >= cuda_device_count: raise ValueError( f'Got device=={device}, device is required to be within [-1, {cuda_device_count})' ) if device_map is not None and device < 0: device = None if device is not None and device < 0 and cuda_device_count > 0: logger.warning( 'Device has %d GPUs available. Provide device={deviceId} to `from_model_id` to use availableGPUs for execution. deviceId is -1 (default) for CPU and can be a positive integer associated with CUDA device id.' , cuda_device_count) if 'trust_remote_code' in _model_kwargs: _model_kwargs = {k: v for k, v in _model_kwargs.items() if k != 'trust_remote_code'} _pipeline_kwargs = pipeline_kwargs or {} pipeline = hf_pipeline(task=task, model=model, tokenizer=tokenizer, device=device, device_map=device_map, batch_size=batch_size, model_kwargs=_model_kwargs, **_pipeline_kwargs) if pipeline.task not in VALID_TASKS: raise ValueError( f'Got invalid task {pipeline.task}, currently only {VALID_TASKS} are supported' ) return cls(pipeline=pipeline, model_id=model_id, model_kwargs= _model_kwargs, pipeline_kwargs=_pipeline_kwargs, batch_size= batch_size, **kwargs)
Construct the pipeline object from model_id and task.
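A hedged usage sketch for from_model_id as shown above. The model id, prompt, and pipeline_kwargs are illustrative; it assumes the transformers package is installed and that HuggingFacePipeline is importable from langchain_community.llms (import path assumed). Running it downloads the model.

# Illustrative only; requires `pip install transformers` and a model download.
from langchain_community.llms import HuggingFacePipeline  # import path assumed

llm = HuggingFacePipeline.from_model_id(
    model_id='gpt2',
    task='text-generation',
    pipeline_kwargs={'max_new_tokens': 32},
)
print(llm.invoke('Once upon a time'))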
_handle_missing_document
replacement_result = self.collection.find_one_and_replace(filter={'_id': missing_document['_id']}, replacement=missing_document) return replacement_result['data']['document']['_id']
def _handle_missing_document(missing_document: DocDict) ->str: replacement_result = self.collection.find_one_and_replace(filter={'_id': missing_document['_id']}, replacement=missing_document) return replacement_result['data']['document']['_id']
null
create_anonymizer_mapping
"""Creates or updates the mapping used to anonymize and/or deanonymize text. This method exploits the results returned by the analysis and anonymization processes. If is_reversed is True, it constructs a mapping from each original entity to its anonymized value. If is_reversed is False, it constructs a mapping from each anonymized entity back to its original text value. If there are multiple entities of the same type, the mapping will include a count to differentiate them. For example, if there are two names in the input text, the mapping will include NAME_1 and NAME_2. Example of mapping: { "PERSON": { "<original>": "<anonymized>", "John Doe": "Slim Shady" }, "PHONE_NUMBER": { "111-111-1111": "555-555-5555" } ... } """ analyzer_results.sort(key=lambda d: d.start) anonymizer_results.items.sort(key=lambda d: d.start) mapping: MappingDataType = defaultdict(dict) count: dict = defaultdict(int) for analyzed, anonymized in zip(analyzer_results, anonymizer_results.items): original_value = original_text[analyzed.start:analyzed.end] entity_type = anonymized.entity_type if is_reversed: cond = original_value in mapping[entity_type].values() else: cond = original_value in mapping[entity_type] if cond: continue if anonymized.text in mapping[entity_type].values( ) or anonymized.text in mapping[entity_type]: anonymized_value = format_duplicated_operator(anonymized.text, count[entity_type] + 2) count[entity_type] += 1 else: anonymized_value = anonymized.text mapping_key, mapping_value = (anonymized_value, original_value ) if is_reversed else (original_value, anonymized_value) mapping[entity_type][mapping_key] = mapping_value return mapping
def create_anonymizer_mapping(original_text: str, analyzer_results: List[ 'RecognizerResult'], anonymizer_results: 'EngineResult', is_reversed: bool=False) ->MappingDataType: """Creates or updates the mapping used to anonymize and/or deanonymize text. This method exploits the results returned by the analysis and anonymization processes. If is_reversed is True, it constructs a mapping from each original entity to its anonymized value. If is_reversed is False, it constructs a mapping from each anonymized entity back to its original text value. If there are multiple entities of the same type, the mapping will include a count to differentiate them. For example, if there are two names in the input text, the mapping will include NAME_1 and NAME_2. Example of mapping: { "PERSON": { "<original>": "<anonymized>", "John Doe": "Slim Shady" }, "PHONE_NUMBER": { "111-111-1111": "555-555-5555" } ... } """ analyzer_results.sort(key=lambda d: d.start) anonymizer_results.items.sort(key=lambda d: d.start) mapping: MappingDataType = defaultdict(dict) count: dict = defaultdict(int) for analyzed, anonymized in zip(analyzer_results, anonymizer_results.items ): original_value = original_text[analyzed.start:analyzed.end] entity_type = anonymized.entity_type if is_reversed: cond = original_value in mapping[entity_type].values() else: cond = original_value in mapping[entity_type] if cond: continue if anonymized.text in mapping[entity_type].values( ) or anonymized.text in mapping[entity_type]: anonymized_value = format_duplicated_operator(anonymized.text, count[entity_type] + 2) count[entity_type] += 1 else: anonymized_value = anonymized.text mapping_key, mapping_value = (anonymized_value, original_value ) if is_reversed else (original_value, anonymized_value) mapping[entity_type][mapping_key] = mapping_value return mapping
Creates or updates the mapping used to anonymize and/or deanonymize text. This method exploits the results returned by the analysis and anonymization processes. If is_reversed is True, it constructs a mapping from each original entity to its anonymized value. If is_reversed is False, it constructs a mapping from each anonymized entity back to its original text value. If there are multiple entities of the same type, the mapping will include a count to differentiate them. For example, if there are two names in the input text, the mapping will include NAME_1 and NAME_2. Example of mapping: { "PERSON": { "<original>": "<anonymized>", "John Doe": "Slim Shady" }, "PHONE_NUMBER": { "111-111-1111": "555-555-5555" } ... }
__init__
"""Initialize the NLTK splitter.""" super().__init__(**kwargs) try: from nltk.tokenize import sent_tokenize self._tokenizer = sent_tokenize except ImportError: raise ImportError( 'NLTK is not installed, please install it with `pip install nltk`.') self._separator = separator self._language = language
def __init__(self, separator: str='\n\n', language: str='english', **kwargs: Any) ->None: """Initialize the NLTK splitter.""" super().__init__(**kwargs) try: from nltk.tokenize import sent_tokenize self._tokenizer = sent_tokenize except ImportError: raise ImportError( 'NLTK is not installed, please install it with `pip install nltk`.' ) self._separator = separator self._language = language
Initialize the NLTK splitter.
requires_reference
"""Whether the evaluation requires a reference text.""" return False
@property def requires_reference(self) ->bool: """Whether the evaluation requires a reference text.""" return False
Whether the evaluation requires a reference text.
from_llm
"""Load QA Generate Chain from LLM.""" return cls(llm=llm, prompt=PROMPT, **kwargs)
@classmethod def from_llm(cls, llm: BaseLanguageModel, **kwargs: Any) ->QAGenerateChain: """Load QA Generate Chain from LLM.""" return cls(llm=llm, prompt=PROMPT, **kwargs)
Load QA Generate Chain from LLM.
_default_params
"""Get the default parameters for calling Tongyi Qwen API.""" normal_params = {'model': self.model_name, 'top_p': self.top_p, 'api_key': self.dashscope_api_key} return {**normal_params, **self.model_kwargs}
@property def _default_params(self) ->Dict[str, Any]: """Get the default parameters for calling Tongyi Qwen API.""" normal_params = {'model': self.model_name, 'top_p': self.top_p, 'api_key': self.dashscope_api_key} return {**normal_params, **self.model_kwargs}
Get the default parameters for calling Tongyi Qwen API.
_run
"""Use the tool.""" res = self.api_wrapper.results(query, self.max_results, source=self.backend) res_strs = [', '.join([f'{k}: {v}' for k, v in d.items()]) for d in res] return ', '.join([f'[{rs}]' for rs in res_strs])
def _run(self, query: str, run_manager: Optional[CallbackManagerForToolRun]=None) ->str: """Use the tool.""" res = self.api_wrapper.results(query, self.max_results, source=self.backend) res_strs = [', '.join([f'{k}: {v}' for k, v in d.items()]) for d in res] return ', '.join([f'[{rs}]' for rs in res_strs])
Use the tool.
_import_myscale_settings
from langchain_community.vectorstores.myscale import MyScaleSettings return MyScaleSettings
def _import_myscale_settings() ->Any: from langchain_community.vectorstores.myscale import MyScaleSettings return MyScaleSettings
null
_create_chat_result
generations = [] for m in response['data']['messages']: message = _convert_dict_to_message(m) gen = ChatGeneration(message=message) generations.append(gen) token_usage = response['usage'] llm_output = {'token_usage': token_usage, 'model': self.model} return ChatResult(generations=generations, llm_output=llm_output)
def _create_chat_result(self, response: Mapping[str, Any]) ->ChatResult: generations = [] for m in response['data']['messages']: message = _convert_dict_to_message(m) gen = ChatGeneration(message=message) generations.append(gen) token_usage = response['usage'] llm_output = {'token_usage': token_usage, 'model': self.model} return ChatResult(generations=generations, llm_output=llm_output)
null
extract_cypher
"""Extract Cypher code from a text. Args: text: Text to extract Cypher code from. Returns: Cypher code extracted from the text. """ pattern = '```(.*?)```' matches = re.findall(pattern, text, re.DOTALL) return matches[0] if matches else text
def extract_cypher(text: str) ->str: """Extract Cypher code from a text. Args: text: Text to extract Cypher code from. Returns: Cypher code extracted from the text. """ pattern = '```(.*?)```' matches = re.findall(pattern, text, re.DOTALL) return matches[0] if matches else text
Extract Cypher code from a text. Args: text: Text to extract Cypher code from. Returns: Cypher code extracted from the text.
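A small sketch exercising extract_cypher as defined above; the input text is made up, and the expected outputs follow from the regex with re.DOTALL.

# Usage sketch for extract_cypher (illustrative input).
text = 'Here is the query:\n```\nMATCH (n:Person) RETURN n LIMIT 5\n```'
extract_cypher(text)
# -> '\nMATCH (n:Person) RETURN n LIMIT 5\n'
extract_cypher('no fenced block here')
# -> 'no fenced block here' (falls back to the original text)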
_get_span
import regex minor = quote major = context errs_ = 0 s = regex.search(f'({minor}){{e<={errs_}}}', major) while s is None and errs_ <= errs: errs_ += 1 s = regex.search(f'({minor}){{e<={errs_}}}', major) if s is not None: yield from s.spans()
def _get_span(self, quote: str, context: str, errs: int=100) ->Iterator[str]: import regex minor = quote major = context errs_ = 0 s = regex.search(f'({minor}){{e<={errs_}}}', major) while s is None and errs_ <= errs: errs_ += 1 s = regex.search(f'({minor}){{e<={errs_}}}', major) if s is not None: yield from s.spans()
null
_FUNCTION_COMPOSER
""" Composer for functions. Args: op_name: Name of the function. Returns: Callable that takes a list of arguments and returns a string. """ def f(*args: Any) ->str: args_: map[str] = map(str, args) return f"{op_name}({','.join(args_)})" return f
def _FUNCTION_COMPOSER(op_name: str) ->Callable: """ Composer for functions. Args: op_name: Name of the function. Returns: Callable that takes a list of arguments and returns a string. """ def f(*args: Any) ->str: args_: map[str] = map(str, args) return f"{op_name}({','.join(args_)})" return f
Composer for functions. Args: op_name: Name of the function. Returns: Callable that takes a list of arguments and returns a string.
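A tiny usage sketch for the composer above; the function name 'lower' is illustrative.

# Usage sketch: build a call-expression composer for 'lower'.
lower = _FUNCTION_COMPOSER('lower')
lower('color')      # -> 'lower(color)'
lower('a', 'b', 3)  # -> 'lower(a,b,3)'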
_get_embedding_dimension
if self._embedding_dimension is None: self._embedding_dimension = len(self.embedding.embed_query( 'This is a sample sentence.')) return self._embedding_dimension
def _get_embedding_dimension(self) ->int: if self._embedding_dimension is None: self._embedding_dimension = len(self.embedding.embed_query( 'This is a sample sentence.')) return self._embedding_dimension
null
_select_eval_results
if isinstance(results, EvaluationResult): results_ = [results] elif isinstance(results, dict) and 'results' in results: results_ = cast(List[EvaluationResult], results['results']) else: raise TypeError( f'Invalid evaluation result type {type(results)}. Expected EvaluationResult or EvaluationResults.' ) return results_
def _select_eval_results(self, results: Union[EvaluationResult, EvaluationResults]) ->List[EvaluationResult]: if isinstance(results, EvaluationResult): results_ = [results] elif isinstance(results, dict) and 'results' in results: results_ = cast(List[EvaluationResult], results['results']) else: raise TypeError( f'Invalid evaluation result type {type(results)}. Expected EvaluationResult or EvaluationResults.' ) return results_
null
test_parse_with_language
llm_output = """I can use the `foo` tool to achieve the goal. Action: ```json { "action": "foo", "action_input": "bar" } ``` """ action, action_input = get_action_and_input(llm_output) assert action == 'foo' assert action_input == 'bar'
def test_parse_with_language() ->None: llm_output = """I can use the `foo` tool to achieve the goal. Action: ```json { "action": "foo", "action_input": "bar" } ``` """ action, action_input = get_action_and_input(llm_output) assert action == 'foo' assert action_input == 'bar'
null
test_loc_call
search = DataForSeoAPIWrapper(params={'location_name': 'Spain', 'language_code': 'es'}) output = search.results('iphone') assert '/es/' in output[0]['url']
def test_loc_call() ->None: search = DataForSeoAPIWrapper(params={'location_name': 'Spain', 'language_code': 'es'}) output = search.results('iphone') assert '/es/' in output[0]['url']
null
llm_prefix
"""Prefix to append the LLM call with.""" return 'Thought:'
@property def llm_prefix(self) ->str: """Prefix to append the LLM call with.""" return 'Thought:'
Prefix to append the LLM call with.
assert_docs
for doc in docs: assert doc.page_content assert doc.metadata main_meta = {'Published', 'Title', 'Authors', 'Summary'} assert set(doc.metadata).issuperset(main_meta) if all_meta: assert len(set(doc.metadata)) > len(main_meta) else: assert len(set(doc.metadata)) == len(main_meta)
def assert_docs(docs: List[Document], all_meta: bool=False) ->None: for doc in docs: assert doc.page_content assert doc.metadata main_meta = {'Published', 'Title', 'Authors', 'Summary'} assert set(doc.metadata).issuperset(main_meta) if all_meta: assert len(set(doc.metadata)) > len(main_meta) else: assert len(set(doc.metadata)) == len(main_meta)
null
test_math_question_2
"""Test simple question.""" question = """Michael had 58 golf balls. On tuesday, he lost 23 golf balls. On wednesday, he lost 2 more. How many golf balls did he have at the end of wednesday?""" prompt = MATH_PROMPT.format(question=question) queries = {prompt: _MATH_SOLUTION_2} fake_llm = FakeLLM(queries=queries) fake_pal_chain = PALChain.from_math_prompt(fake_llm, timeout=None) output = fake_pal_chain.run(question) assert output == '33'
def test_math_question_2() ->None: """Test simple question.""" question = """Michael had 58 golf balls. On tuesday, he lost 23 golf balls. On wednesday, he lost 2 more. How many golf balls did he have at the end of wednesday?""" prompt = MATH_PROMPT.format(question=question) queries = {prompt: _MATH_SOLUTION_2} fake_llm = FakeLLM(queries=queries) fake_pal_chain = PALChain.from_math_prompt(fake_llm, timeout=None) output = fake_pal_chain.run(question) assert output == '33'
Test simple question.
_setter
values[done] = value done.set() return value
def _setter(done: threading.Event, values: Values, value: T) ->T: values[done] = value done.set() return value
null
get_label
cost = None if event.selected: chosen_action = event.selected.index cost = (-1.0 * event.selected.score if event.selected.score is not None else None) prob = event.selected.probability return chosen_action, cost, prob else: return None, None, None
def get_label(self, event: PickBestEvent) ->tuple: cost = None if event.selected: chosen_action = event.selected.index cost = (-1.0 * event.selected.score if event.selected.score is not None else None) prob = event.selected.probability return chosen_action, cost, prob else: return None, None, None
null
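The negation above follows the usual contextual-bandit convention that lower cost is better; a tiny runnable illustration with made-up scores:

scores = [0.2, 0.9]                 # feedback scores, higher is better
costs = [-1.0 * s for s in scores]  # bandit-style costs, lower is better
print(costs)                        # [-0.2, -0.9]; the better action has the lower cost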
test_run_with_callback
"""Test run method works when callback manager is passed.""" handler = FakeCallbackHandler() chain = FakeChain(callbacks=[handler]) output = chain.run('bar') assert output == 'baz' assert handler.starts == 1 assert handler.ends == 1 assert handler.errors == 0
def test_run_with_callback() ->None: """Test run method works when callback manager is passed.""" handler = FakeCallbackHandler() chain = FakeChain(callbacks=[handler]) output = chain.run('bar') assert output == 'baz' assert handler.starts == 1 assert handler.ends == 1 assert handler.errors == 0
Test run method works when callback manager is passed.
max_marginal_relevance_search
"""Return docs selected using the maximal marginal relevance. Maximal marginal relevance optimizes for similarity to query AND diversity among selected documents. Args: query (str): Text to look up documents similar to. k (int = 4): Number of Documents to return. fetch_k (int = 20): Number of Documents to fetch to pass to MMR algorithm. lambda_mult (float = 0.5): Number between 0 and 1 that determines the degree of diversity among the results with 0 corresponding to maximum diversity and 1 to minimum diversity. Optional. Returns: List of Documents selected by maximal marginal relevance. """ embedding_vector = self.embedding.embed_query(query) return self.max_marginal_relevance_search_by_vector(embedding_vector, k, fetch_k, lambda_mult=lambda_mult, filter=filter)
def max_marginal_relevance_search(self, query: str, k: int=4, fetch_k: int= 20, lambda_mult: float=0.5, filter: Optional[Dict[str, str]]=None, ** kwargs: Any) ->List[Document]: """Return docs selected using the maximal marginal relevance. Maximal marginal relevance optimizes for similarity to query AND diversity among selected documents. Args: query (str): Text to look up documents similar to. k (int = 4): Number of Documents to return. fetch_k (int = 20): Number of Documents to fetch to pass to MMR algorithm. lambda_mult (float = 0.5): Number between 0 and 1 that determines the degree of diversity among the results with 0 corresponding to maximum diversity and 1 to minimum diversity. Optional. Returns: List of Documents selected by maximal marginal relevance. """ embedding_vector = self.embedding.embed_query(query) return self.max_marginal_relevance_search_by_vector(embedding_vector, k, fetch_k, lambda_mult=lambda_mult, filter=filter)
Return docs selected using the maximal marginal relevance. Maximal marginal relevance optimizes for similarity to query AND diversity among selected documents. Args: query (str): Text to look up documents similar to. k (int = 4): Number of Documents to return. fetch_k (int = 20): Number of Documents to fetch to pass to MMR algorithm. lambda_mult (float = 0.5): Number between 0 and 1 that determines the degree of diversity among the results with 0 corresponding to maximum diversity and 1 to minimum diversity. Optional. Returns: List of Documents selected by maximal marginal relevance.
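For readers who want to see what the MMR re-ranking does before the call above delegates to it, here is a self-contained sketch of the selection criterion; the cosine helper and the toy vectors are illustrative, not the library's internal implementation.

import numpy as np

def mmr(query_vec, doc_vecs, k=2, lambda_mult=0.5):
    # Greedy maximal-marginal-relevance selection over cosine similarity.
    def cos(a, b):
        return float(np.dot(a, b) / (np.linalg.norm(a) * np.linalg.norm(b)))
    selected = []
    candidates = list(range(len(doc_vecs)))
    while candidates and len(selected) < k:
        best, best_score = None, float("-inf")
        for i in candidates:
            relevance = cos(query_vec, doc_vecs[i])
            redundancy = max((cos(doc_vecs[i], doc_vecs[j]) for j in selected), default=0.0)
            score = lambda_mult * relevance - (1 - lambda_mult) * redundancy
            if score > best_score:
                best, best_score = i, score
        selected.append(best)
        candidates.remove(best)
    return selected

query = np.array([1.0, 0.0])
docs = [np.array([0.9, 0.1]), np.array([0.88, 0.12]), np.array([0.1, 0.9])]
print(mmr(query, docs, k=2, lambda_mult=1.0))  # [0, 1]: pure similarity keeps the near-duplicate
print(mmr(query, docs, k=2, lambda_mult=0.3))  # [0, 2]: diversity weighting swaps in the distinct doc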
load_memory_variables
""" Returns chat history and all generated entities with summaries if available, and updates or clears the recent entity cache. New entity name can be found when calling this method, before the entity summaries are generated, so the entity cache values may be empty if no entity descriptions are generated yet. """ chain = LLMChain(llm=self.llm, prompt=self.entity_extraction_prompt) if self.input_key is None: prompt_input_key = get_prompt_input_key(inputs, self.memory_variables) else: prompt_input_key = self.input_key buffer_string = get_buffer_string(self.buffer[-self.k * 2:], human_prefix= self.human_prefix, ai_prefix=self.ai_prefix) output = chain.predict(history=buffer_string, input=inputs[prompt_input_key]) if output.strip() == 'NONE': entities = [] else: entities = [w.strip() for w in output.split(',')] entity_summaries = {} for entity in entities: entity_summaries[entity] = self.entity_store.get(entity, '') self.entity_cache = entities if self.return_messages: buffer: Any = self.buffer[-self.k * 2:] else: buffer = buffer_string return {self.chat_history_key: buffer, 'entities': entity_summaries}
def load_memory_variables(self, inputs: Dict[str, Any]) ->Dict[str, Any]: """ Returns chat history and all generated entities with summaries if available, and updates or clears the recent entity cache. New entity name can be found when calling this method, before the entity summaries are generated, so the entity cache values may be empty if no entity descriptions are generated yet. """ chain = LLMChain(llm=self.llm, prompt=self.entity_extraction_prompt) if self.input_key is None: prompt_input_key = get_prompt_input_key(inputs, self.memory_variables) else: prompt_input_key = self.input_key buffer_string = get_buffer_string(self.buffer[-self.k * 2:], human_prefix=self.human_prefix, ai_prefix=self.ai_prefix) output = chain.predict(history=buffer_string, input=inputs[ prompt_input_key]) if output.strip() == 'NONE': entities = [] else: entities = [w.strip() for w in output.split(',')] entity_summaries = {} for entity in entities: entity_summaries[entity] = self.entity_store.get(entity, '') self.entity_cache = entities if self.return_messages: buffer: Any = self.buffer[-self.k * 2:] else: buffer = buffer_string return {self.chat_history_key: buffer, 'entities': entity_summaries}
Returns chat history and all generated entities with summaries if available, and updates or clears the recent entity cache. New entity name can be found when calling this method, before the entity summaries are generated, so the entity cache values may be empty if no entity descriptions are generated yet.
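The `self.buffer[-self.k * 2:]` slice above keeps only the last k human/AI exchanges before anything is sent to the extraction prompt; a tiny illustration with a made-up message list:

messages = ["Human: hi", "AI: hello", "Human: who is Deven?", "AI: Deven is a colleague."]
k = 1                        # number of exchanges to keep
window = messages[-k * 2:]   # each exchange contributes a human and an AI message
print(window)                # only the most recent exchange survives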
__add__
...
def __add__(self, __x: _T_contra) ->_T_co: ...
null
load
return list(self.lazy_load())
def load(self) ->List[Document]: return list(self.lazy_load())
null
__str__
result = '' for task in self.tasks: result += f'{task}\n' result += f'status: {task.status}\n' if task.failed(): result += f'message: {task.message}\n' if task.completed(): result += f'result: {task.result}\n' return result
def __str__(self) ->str: result = '' for task in self.tasks: result += f'{task}\n' result += f'status: {task.status}\n' if task.failed(): result += f'message: {task.message}\n' if task.completed(): result += f'result: {task.result}\n' return result
null
_load_credentials
"""Load credentials.""" try: from google.auth.transport.requests import Request from google.oauth2 import service_account from google.oauth2.credentials import Credentials from google_auth_oauthlib.flow import InstalledAppFlow from youtube_transcript_api import YouTubeTranscriptApi except ImportError: raise ImportError( 'You must run`pip install --upgrade google-api-python-client google-auth-httplib2 google-auth-oauthlib youtube-transcript-api` to use the Google Drive loader' ) creds = None if self.service_account_path.exists(): return service_account.Credentials.from_service_account_file(str(self. service_account_path)) if self.token_path.exists(): creds = Credentials.from_authorized_user_file(str(self.token_path), SCOPES) if not creds or not creds.valid: if creds and creds.expired and creds.refresh_token: creds.refresh(Request()) else: flow = InstalledAppFlow.from_client_secrets_file(str(self. credentials_path), SCOPES) creds = flow.run_local_server(port=0) with open(self.token_path, 'w') as token: token.write(creds.to_json()) return creds
def _load_credentials(self) ->Any: """Load credentials.""" try: from google.auth.transport.requests import Request from google.oauth2 import service_account from google.oauth2.credentials import Credentials from google_auth_oauthlib.flow import InstalledAppFlow from youtube_transcript_api import YouTubeTranscriptApi except ImportError: raise ImportError( 'You must run`pip install --upgrade google-api-python-client google-auth-httplib2 google-auth-oauthlib youtube-transcript-api` to use the Google Drive loader' ) creds = None if self.service_account_path.exists(): return service_account.Credentials.from_service_account_file(str( self.service_account_path)) if self.token_path.exists(): creds = Credentials.from_authorized_user_file(str(self.token_path), SCOPES) if not creds or not creds.valid: if creds and creds.expired and creds.refresh_token: creds.refresh(Request()) else: flow = InstalledAppFlow.from_client_secrets_file(str(self. credentials_path), SCOPES) creds = flow.run_local_server(port=0) with open(self.token_path, 'w') as token: token.write(creds.to_json()) return creds
Load credentials.
_default_params
"""Get the default parameters for calling the Aleph Alpha API.""" return {'maximum_tokens': self.maximum_tokens, 'temperature': self. temperature, 'top_k': self.top_k, 'top_p': self.top_p, 'presence_penalty': self.presence_penalty, 'frequency_penalty': self. frequency_penalty, 'n': self.n, 'repetition_penalties_include_prompt': self.repetition_penalties_include_prompt, 'use_multiplicative_presence_penalty': self. use_multiplicative_presence_penalty, 'penalty_bias': self.penalty_bias, 'penalty_exceptions': self.penalty_exceptions, 'penalty_exceptions_include_stop_sequences': self. penalty_exceptions_include_stop_sequences, 'best_of': self.best_of, 'logit_bias': self.logit_bias, 'log_probs': self.log_probs, 'tokens': self.tokens, 'disable_optimizations': self.disable_optimizations, 'minimum_tokens': self.minimum_tokens, 'echo': self.echo, 'use_multiplicative_frequency_penalty': self. use_multiplicative_frequency_penalty, 'sequence_penalty': self. sequence_penalty, 'sequence_penalty_min_length': self. sequence_penalty_min_length, 'use_multiplicative_sequence_penalty': self.use_multiplicative_sequence_penalty, 'completion_bias_inclusion': self.completion_bias_inclusion, 'completion_bias_inclusion_first_token_only': self. completion_bias_inclusion_first_token_only, 'completion_bias_exclusion': self.completion_bias_exclusion, 'completion_bias_exclusion_first_token_only': self. completion_bias_exclusion_first_token_only, 'contextual_control_threshold': self.contextual_control_threshold, 'control_log_additive': self.control_log_additive, 'repetition_penalties_include_completion': self. repetition_penalties_include_completion, 'raw_completion': self. raw_completion}
@property def _default_params(self) ->Dict[str, Any]: """Get the default parameters for calling the Aleph Alpha API.""" return {'maximum_tokens': self.maximum_tokens, 'temperature': self. temperature, 'top_k': self.top_k, 'top_p': self.top_p, 'presence_penalty': self.presence_penalty, 'frequency_penalty': self.frequency_penalty, 'n': self.n, 'repetition_penalties_include_prompt': self. repetition_penalties_include_prompt, 'use_multiplicative_presence_penalty': self. use_multiplicative_presence_penalty, 'penalty_bias': self. penalty_bias, 'penalty_exceptions': self.penalty_exceptions, 'penalty_exceptions_include_stop_sequences': self. penalty_exceptions_include_stop_sequences, 'best_of': self.best_of, 'logit_bias': self.logit_bias, 'log_probs': self.log_probs, 'tokens': self.tokens, 'disable_optimizations': self. disable_optimizations, 'minimum_tokens': self.minimum_tokens, 'echo': self.echo, 'use_multiplicative_frequency_penalty': self. use_multiplicative_frequency_penalty, 'sequence_penalty': self. sequence_penalty, 'sequence_penalty_min_length': self. sequence_penalty_min_length, 'use_multiplicative_sequence_penalty': self.use_multiplicative_sequence_penalty, 'completion_bias_inclusion': self.completion_bias_inclusion, 'completion_bias_inclusion_first_token_only': self. completion_bias_inclusion_first_token_only, 'completion_bias_exclusion': self.completion_bias_exclusion, 'completion_bias_exclusion_first_token_only': self. completion_bias_exclusion_first_token_only, 'contextual_control_threshold': self.contextual_control_threshold, 'control_log_additive': self.control_log_additive, 'repetition_penalties_include_completion': self. repetition_penalties_include_completion, 'raw_completion': self. raw_completion}
Get the default parameters for calling the Aleph Alpha API.
__getitem__
...
@overload def __getitem__(self, item: slice) ->Tuple[AsyncIterator[T], ...]: ...
null
__init__
""" Args: file_path: The path to the file to load. mode: The mode to use when loading the file. Can be one of "single", "multi", or "all". Default is "single". **unstructured_kwargs: Any kwargs to pass to the unstructured. """ validate_unstructured_version(min_unstructured_version='0.6.3') super().__init__(file_path=file_path, mode=mode, **unstructured_kwargs)
def __init__(self, file_path: str, mode: str='single', ** unstructured_kwargs: Any): """ Args: file_path: The path to the file to load. mode: The mode to use when loading the file. Can be one of "single", "multi", or "all". Default is "single". **unstructured_kwargs: Any kwargs to pass to the unstructured. """ validate_unstructured_version(min_unstructured_version='0.6.3') super().__init__(file_path=file_path, mode=mode, **unstructured_kwargs)
Args: file_path: The path to the file to load. mode: The mode to use when loading the file. Can be one of "single", "multi", or "all". Default is "single". **unstructured_kwargs: Any kwargs to pass to the unstructured.
install_python_packages
"""Install python packages in the sandbox.""" self.session.install_python_packages(package_names)
def install_python_packages(self, package_names: Union[str, List[str]]) ->None: """Install python packages in the sandbox.""" self.session.install_python_packages(package_names)
Install python packages in the sandbox.
_llm_type
"""Return type of llm.""" return 'titan_takeoff'
@property def _llm_type(self) ->str: """Return type of llm.""" return 'titan_takeoff'
Return type of llm.
format_response_payload
"""Formats the response body according to the output schema of the model. Returns the data type that is received from the response. """
@abstractmethod def format_response_payload(self, output: bytes) ->str: """Formats the response body according to the output schema of the model. Returns the data type that is received from the response. """
Formats the response body according to the output schema of the model. Returns the data type that is received from the response.
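A sketch of what one concrete override of this hook might look like; the JSON shape and the `generated_text` field are assumptions made for illustration, not a documented response schema.

import json

class ExampleContentFormatter:
    def format_response_payload(self, output: bytes) -> str:
        # Hypothetical: assume the endpoint returns a JSON list of generations.
        data = json.loads(output.decode("utf-8"))
        return data[0]["generated_text"]

formatter = ExampleContentFormatter()
print(formatter.format_response_payload(b'[{"generated_text": "hello"}]'))  # hello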
on_tool_error
"""Run when tool errors.""" self.metrics['step'] += 1 self.metrics['errors'] += 1
def on_tool_error(self, error: BaseException, **kwargs: Any) ->None: """Run when tool errors.""" self.metrics['step'] += 1 self.metrics['errors'] += 1
Run when tool errors.
_stop
return ['Observation:']
@property def _stop(self) ->List[str]: return ['Observation:']
null
_search
""" Return docs similar to query. Args: query (str, optional): Text to look up similar docs. embedding (Union[List[float], np.ndarray], optional): Query's embedding. embedding_function (Callable, optional): Function to convert `query` into embedding. k (int): Number of Documents to return. distance_metric (Optional[str], optional): `L2` for Euclidean, `L1` for Nuclear, `max` for L-infinity distance, `cos` for cosine similarity, 'dot' for dot product. filter (Union[Dict, Callable], optional): Additional filter prior to the embedding search. - ``Dict`` - Key-value search on tensors of htype json, on an AND basis (a sample must satisfy all key-value filters to be True) Dict = {"tensor_name_1": {"key": value}, "tensor_name_2": {"key": value}} - ``Function`` - Any function compatible with `deeplake.filter`. use_maximal_marginal_relevance (bool): Use maximal marginal relevance. fetch_k (int): Number of Documents for MMR algorithm. return_score (bool): Return the score. exec_option (str, optional): Supports 3 ways to perform searching. Could be "python", "compute_engine" or "tensor_db". - ``python`` - Pure-python implementation for the client. WARNING: not recommended for big datasets. - ``compute_engine`` - C++ implementation of Deep Lake Compute Engine for the client. Not for in-memory or local datasets. - ``tensor_db`` - Hosted Managed Tensor Database for storage and query execution. Only for data in Deep Lake Managed Database. Use runtime = {"db_engine": True} during dataset creation. deep_memory (bool): Whether to use the Deep Memory model for improving search results. Defaults to False if deep_memory is not specified in the Vector Store initialization. If True, the distance metric is set to "deepmemory_distance", which represents the metric with which the model was trained. The search is performed using the Deep Memory model. If False, the distance metric is set to "COS" or whatever distance metric user specifies. **kwargs: Additional keyword arguments. Returns: List of Documents by the specified distance metric, if return_score True, return a tuple of (Document, score) Raises: ValueError: if both `embedding` and `embedding_function` are not specified. """ if kwargs.get('tql'): return self._search_tql(tql=kwargs['tql'], exec_option=exec_option, return_score=return_score, embedding=embedding, embedding_function= embedding_function, distance_metric=distance_metric, use_maximal_marginal_relevance=use_maximal_marginal_relevance, filter=filter) if embedding_function: if isinstance(embedding_function, Embeddings): _embedding_function = embedding_function.embed_query else: _embedding_function = embedding_function elif self._embedding_function: _embedding_function = self._embedding_function.embed_query else: _embedding_function = None if embedding is None: if _embedding_function is None: raise ValueError( 'Either `embedding` or `embedding_function` needs to be specified.' 
) embedding = _embedding_function(query) if query else None if isinstance(embedding, list): embedding = np.array(embedding, dtype=np.float32) if len(embedding.shape) > 1: embedding = embedding[0] result = self.vectorstore.search(embedding=embedding, k=fetch_k if use_maximal_marginal_relevance else k, distance_metric=distance_metric, filter=filter, exec_option=exec_option, return_tensors=['embedding', 'metadata', 'text', self._id_tensor_name], deep_memory=deep_memory) scores = result['score'] embeddings = result['embedding'] metadatas = result['metadata'] texts = result['text'] if use_maximal_marginal_relevance: lambda_mult = kwargs.get('lambda_mult', 0.5) indices = maximal_marginal_relevance(embedding, embeddings, k=min(k, len(texts)), lambda_mult=lambda_mult) scores = [scores[i] for i in indices] texts = [texts[i] for i in indices] metadatas = [metadatas[i] for i in indices] docs = [Document(page_content=text, metadata=metadata) for text, metadata in zip(texts, metadatas)] if return_score: return [(doc, score) for doc, score in zip(docs, scores)] return docs
def _search(self, query: Optional[str]=None, embedding: Optional[Union[List [float], np.ndarray]]=None, embedding_function: Optional[Callable]=None, k: int=4, distance_metric: Optional[str]=None, use_maximal_marginal_relevance: bool=False, fetch_k: Optional[int]=20, filter: Optional[Union[Dict, Callable]]=None, return_score: bool=False, exec_option: Optional[str]=None, deep_memory: bool=False, **kwargs: Any ) ->Any[List[Document], List[Tuple[Document, float]]]: """ Return docs similar to query. Args: query (str, optional): Text to look up similar docs. embedding (Union[List[float], np.ndarray], optional): Query's embedding. embedding_function (Callable, optional): Function to convert `query` into embedding. k (int): Number of Documents to return. distance_metric (Optional[str], optional): `L2` for Euclidean, `L1` for Nuclear, `max` for L-infinity distance, `cos` for cosine similarity, 'dot' for dot product. filter (Union[Dict, Callable], optional): Additional filter prior to the embedding search. - ``Dict`` - Key-value search on tensors of htype json, on an AND basis (a sample must satisfy all key-value filters to be True) Dict = {"tensor_name_1": {"key": value}, "tensor_name_2": {"key": value}} - ``Function`` - Any function compatible with `deeplake.filter`. use_maximal_marginal_relevance (bool): Use maximal marginal relevance. fetch_k (int): Number of Documents for MMR algorithm. return_score (bool): Return the score. exec_option (str, optional): Supports 3 ways to perform searching. Could be "python", "compute_engine" or "tensor_db". - ``python`` - Pure-python implementation for the client. WARNING: not recommended for big datasets. - ``compute_engine`` - C++ implementation of Deep Lake Compute Engine for the client. Not for in-memory or local datasets. - ``tensor_db`` - Hosted Managed Tensor Database for storage and query execution. Only for data in Deep Lake Managed Database. Use runtime = {"db_engine": True} during dataset creation. deep_memory (bool): Whether to use the Deep Memory model for improving search results. Defaults to False if deep_memory is not specified in the Vector Store initialization. If True, the distance metric is set to "deepmemory_distance", which represents the metric with which the model was trained. The search is performed using the Deep Memory model. If False, the distance metric is set to "COS" or whatever distance metric user specifies. **kwargs: Additional keyword arguments. Returns: List of Documents by the specified distance metric, if return_score True, return a tuple of (Document, score) Raises: ValueError: if both `embedding` and `embedding_function` are not specified. """ if kwargs.get('tql'): return self._search_tql(tql=kwargs['tql'], exec_option=exec_option, return_score=return_score, embedding=embedding, embedding_function=embedding_function, distance_metric= distance_metric, use_maximal_marginal_relevance= use_maximal_marginal_relevance, filter=filter) if embedding_function: if isinstance(embedding_function, Embeddings): _embedding_function = embedding_function.embed_query else: _embedding_function = embedding_function elif self._embedding_function: _embedding_function = self._embedding_function.embed_query else: _embedding_function = None if embedding is None: if _embedding_function is None: raise ValueError( 'Either `embedding` or `embedding_function` needs to be specified.' 
) embedding = _embedding_function(query) if query else None if isinstance(embedding, list): embedding = np.array(embedding, dtype=np.float32) if len(embedding.shape) > 1: embedding = embedding[0] result = self.vectorstore.search(embedding=embedding, k=fetch_k if use_maximal_marginal_relevance else k, distance_metric= distance_metric, filter=filter, exec_option=exec_option, return_tensors=['embedding', 'metadata', 'text', self. _id_tensor_name], deep_memory=deep_memory) scores = result['score'] embeddings = result['embedding'] metadatas = result['metadata'] texts = result['text'] if use_maximal_marginal_relevance: lambda_mult = kwargs.get('lambda_mult', 0.5) indices = maximal_marginal_relevance(embedding, embeddings, k=min(k, len(texts)), lambda_mult=lambda_mult) scores = [scores[i] for i in indices] texts = [texts[i] for i in indices] metadatas = [metadatas[i] for i in indices] docs = [Document(page_content=text, metadata=metadata) for text, metadata in zip(texts, metadatas)] if return_score: return [(doc, score) for doc, score in zip(docs, scores)] return docs
Return docs similar to query. Args: query (str, optional): Text to look up similar docs. embedding (Union[List[float], np.ndarray], optional): Query's embedding. embedding_function (Callable, optional): Function to convert `query` into embedding. k (int): Number of Documents to return. distance_metric (Optional[str], optional): `L2` for Euclidean, `L1` for Nuclear, `max` for L-infinity distance, `cos` for cosine similarity, 'dot' for dot product. filter (Union[Dict, Callable], optional): Additional filter prior to the embedding search. - ``Dict`` - Key-value search on tensors of htype json, on an AND basis (a sample must satisfy all key-value filters to be True) Dict = {"tensor_name_1": {"key": value}, "tensor_name_2": {"key": value}} - ``Function`` - Any function compatible with `deeplake.filter`. use_maximal_marginal_relevance (bool): Use maximal marginal relevance. fetch_k (int): Number of Documents for MMR algorithm. return_score (bool): Return the score. exec_option (str, optional): Supports 3 ways to perform searching. Could be "python", "compute_engine" or "tensor_db". - ``python`` - Pure-python implementation for the client. WARNING: not recommended for big datasets. - ``compute_engine`` - C++ implementation of Deep Lake Compute Engine for the client. Not for in-memory or local datasets. - ``tensor_db`` - Hosted Managed Tensor Database for storage and query execution. Only for data in Deep Lake Managed Database. Use runtime = {"db_engine": True} during dataset creation. deep_memory (bool): Whether to use the Deep Memory model for improving search results. Defaults to False if deep_memory is not specified in the Vector Store initialization. If True, the distance metric is set to "deepmemory_distance", which represents the metric with which the model was trained. The search is performed using the Deep Memory model. If False, the distance metric is set to "COS" or whatever distance metric user specifies. **kwargs: Additional keyword arguments. Returns: List of Documents by the specified distance metric, if return_score True, return a tuple of (Document, score) Raises: ValueError: if both `embedding` and `embedding_function` are not specified.
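The Dict form of `filter` described above applies key/value conditions on an AND basis; a flattened, self-contained illustration of that semantics (the nested tensor-name layer from the docstring is omitted here for brevity):

def matches(metadata: dict, flt: dict) -> bool:
    # A sample passes only if every key/value pair in the filter matches.
    return all(metadata.get(key) == value for key, value in flt.items())

samples = [
    {"source": "a.txt", "lang": "en"},
    {"source": "b.txt", "lang": "de"},
]
print([s for s in samples if matches(s, {"lang": "en"})])                      # one match
print([s for s in samples if matches(s, {"lang": "en", "source": "b.txt"})])   # AND basis: no match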
get_initial_label
"""Return the markdown label for a new LLMThought that doesn't have an associated tool yet. """ return f'{THINKING_EMOJI} **Thinking...**'
def get_initial_label(self) ->str: """Return the markdown label for a new LLMThought that doesn't have an associated tool yet. """ return f'{THINKING_EMOJI} **Thinking...**'
Return the markdown label for a new LLMThought that doesn't have an associated tool yet.
embed_documents
"""Call out to Aleph Alpha's asymmetric Document endpoint. Args: texts: The list of texts to embed. Returns: List of embeddings, one for each text. """ try: from aleph_alpha_client import Prompt, SemanticEmbeddingRequest, SemanticRepresentation except ImportError: raise ValueError( 'Could not import aleph_alpha_client python package. Please install it with `pip install aleph_alpha_client`.' ) document_embeddings = [] for text in texts: document_params = {'prompt': Prompt.from_text(text), 'representation': SemanticRepresentation.Document, 'compress_to_size': self. compress_to_size, 'normalize': self.normalize, 'contextual_control_threshold': self.contextual_control_threshold, 'control_log_additive': self.control_log_additive} document_request = SemanticEmbeddingRequest(**document_params) document_response = self.client.semantic_embed(request=document_request, model=self.model) document_embeddings.append(document_response.embedding) return document_embeddings
def embed_documents(self, texts: List[str]) ->List[List[float]]: """Call out to Aleph Alpha's asymmetric Document endpoint. Args: texts: The list of texts to embed. Returns: List of embeddings, one for each text. """ try: from aleph_alpha_client import Prompt, SemanticEmbeddingRequest, SemanticRepresentation except ImportError: raise ValueError( 'Could not import aleph_alpha_client python package. Please install it with `pip install aleph_alpha_client`.' ) document_embeddings = [] for text in texts: document_params = {'prompt': Prompt.from_text(text), 'representation': SemanticRepresentation.Document, 'compress_to_size': self.compress_to_size, 'normalize': self. normalize, 'contextual_control_threshold': self. contextual_control_threshold, 'control_log_additive': self. control_log_additive} document_request = SemanticEmbeddingRequest(**document_params) document_response = self.client.semantic_embed(request= document_request, model=self.model) document_embeddings.append(document_response.embedding) return document_embeddings
Call out to Aleph Alpha's asymmetric Document endpoint. Args: texts: The list of texts to embed. Returns: List of embeddings, one for each text.
test_all_imports
"""Simple test to make sure all things can be imported.""" for cls in llms.__all__: assert issubclass(getattr(llms, cls), BaseLLM) assert set(llms.__all__) == set(EXPECT_ALL)
def test_all_imports() ->None: """Simple test to make sure all things can be imported.""" for cls in llms.__all__: assert issubclass(getattr(llms, cls), BaseLLM) assert set(llms.__all__) == set(EXPECT_ALL)
Simple test to make sure all things can be imported.
test_regex_dict_result
"""Test regex dict result.""" regex_dict_parser = RegexDictParser(output_key_to_format= DEF_OUTPUT_KEY_TO_FORMAT, no_update_value='N/A') result_dict = regex_dict_parser.parse_folder(DEF_README) print('parse_result:', result_dict) assert DEF_EXPECTED_RESULT == result_dict
def test_regex_dict_result() ->None: """Test regex dict result.""" regex_dict_parser = RegexDictParser(output_key_to_format= DEF_OUTPUT_KEY_TO_FORMAT, no_update_value='N/A') result_dict = regex_dict_parser.parse_folder(DEF_README) print('parse_result:', result_dict) assert DEF_EXPECTED_RESULT == result_dict
Test regex dict result.
_import_arxiv
from langchain_community.utilities.arxiv import ArxivAPIWrapper return ArxivAPIWrapper
def _import_arxiv() ->Any: from langchain_community.utilities.arxiv import ArxivAPIWrapper return ArxivAPIWrapper
null
test_model
documents = ['hi', 'qianfan'] embedding = QianfanEmbeddingsEndpoint(model='Embedding-V1') output = embedding.embed_documents(documents) assert len(output) == 2
def test_model() ->None: documents = ['hi', 'qianfan'] embedding = QianfanEmbeddingsEndpoint(model='Embedding-V1') output = embedding.embed_documents(documents) assert len(output) == 2
null
_import_databricks
from langchain_community.llms.databricks import Databricks return Databricks
def _import_databricks() ->Any: from langchain_community.llms.databricks import Databricks return Databricks
null
_llm_type
"""Return type of llm.""" return 'volc-engine-maas-llm'
@property def _llm_type(self) ->str: """Return type of llm.""" return 'volc-engine-maas-llm'
Return type of llm.
test_call
"""Test that call gives the correct answer.""" chain = SerpAPIWrapper() output = chain.run("What was Obama's first name?") assert output == 'Barack Hussein Obama II'
def test_call() ->None: """Test that call gives the correct answer.""" chain = SerpAPIWrapper() output = chain.run("What was Obama's first name?") assert output == 'Barack Hussein Obama II'
Test that call gives the correct answer.
_import_together
from langchain_community.llms.together import Together return Together
def _import_together() ->Any: from langchain_community.llms.together import Together return Together
null
run
"""Run query through OpenAI and parse result.""" if is_openai_v1(): response = self.client.generate(prompt=query, n=self.n, size=self.size, model=self.model_name, quality=self.quality) image_urls = self.separator.join([item.url for item in response.data]) else: response = self.client.create(prompt=query, n=self.n, size=self.size, model=self.model_name) image_urls = self.separator.join([item['url'] for item in response['data']] ) return image_urls if image_urls else 'No image was generated'
def run(self, query: str) ->str: """Run query through OpenAI and parse result.""" if is_openai_v1(): response = self.client.generate(prompt=query, n=self.n, size=self. size, model=self.model_name, quality=self.quality) image_urls = self.separator.join([item.url for item in response.data]) else: response = self.client.create(prompt=query, n=self.n, size=self. size, model=self.model_name) image_urls = self.separator.join([item['url'] for item in response[ 'data']]) return image_urls if image_urls else 'No image was generated'
Run query through OpenAI and parse result.
dict
"""Return dictionary representation of agent.""" _dict = super().dict() try: _type = self._agent_type except NotImplementedError: _type = None if isinstance(_type, AgentType): _dict['_type'] = str(_type.value) elif _type is not None: _dict['_type'] = _type return _dict
def dict(self, **kwargs: Any) ->Dict: """Return dictionary representation of agent.""" _dict = super().dict() try: _type = self._agent_type except NotImplementedError: _type = None if isinstance(_type, AgentType): _dict['_type'] = str(_type.value) elif _type is not None: _dict['_type'] = _type return _dict
Return dictionary representation of agent.
_Slice
if t.lower: self.dispatch(t.lower) self.write(':') if t.upper: self.dispatch(t.upper) if t.step: self.write(':') self.dispatch(t.step)
def _Slice(self, t): if t.lower: self.dispatch(t.lower) self.write(':') if t.upper: self.dispatch(t.upper) if t.step: self.write(':') self.dispatch(t.step)
null
__init__
super().__init__() self.ignore_case = ignore_case self.ignore_punctuation = ignore_punctuation self.ignore_numbers = ignore_numbers
def __init__(self, *, ignore_case: bool=False, ignore_punctuation: bool= False, ignore_numbers: bool=False, **kwargs: Any): super().__init__() self.ignore_case = ignore_case self.ignore_punctuation = ignore_punctuation self.ignore_numbers = ignore_numbers
null
test_embaas_embed_query
"""Test embaas embeddings with multiple texts.""" text = 'foo' embeddings = EmbaasEmbeddings() output = embeddings.embed_query(text) assert len(output) == 1024
def test_embaas_embed_query() ->None: """Test embaas embeddings with multiple texts.""" text = 'foo' embeddings = EmbaasEmbeddings() output = embeddings.embed_query(text) assert len(output) == 1024
Test embaas embeddings with multiple texts.
pretty_print_dict
return title + '\n' + json.dumps(d, indent=4)
def pretty_print_dict(title: str, d: dict) ->str: return title + '\n' + json.dumps(d, indent=4)
null
test_llm_chain_extractor_empty
texts = ['I love chocolate chip cookies—my mother makes great cookies.', "Don't you just love Caesar salad?", "Let's go to Olive Garden!"] doc = Document(page_content=' '.join(texts)) compressor = LLMChainExtractor.from_llm(ChatOpenAI()) actual = compressor.compress_documents([doc], 'Tell me about the Roman Empire') assert len(actual) == 0
def test_llm_chain_extractor_empty() ->None: texts = ['I love chocolate chip cookies—my mother makes great cookies.', "Don't you just love Caesar salad?", "Let's go to Olive Garden!"] doc = Document(page_content=' '.join(texts)) compressor = LLMChainExtractor.from_llm(ChatOpenAI()) actual = compressor.compress_documents([doc], 'Tell me about the Roman Empire') assert len(actual) == 0
null
get_lc_namespace
"""Get the namespace of the langchain object.""" return ['langchain', 'llms', 'fireworks']
@classmethod def get_lc_namespace(cls) ->List[str]: """Get the namespace of the langchain object.""" return ['langchain', 'llms', 'fireworks']
Get the namespace of the langchain object.
_combine_llm_outputs
if llm_outputs[0] is None: return {} return llm_outputs[0]
def _combine_llm_outputs(self, llm_outputs: List[Optional[dict]]) ->dict: if llm_outputs[0] is None: return {} return llm_outputs[0]
null
from_zapier_nla_wrapper
"""Create a toolkit from a ZapierNLAWrapper.""" actions = zapier_nla_wrapper.list() tools = [ZapierNLARunAction(action_id=action['id'], zapier_description= action['description'], params_schema=action['params'], api_wrapper= zapier_nla_wrapper) for action in actions] return cls(tools=tools)
@classmethod def from_zapier_nla_wrapper(cls, zapier_nla_wrapper: ZapierNLAWrapper ) ->'ZapierToolkit': """Create a toolkit from a ZapierNLAWrapper.""" actions = zapier_nla_wrapper.list() tools = [ZapierNLARunAction(action_id=action['id'], zapier_description= action['description'], params_schema=action['params'], api_wrapper= zapier_nla_wrapper) for action in actions] return cls(tools=tools)
Create a toolkit from a ZapierNLAWrapper.
test_load_max_content_chars
"""Test that cuts off document contents at max_content_chars.""" loader = BibtexLoader(file_path=str(BIBTEX_EXAMPLE_FILE), max_content_chars=10) doc = loader.load()[0] assert len(doc.page_content) == 10
@pytest.mark.requires('fitz', 'bibtexparser') def test_load_max_content_chars() ->None: """Test that cuts off document contents at max_content_chars.""" loader = BibtexLoader(file_path=str(BIBTEX_EXAMPLE_FILE), max_content_chars=10) doc = loader.load()[0] assert len(doc.page_content) == 10
Test that cuts off document contents at max_content_chars.
copy_repo
""" Copies a repo, ignoring git folders. Raises FileNotFound error if it can't find source """ def ignore_func(_, files): return [f for f in files if f == '.git'] shutil.copytree(source, destination, ignore=ignore_func)
def copy_repo(source: Path, destination: Path) ->None: """ Copies a repo, ignoring git folders. Raises FileNotFound error if it can't find source """ def ignore_func(_, files): return [f for f in files if f == '.git'] shutil.copytree(source, destination, ignore=ignore_func)
Copies a repo, ignoring git folders. Raises FileNotFound error if it can't find source
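A runnable sketch of the same ignore-`.git` pattern, exercised against a throwaway directory so it is self-contained; the file names are made up.

import shutil
import tempfile
from pathlib import Path

src = Path(tempfile.mkdtemp()) / "repo"
(src / ".git").mkdir(parents=True)
(src / "README.md").write_text("hello")
dst = Path(tempfile.mkdtemp()) / "copy"

# copytree's ignore callback returns the names to skip at each directory level.
shutil.copytree(src, dst, ignore=lambda _, names: [n for n in names if n == ".git"])
print(sorted(p.name for p in dst.iterdir()))  # ['README.md']; .git was not copied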
_import_outline
from langchain_community.utilities.outline import OutlineAPIWrapper return OutlineAPIWrapper
def _import_outline() ->Any: from langchain_community.utilities.outline import OutlineAPIWrapper return OutlineAPIWrapper
null
test_clear
zep_chat.clear() zep_chat.zep_client.memory.delete_memory.assert_called_once_with('test_session' )
@pytest.mark.requires('zep_python') def test_clear(mocker: MockerFixture, zep_chat: ZepChatMessageHistory) ->None: zep_chat.clear() zep_chat.zep_client.memory.delete_memory.assert_called_once_with( 'test_session')
null
_call_before_predict
...
@abstractmethod def _call_before_predict(self, inputs: Dict[str, Any]) ->TEvent: ...
null
test_pickbest_textembedder_w_full_label_w_emb
feature_embedder = pick_best_chain.PickBestFeatureEmbedder(auto_embed=False, model=MockEncoder()) str1 = '0' str2 = '1' str3 = '2' encoded_str1 = rl_chain.stringify_embedding(list(encoded_keyword + str1)) encoded_str2 = rl_chain.stringify_embedding(list(encoded_keyword + str2)) encoded_str3 = rl_chain.stringify_embedding(list(encoded_keyword + str3)) ctx_str_1 = 'context1' encoded_ctx_str_1 = rl_chain.stringify_embedding(list(encoded_keyword + ctx_str_1)) named_actions = {'action1': rl_chain.Embed([str1, str2, str3])} context = {'context': rl_chain.Embed(ctx_str_1)} expected = f"""shared |context {encoded_ctx_str_1} 0:-0.0:1.0 |action1 {encoded_str1} |action1 {encoded_str2} |action1 {encoded_str3} """ selected = pick_best_chain.PickBestSelected(index=0, probability=1.0, score=0.0 ) event = pick_best_chain.PickBestEvent(inputs={}, to_select_from= named_actions, based_on=context, selected=selected) vw_ex_str = feature_embedder.format(event) assert vw_ex_str == expected
@pytest.mark.requires('vowpal_wabbit_next') def test_pickbest_textembedder_w_full_label_w_emb() ->None: feature_embedder = pick_best_chain.PickBestFeatureEmbedder(auto_embed= False, model=MockEncoder()) str1 = '0' str2 = '1' str3 = '2' encoded_str1 = rl_chain.stringify_embedding(list(encoded_keyword + str1)) encoded_str2 = rl_chain.stringify_embedding(list(encoded_keyword + str2)) encoded_str3 = rl_chain.stringify_embedding(list(encoded_keyword + str3)) ctx_str_1 = 'context1' encoded_ctx_str_1 = rl_chain.stringify_embedding(list(encoded_keyword + ctx_str_1)) named_actions = {'action1': rl_chain.Embed([str1, str2, str3])} context = {'context': rl_chain.Embed(ctx_str_1)} expected = f"""shared |context {encoded_ctx_str_1} 0:-0.0:1.0 |action1 {encoded_str1} |action1 {encoded_str2} |action1 {encoded_str3} """ selected = pick_best_chain.PickBestSelected(index=0, probability=1.0, score=0.0) event = pick_best_chain.PickBestEvent(inputs={}, to_select_from= named_actions, based_on=context, selected=selected) vw_ex_str = feature_embedder.format(event) assert vw_ex_str == expected
null
assert_docs
for doc in docs: assert doc.page_content assert doc.metadata assert set(doc.metadata) == {'Published', 'Title', 'Authors', 'Summary'}
def assert_docs(docs: List[Document]) ->None: for doc in docs: assert doc.page_content assert doc.metadata assert set(doc.metadata) == {'Published', 'Title', 'Authors', 'Summary' }
null