Columns (with string-length ranges): method_name 1–78, method_body 3–9.66k, full_code 31–10.7k, docstring 4–4.74k
_unbatch
@staticmethod
def _unbatch(batch_of_texts: List[List[Any]]) -> List[Any]:
    if len(batch_of_texts) == 1 and len(batch_of_texts[0]) == 1:
        return batch_of_texts[0]
    texts = []
    for sublist in batch_of_texts:
        texts.extend(sublist)
    return texts
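A minimal standalone sketch of how this helper behaves, with the method lifted out of its (unshown) class for illustration:

from typing import Any, List

def _unbatch(batch_of_texts: List[List[Any]]) -> List[Any]:
    # A single one-element batch is returned unchanged (still wrapped);
    # anything else is flattened into one list.
    if len(batch_of_texts) == 1 and len(batch_of_texts[0]) == 1:
        return batch_of_texts[0]
    texts = []
    for sublist in batch_of_texts:
        texts.extend(sublist)
    return texts

assert _unbatch([['a']]) == ['a']
assert _unbatch([['a', 'b'], ['c']]) == ['a', 'b', 'c']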
_convert_dict_to_message
@staticmethod
def _convert_dict_to_message(_dict: Mapping[str, Any]) -> BaseMessage:
    role = _dict['role']
    content = _dict['content']
    if role == 'user':
        return HumanMessage(content=content)
    elif role == 'assistant':
        return AIMessage(content=content)
    elif role == 'system':
        return SystemMessage(content=content)
    else:
        return ChatMessage(content=content, role=role)
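A runnable sketch of the same role dispatch, assuming the message classes come from langchain_core.messages (the original class and its imports are not shown):

from langchain_core.messages import (AIMessage, ChatMessage, HumanMessage,
                                     SystemMessage)

def convert_dict_to_message(d):  # standalone copy for illustration
    role, content = d['role'], d['content']
    if role == 'user':
        return HumanMessage(content=content)
    if role == 'assistant':
        return AIMessage(content=content)
    if role == 'system':
        return SystemMessage(content=content)
    # Any other role falls through to a generic ChatMessage.
    return ChatMessage(content=content, role=role)

print(convert_dict_to_message({'role': 'user', 'content': 'hi'}))
# HumanMessage(content='hi')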
process_response
""" Transform the Diffbot NLP response into a GraphDocument. Args: payload (Dict[str, Any]): The JSON response from Diffbot's NLP API. document (Document): The original document. Returns: GraphDocument: The transformed document as a graph. """ if 'facts' not in payload or not payload['facts']: return GraphDocument(nodes=[], relationships=[], source=document) nodes_list = NodesList() relationships = list() for record in payload['facts']: if record['confidence'] < self.fact_threshold_confidence: continue if not record['value']['allTypes']: continue source_id = record['entity']['allUris'][0] if record['entity']['allUris' ] else record['entity']['name'] source_label = record['entity']['allTypes'][0]['name'].capitalize() source_name = record['entity']['name'] source_node = Node(id=source_id, type=source_label) nodes_list.add_node_property((source_id, source_label), {'name': source_name}) target_id = record['value']['allUris'][0] if record['value']['allUris' ] else record['value']['name'] target_label = record['value']['allTypes'][0]['name'].capitalize() target_name = record['value']['name'] if target_label in FACT_TO_PROPERTY_TYPE: nodes_list.add_node_property((source_id, source_label), { format_property_key(record['property']['name']): target_name}) else: target_node = Node(id=target_id, type=target_label) nodes_list.add_node_property((target_id, target_label), {'name': target_name}) rel_type = record['property']['name'].replace(' ', '_').upper() if self.simplified_schema: rel_type = self.simplified_schema.get_type(rel_type) rel_properties = dict() relationship_evidence = [el['passage'] for el in record['evidence']][0] if self.include_evidence: rel_properties.update({'evidence': relationship_evidence}) if self.include_qualifiers and record.get('qualifiers'): for property in record['qualifiers']: prop_key = format_property_key(property['property']['name']) rel_properties[prop_key] = property['value']['name'] relationship = Relationship(source=source_node, target=target_node, type=rel_type, properties=rel_properties) relationships.append(relationship) return GraphDocument(nodes=nodes_list.return_node_list(), relationships= relationships, source=document)
def process_response(self, payload: Dict[str, Any], document: Document ) ->GraphDocument: """ Transform the Diffbot NLP response into a GraphDocument. Args: payload (Dict[str, Any]): The JSON response from Diffbot's NLP API. document (Document): The original document. Returns: GraphDocument: The transformed document as a graph. """ if 'facts' not in payload or not payload['facts']: return GraphDocument(nodes=[], relationships=[], source=document) nodes_list = NodesList() relationships = list() for record in payload['facts']: if record['confidence'] < self.fact_threshold_confidence: continue if not record['value']['allTypes']: continue source_id = record['entity']['allUris'][0] if record['entity'][ 'allUris'] else record['entity']['name'] source_label = record['entity']['allTypes'][0]['name'].capitalize() source_name = record['entity']['name'] source_node = Node(id=source_id, type=source_label) nodes_list.add_node_property((source_id, source_label), {'name': source_name}) target_id = record['value']['allUris'][0] if record['value']['allUris' ] else record['value']['name'] target_label = record['value']['allTypes'][0]['name'].capitalize() target_name = record['value']['name'] if target_label in FACT_TO_PROPERTY_TYPE: nodes_list.add_node_property((source_id, source_label), { format_property_key(record['property']['name']): target_name}) else: target_node = Node(id=target_id, type=target_label) nodes_list.add_node_property((target_id, target_label), {'name': target_name}) rel_type = record['property']['name'].replace(' ', '_').upper() if self.simplified_schema: rel_type = self.simplified_schema.get_type(rel_type) rel_properties = dict() relationship_evidence = [el['passage'] for el in record['evidence'] ][0] if self.include_evidence: rel_properties.update({'evidence': relationship_evidence}) if self.include_qualifiers and record.get('qualifiers'): for property in record['qualifiers']: prop_key = format_property_key(property['property']['name'] ) rel_properties[prop_key] = property['value']['name'] relationship = Relationship(source=source_node, target= target_node, type=rel_type, properties=rel_properties) relationships.append(relationship) return GraphDocument(nodes=nodes_list.return_node_list(), relationships =relationships, source=document)
Transform the Diffbot NLP response into a GraphDocument. Args: payload (Dict[str, Any]): The JSON response from Diffbot's NLP API. document (Document): The original document. Returns: GraphDocument: The transformed document as a graph.
_llm_type
"""Return type of llm.""" return 'manifest'
@property def _llm_type(self) ->str: """Return type of llm.""" return 'manifest'
Return type of llm.
test_runnable_sequence_transform
def test_runnable_sequence_transform() -> None:
    llm = FakeStreamingListLLM(responses=['foo-lish'])
    chain: Runnable = llm | StrOutputParser()
    stream = chain.transform(llm.stream('Hi there!'))
    chunks = []
    for chunk in stream:
        chunks.append(chunk)
    assert len(chunks) == len('foo-lish')
    assert ''.join(chunks) == 'foo-lish'
_load_json_block
def _load_json_block(self, serialized_block: str) -> str:
    try:
        response_content = json.loads(serialized_block, strict=False)
        return response_content.get('response', 'ERROR parsing response.')
    except json.JSONDecodeError:
        return 'ERROR parsing response.'
    except:
        raise
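A standalone sketch of the parsing behaviour (self is unused above, so the body works as a free function):

import json

def load_json_block(serialized_block: str) -> str:  # free-function copy for illustration
    try:
        response_content = json.loads(serialized_block, strict=False)
        return response_content.get('response', 'ERROR parsing response.')
    except json.JSONDecodeError:
        return 'ERROR parsing response.'

print(load_json_block('{"response": "hello"}'))  # hello
print(load_json_block('not json'))               # ERROR parsing response.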
_get_elements
def _get_elements(self) -> List:
    from unstructured.partition.auto import partition
    local_prefix = 'local://'
    if self.presign:
        with tempfile.TemporaryDirectory() as temp_dir:
            file_path = f"{temp_dir}/{self.path.split('/')[-1]}"
            os.makedirs(os.path.dirname(file_path), exist_ok=True)
            response = requests.get(self.url)
            response.raise_for_status()
            with open(file_path, mode='wb') as file:
                file.write(response.content)
            return partition(filename=file_path)
    elif not self.url.startswith(local_prefix):
        raise ValueError("Non pre-signed URLs are supported only with 'local' blockstore")
    else:
        local_path = self.url[len(local_prefix):]
        return partition(filename=local_path)
test_user_defined_scorer
@pytest.mark.requires('vowpal_wabbit_next', 'sentence_transformers')
def test_user_defined_scorer() -> None:
    llm, PROMPT = setup()

    class CustomSelectionScorer(rl_chain.SelectionScorer):
        def score_response(self, inputs: Dict[str, Any], llm_response: str,
                           event: pick_best_chain.PickBestEvent) -> float:
            score = 200
            return score

    chain = pick_best_chain.PickBest.from_llm(
        llm=llm,
        prompt=PROMPT,
        selection_scorer=CustomSelectionScorer(),
        feature_embedder=pick_best_chain.PickBestFeatureEmbedder(
            auto_embed=False, model=MockEncoder()),
    )
    actions = ['0', '1', '2']
    response = chain.run(User=rl_chain.BasedOn('Context'),
                         action=rl_chain.ToSelectFrom(actions))
    assert response['response'] == 'hey'
    selection_metadata = response['selection_metadata']
    assert selection_metadata.selected.score == 200.0
convert_message_to_dict
"""Convert a message to a dictionary that can be passed to the API.""" message_dict: Dict[str, Any] if isinstance(message, ChatMessage): message_dict = {'role': message.role, 'content': message.content} elif isinstance(message, HumanMessage): message_dict = {'role': 'user', 'content': message.content} elif isinstance(message, AIMessage): message_dict = {'role': 'assistant', 'content': message.content} if 'function_call' in message.additional_kwargs: message_dict['function_call'] = message.additional_kwargs[ 'function_call'] if message_dict['content'] == '': message_dict['content'] = None elif isinstance(message, FunctionMessage): message_dict = {'role': 'function', 'content': message.content, 'name': message.name} else: raise TypeError(f'Got unknown type {message}') return message_dict
def convert_message_to_dict(message: BaseMessage) ->dict: """Convert a message to a dictionary that can be passed to the API.""" message_dict: Dict[str, Any] if isinstance(message, ChatMessage): message_dict = {'role': message.role, 'content': message.content} elif isinstance(message, HumanMessage): message_dict = {'role': 'user', 'content': message.content} elif isinstance(message, AIMessage): message_dict = {'role': 'assistant', 'content': message.content} if 'function_call' in message.additional_kwargs: message_dict['function_call'] = message.additional_kwargs[ 'function_call'] if message_dict['content'] == '': message_dict['content'] = None elif isinstance(message, FunctionMessage): message_dict = {'role': 'function', 'content': message.content, 'name': message.name} else: raise TypeError(f'Got unknown type {message}') return message_dict
Convert a message to a dictionary that can be passed to the API.
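A quick usage sketch, assuming the function above is in scope along with its langchain_core.messages imports:

from langchain_core.messages import AIMessage, HumanMessage

print(convert_message_to_dict(HumanMessage(content='hi')))
# {'role': 'user', 'content': 'hi'}
print(convert_message_to_dict(AIMessage(content='hello')))
# {'role': 'assistant', 'content': 'hello'}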
_chain_type
@property
def _chain_type(self) -> str:
    return 'vector_db_qa_with_sources_chain'
test_all_imports
def test_all_imports() -> None:
    assert set(__all__) == set(EXPECTED_ALL)
compress_documents
"""Compress retrieved documents given the query context."""
@abstractmethod def compress_documents(self, documents: Sequence[Document], query: str, callbacks: Optional[Callbacks]=None) ->Sequence[Document]: """Compress retrieved documents given the query context."""
Compress retrieved documents given the query context.
test_neo4jvector_with_metadatas_with_scores
"""Test end to end construction and search.""" metadatas = [{'page': str(i)} for i in range(len(texts))] docsearch = Neo4jVector.from_texts(texts=texts, embedding= FakeEmbeddingsWithOsDimension(), metadatas=metadatas, url=url, username =username, password=password, pre_delete_collection=True) output = docsearch.similarity_search_with_score('foo', k=1) assert output == [(Document(page_content='foo', metadata={'page': '0'}), 1.0)] drop_vector_indexes(docsearch)
def test_neo4jvector_with_metadatas_with_scores() ->None: """Test end to end construction and search.""" metadatas = [{'page': str(i)} for i in range(len(texts))] docsearch = Neo4jVector.from_texts(texts=texts, embedding= FakeEmbeddingsWithOsDimension(), metadatas=metadatas, url=url, username=username, password=password, pre_delete_collection=True) output = docsearch.similarity_search_with_score('foo', k=1) assert output == [(Document(page_content='foo', metadata={'page': '0'}), 1.0)] drop_vector_indexes(docsearch)
Test end to end construction and search.
_print_keys
def _print_keys(keys: Union[str, Sequence[str]]) -> str:
    if isinstance(keys, str):
        return f"'{keys}'"
    else:
        return ', '.join(f"'{k}'" for k in keys)
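The helper is self-contained, so a runnable copy with example outputs:

from typing import Sequence, Union

def _print_keys(keys: Union[str, Sequence[str]]) -> str:
    # Quote a single key, or quote-and-comma-join a sequence of keys.
    if isinstance(keys, str):
        return f"'{keys}'"
    return ', '.join(f"'{k}'" for k in keys)

assert _print_keys('foo') == "'foo'"
assert _print_keys(['foo', 'bar']) == "'foo', 'bar'"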
test_chat_bedrock_streaming_generation_info
"""Test that generation info is preserved when streaming.""" class _FakeCallback(FakeCallbackHandler): saved_things: dict = {} def on_llm_end(self, *args: Any, **kwargs: Any) ->Any: self.saved_things['generation'] = args[0] callback = _FakeCallback() callback_manager = CallbackManager([callback]) chat = BedrockChat(model_id='anthropic.claude-v2', callback_manager= callback_manager) list(chat.stream('hi')) generation = callback.saved_things['generation'] assert generation.generations[0][0].text == ' Hello!'
@pytest.mark.scheduled def test_chat_bedrock_streaming_generation_info() ->None: """Test that generation info is preserved when streaming.""" class _FakeCallback(FakeCallbackHandler): saved_things: dict = {} def on_llm_end(self, *args: Any, **kwargs: Any) ->Any: self.saved_things['generation'] = args[0] callback = _FakeCallback() callback_manager = CallbackManager([callback]) chat = BedrockChat(model_id='anthropic.claude-v2', callback_manager= callback_manager) list(chat.stream('hi')) generation = callback.saved_things['generation'] assert generation.generations[0][0].text == ' Hello!'
Test that generation info is preserved when streaming.
memory_variables
"""Will always return list of memory variables. :meta private: """ return [self.memory_key]
@property def memory_variables(self) ->List[str]: """Will always return list of memory variables. :meta private: """ return [self.memory_key]
Will always return list of memory variables. :meta private:
load_agent_executor
""" Load an agent executor. Args: llm: BaseLanguageModel tools: List[BaseTool] verbose: bool. Defaults to False. include_task_in_prompt: bool. Defaults to False. Returns: ChainExecutor """ input_variables = ['previous_steps', 'current_step', 'agent_scratchpad'] template = HUMAN_MESSAGE_TEMPLATE if include_task_in_prompt: input_variables.append('objective') template = TASK_PREFIX + template agent = StructuredChatAgent.from_llm_and_tools(llm, tools, human_message_template=template, input_variables=input_variables) agent_executor = AgentExecutor.from_agent_and_tools(agent=agent, tools= tools, verbose=verbose) return ChainExecutor(chain=agent_executor)
def load_agent_executor(llm: BaseLanguageModel, tools: List[BaseTool], verbose: bool=False, include_task_in_prompt: bool=False) ->ChainExecutor: """ Load an agent executor. Args: llm: BaseLanguageModel tools: List[BaseTool] verbose: bool. Defaults to False. include_task_in_prompt: bool. Defaults to False. Returns: ChainExecutor """ input_variables = ['previous_steps', 'current_step', 'agent_scratchpad'] template = HUMAN_MESSAGE_TEMPLATE if include_task_in_prompt: input_variables.append('objective') template = TASK_PREFIX + template agent = StructuredChatAgent.from_llm_and_tools(llm, tools, human_message_template=template, input_variables=input_variables) agent_executor = AgentExecutor.from_agent_and_tools(agent=agent, tools= tools, verbose=verbose) return ChainExecutor(chain=agent_executor)
Load an agent executor. Args: llm: BaseLanguageModel tools: List[BaseTool] verbose: bool. Defaults to False. include_task_in_prompt: bool. Defaults to False. Returns: ChainExecutor
test_json_distance_evaluator_evaluate_strings_complex_diff
prediction = '{"a":1, "b": {"c": 2, "d": 3}}' reference = '{"a": 1, "b": {"c": 2, "d": 4}}' result = json_distance_evaluator._evaluate_strings(prediction=prediction, reference=reference) pytest.approx(1 / len(reference.replace(' ', '')), result['score'])
@pytest.mark.requires('rapidfuzz') def test_json_distance_evaluator_evaluate_strings_complex_diff( json_distance_evaluator: JsonEditDistanceEvaluator) ->None: prediction = '{"a":1, "b": {"c": 2, "d": 3}}' reference = '{"a": 1, "b": {"c": 2, "d": 4}}' result = json_distance_evaluator._evaluate_strings(prediction= prediction, reference=reference) pytest.approx(1 / len(reference.replace(' ', '')), result['score'])
null
test_amazontextract_loader
@pytest.mark.parametrize(
    'file_path, features, docs_length, create_client',
    [
        ('https://amazon-textract-public-content.s3.us-east-2.amazonaws.com/langchain/alejandro_rosalez_sample_1.jpg',
         ['FORMS', 'TABLES', 'LAYOUT'], 1, False),
        ('https://amazon-textract-public-content.s3.us-east-2.amazonaws.com/langchain/alejandro_rosalez_sample_1.jpg',
         [], 1, False),
        ('https://amazon-textract-public-content.s3.us-east-2.amazonaws.com/langchain/alejandro_rosalez_sample_1.jpg',
         ['TABLES'], 1, False),
        ('https://amazon-textract-public-content.s3.us-east-2.amazonaws.com/langchain/alejandro_rosalez_sample_1.jpg',
         ['FORMS'], 1, False),
        ('https://amazon-textract-public-content.s3.us-east-2.amazonaws.com/langchain/alejandro_rosalez_sample_1.jpg',
         ['LAYOUT'], 1, False),
        (str(Path(__file__).parent.parent / 'examples/hello.pdf'), ['FORMS'], 1, False),
        (str(Path(__file__).parent.parent / 'examples/hello.pdf'), [], 1, False),
        ('s3://amazon-textract-public-content/langchain/layout-parser-paper.pdf',
         ['FORMS', 'TABLES', 'LAYOUT'], 16, True),
    ],
)
@pytest.mark.skip(reason='Requires AWS credentials to run')
def test_amazontextract_loader(file_path: str, features: Union[Sequence[str], None],
                               docs_length: int, create_client: bool) -> None:
    if create_client:
        import boto3
        textract_client = boto3.client('textract', region_name='us-east-2')
        loader = AmazonTextractPDFLoader(file_path, textract_features=features,
                                         client=textract_client)
    else:
        loader = AmazonTextractPDFLoader(file_path, textract_features=features)
    docs = loader.load()
    print(docs)
    assert len(docs) == docs_length
_get_client
def _get_client(api_url: Optional[str] = None,
                api_key: Optional[str] = None) -> "Client":
    # The annotation is quoted because Client is only imported inside the
    # function body; an unquoted name would raise NameError at definition time.
    try:
        from langchainhub import Client
    except ImportError as e:
        raise ImportError(
            'Could not import langchainhub, please install with '
            '`pip install langchainhub`.'
        ) from e
    return Client(api_url, api_key=api_key)
_persist_run
"""Persist a run.""" self.runs.append(run)
def _persist_run(self, run: Run) ->None: """Persist a run.""" self.runs.append(run)
Persist a run.
messages
@pytest.fixture
def messages() -> list:
    return [SystemMessage(content='You are a test user.'),
            HumanMessage(content='Hello, I am a test user.')]
test_default_chat_anthropic
def test_default_chat_anthropic(self) -> None:
    base_model = AnthropicFunctions(model='claude-2')
    self.assertIsInstance(base_model.model, ChatAnthropic)
    model = base_model.bind(
        functions=[{
            'name': 'get_current_weather',
            'description': 'Get the current weather in a given location',
            'parameters': {
                'type': 'object',
                'properties': {
                    'location': {
                        'type': 'string',
                        'description': 'The city and state, e.g. San Francisco, CA',
                    },
                    'unit': {'type': 'string', 'enum': ['celsius', 'fahrenheit']},
                },
                'required': ['location'],
            },
        }],
        function_call={'name': 'get_current_weather'},
    )
    res = model.invoke("What's the weather in San Francisco?")
    function_call = res.additional_kwargs.get('function_call')
    assert function_call
    self.assertEqual(function_call.get('name'), 'get_current_weather')
    self.assertEqual(function_call.get('arguments'),
                     '{"location": "San Francisco, CA", "unit": "fahrenheit"}')
update
""" Update the graph. """ from rdflib.exceptions import ParserError try: self.graph.update(query) except ParserError as e: raise ValueError(f'Generated SPARQL statement is invalid\n{e}') if self.local_copy: self.graph.serialize(destination=self.local_copy, format=self. local_copy.split('.')[-1]) else: raise ValueError('No target file specified for saving the updated file.')
def update(self, query: str) ->None: """ Update the graph. """ from rdflib.exceptions import ParserError try: self.graph.update(query) except ParserError as e: raise ValueError(f'Generated SPARQL statement is invalid\n{e}') if self.local_copy: self.graph.serialize(destination=self.local_copy, format=self. local_copy.split('.')[-1]) else: raise ValueError( 'No target file specified for saving the updated file.')
Update the graph.
get_available_models
"""Get available models from Anyscale API.""" try: anyscale_api_key = anyscale_api_key or os.environ['ANYSCALE_API_KEY'] except KeyError as e: raise ValueError( 'Anyscale API key must be passed as keyword argument or set in environment variable ANYSCALE_API_KEY.' ) from e models_url = f'{anyscale_api_base}/models' models_response = requests.get(models_url, headers={'Authorization': f'Bearer {anyscale_api_key}'}) if models_response.status_code != 200: raise ValueError( f'Error getting models from {models_url}: {models_response.status_code}' ) return {model['id'] for model in models_response.json()['data']}
@staticmethod def get_available_models(anyscale_api_key: Optional[str]=None, anyscale_api_base: str=DEFAULT_API_BASE) ->Set[str]: """Get available models from Anyscale API.""" try: anyscale_api_key = anyscale_api_key or os.environ['ANYSCALE_API_KEY'] except KeyError as e: raise ValueError( 'Anyscale API key must be passed as keyword argument or set in environment variable ANYSCALE_API_KEY.' ) from e models_url = f'{anyscale_api_base}/models' models_response = requests.get(models_url, headers={'Authorization': f'Bearer {anyscale_api_key}'}) if models_response.status_code != 200: raise ValueError( f'Error getting models from {models_url}: {models_response.status_code}' ) return {model['id'] for model in models_response.json()['data']}
Get available models from Anyscale API.
query
def query(self, query_vector: Union[List[float], None], query: Union[str, None],
          k: int, fetch_k: int, vector_query_field: str, text_field: str,
          filter: List[dict], similarity: Union[DistanceStrategy, None]) -> Dict:
    return {
        'query': {
            'bool': {
                'must': [{
                    'text_expansion': {
                        f'{vector_query_field}.tokens': {
                            'model_id': self.model_id,
                            'model_text': query,
                        }
                    }
                }],
                'filter': filter,
            }
        }
    }
test_remove_style
@pytest.mark.requires('html2text')
def test_remove_style() -> None:
    html2text_transformer = Html2TextTransformer()
    with_style_html = '<html><style>my_funky_style</style><p>First paragraph.</p></html>'
    documents = [Document(page_content=with_style_html)]
    docs_transformed = html2text_transformer.transform_documents(documents)
    assert docs_transformed[0].page_content == 'First paragraph.\n\n'
from_texts
""" Class method that returns a ZepVectorStore instance initialized from texts. If the collection does not exist, it will be created. Args: texts (List[str]): The list of texts to add to the vectorstore. embedding (Optional[Embeddings]): Optional embedding function to use to embed the texts. metadatas (Optional[List[Dict[str, Any]]]): Optional list of metadata associated with the texts. collection_name (str): The name of the collection in the Zep store. api_url (str): The URL of the Zep API. api_key (Optional[str]): The API key for the Zep API. config (Optional[CollectionConfig]): The configuration for the collection. **kwargs: Additional parameters specific to the vectorstore. Returns: ZepVectorStore: An instance of ZepVectorStore. """ vecstore = cls(collection_name, api_url, api_key=api_key, config=config, embedding=embedding) vecstore.add_texts(texts, metadatas) return vecstore
@classmethod def from_texts(cls, texts: List[str], embedding: Optional[Embeddings]=None, metadatas: Optional[List[dict]]=None, collection_name: str='', api_url: str='', api_key: Optional[str]=None, config: Optional[CollectionConfig] =None, **kwargs: Any) ->ZepVectorStore: """ Class method that returns a ZepVectorStore instance initialized from texts. If the collection does not exist, it will be created. Args: texts (List[str]): The list of texts to add to the vectorstore. embedding (Optional[Embeddings]): Optional embedding function to use to embed the texts. metadatas (Optional[List[Dict[str, Any]]]): Optional list of metadata associated with the texts. collection_name (str): The name of the collection in the Zep store. api_url (str): The URL of the Zep API. api_key (Optional[str]): The API key for the Zep API. config (Optional[CollectionConfig]): The configuration for the collection. **kwargs: Additional parameters specific to the vectorstore. Returns: ZepVectorStore: An instance of ZepVectorStore. """ vecstore = cls(collection_name, api_url, api_key=api_key, config=config, embedding=embedding) vecstore.add_texts(texts, metadatas) return vecstore
Class method that returns a ZepVectorStore instance initialized from texts. If the collection does not exist, it will be created. Args: texts (List[str]): The list of texts to add to the vectorstore. embedding (Optional[Embeddings]): Optional embedding function to use to embed the texts. metadatas (Optional[List[Dict[str, Any]]]): Optional list of metadata associated with the texts. collection_name (str): The name of the collection in the Zep store. api_url (str): The URL of the Zep API. api_key (Optional[str]): The API key for the Zep API. config (Optional[CollectionConfig]): The configuration for the collection. **kwargs: Additional parameters specific to the vectorstore. Returns: ZepVectorStore: An instance of ZepVectorStore.
_get_ddg_search
def _get_ddg_search(**kwargs: Any) -> BaseTool:
    return DuckDuckGoSearchRun(api_wrapper=DuckDuckGoSearchAPIWrapper(**kwargs))
load
"""Load sitemap.""" if self.is_local: try: import bs4 except ImportError: raise ImportError( 'beautifulsoup4 package not found, please install it with `pip install beautifulsoup4`' ) fp = open(self.web_path) soup = bs4.BeautifulSoup(fp, 'xml') else: soup = self._scrape(self.web_path, parser='xml') els = self.parse_sitemap(soup) if self.blocksize is not None: elblocks = list(_batch_block(els, self.blocksize)) blockcount = len(elblocks) if blockcount - 1 < self.blocknum: raise ValueError( 'Selected sitemap does not contain enough blocks for given blocknum' ) else: els = elblocks[self.blocknum] results = self.scrape_all([el['loc'].strip() for el in els if 'loc' in el]) return [Document(page_content=self.parsing_function(results[i]), metadata= self.meta_function(els[i], results[i])) for i in range(len(results))]
def load(self) ->List[Document]: """Load sitemap.""" if self.is_local: try: import bs4 except ImportError: raise ImportError( 'beautifulsoup4 package not found, please install it with `pip install beautifulsoup4`' ) fp = open(self.web_path) soup = bs4.BeautifulSoup(fp, 'xml') else: soup = self._scrape(self.web_path, parser='xml') els = self.parse_sitemap(soup) if self.blocksize is not None: elblocks = list(_batch_block(els, self.blocksize)) blockcount = len(elblocks) if blockcount - 1 < self.blocknum: raise ValueError( 'Selected sitemap does not contain enough blocks for given blocknum' ) else: els = elblocks[self.blocknum] results = self.scrape_all([el['loc'].strip() for el in els if 'loc' in el]) return [Document(page_content=self.parsing_function(results[i]), metadata=self.meta_function(els[i], results[i])) for i in range(len (results))]
Load sitemap.
__init__
def __init__(self, message: str =
             'The prompt contains PII entities and cannot be processed'):
    self.message = message
    super().__init__(self.message)
combine_docs
"""Combine documents in a map reduce manner. Combine by mapping first chain over all documents, then reducing the results. This reducing can be done recursively if needed (if there are many documents). """ map_results = self.llm_chain.apply([{self.document_variable_name: d. page_content, **kwargs} for d in docs], callbacks=callbacks) question_result_key = self.llm_chain.output_key result_docs = [Document(page_content=r[question_result_key], metadata=docs[ i].metadata) for i, r in enumerate(map_results)] result, extra_return_dict = self.reduce_documents_chain.combine_docs( result_docs, token_max=token_max, callbacks=callbacks, **kwargs) if self.return_intermediate_steps: intermediate_steps = [r[question_result_key] for r in map_results] extra_return_dict['intermediate_steps'] = intermediate_steps return result, extra_return_dict
def combine_docs(self, docs: List[Document], token_max: Optional[int]=None, callbacks: Callbacks=None, **kwargs: Any) ->Tuple[str, dict]: """Combine documents in a map reduce manner. Combine by mapping first chain over all documents, then reducing the results. This reducing can be done recursively if needed (if there are many documents). """ map_results = self.llm_chain.apply([{self.document_variable_name: d. page_content, **kwargs} for d in docs], callbacks=callbacks) question_result_key = self.llm_chain.output_key result_docs = [Document(page_content=r[question_result_key], metadata= docs[i].metadata) for i, r in enumerate(map_results)] result, extra_return_dict = self.reduce_documents_chain.combine_docs( result_docs, token_max=token_max, callbacks=callbacks, **kwargs) if self.return_intermediate_steps: intermediate_steps = [r[question_result_key] for r in map_results] extra_return_dict['intermediate_steps'] = intermediate_steps return result, extra_return_dict
Combine documents in a map reduce manner. Combine by mapping first chain over all documents, then reducing the results. This reducing can be done recursively if needed (if there are many documents).
_import_sleep_tool
def _import_sleep_tool() -> Any:
    from langchain_community.tools.sleep.tool import SleepTool
    return SleepTool
_copy
"""Copy a run.""" try: return run.copy(deep=True) except TypeError: return run.copy()
def _copy(run: Run) ->Run: """Copy a run.""" try: return run.copy(deep=True) except TypeError: return run.copy()
Copy a run.
add_texts
""" Add texts through the embeddings and add to the vectorstore. Args: texts: list of text strings to add to the jaguar vector store. metadatas: Optional list of metadatas associated with the texts. [{"m1": "v11", "m2": "v12", "m3": "v13", "filecol": "path_file1.jpg" }, {"m1": "v21", "m2": "v22", "m3": "v23", "filecol": "path_file2.jpg" }, {"m1": "v31", "m2": "v32", "m3": "v33", "filecol": "path_file3.jpg" }, {"m1": "v41", "m2": "v42", "m3": "v43", "filecol": "path_file4.jpg" }] kwargs: vector_index=name_of_vector_index file_column=name_of_file_column Returns: List of ids from adding the texts into the vectorstore """ vcol = self._vector_index filecol = kwargs.get('file_column', '') podstorevcol = self._pod + '.' + self._store + '.' + vcol q = 'textcol ' + podstorevcol js = self.run(q) if js == '': return [] textcol = js['data'] embeddings = self._embedding.embed_documents(list(texts)) ids = [] if metadatas is None: i = 0 for vec in embeddings: str_vec = [str(x) for x in vec] values_comma = ','.join(str_vec) podstore = self._pod + '.' + self._store q = 'insert into ' + podstore + ' (' q += vcol + ',' + textcol + ") values ('" + values_comma txt = texts[i].replace("'", "\\'") q += "','" + txt + "')" js = self.run(q, False) ids.append(js['zid']) i += 1 else: i = 0 for vec in embeddings: str_vec = [str(x) for x in vec] nvec, vvec, filepath = self._parseMeta(metadatas[i], filecol) if filecol != '': rc = self._jag.postFile(self._token, filepath, 1) if not rc: return [] names_comma = ','.join(nvec) names_comma += ',' + vcol values_comma = "'" + "','".join(vvec) + "'" values_comma += ",'" + ','.join(str_vec) + "'" podstore = self._pod + '.' + self._store q = 'insert into ' + podstore + ' (' q += names_comma + ',' + textcol + ') values (' + values_comma txt = texts[i].replace("'", "\\'") q += ",'" + txt + "')" if filecol != '': js = self.run(q, True) else: js = self.run(q, False) ids.append(js['zid']) i += 1 return ids
def add_texts(self, texts: List[str], metadatas: Optional[List[dict]]=None, **kwargs: Any) ->List[str]: """ Add texts through the embeddings and add to the vectorstore. Args: texts: list of text strings to add to the jaguar vector store. metadatas: Optional list of metadatas associated with the texts. [{"m1": "v11", "m2": "v12", "m3": "v13", "filecol": "path_file1.jpg" }, {"m1": "v21", "m2": "v22", "m3": "v23", "filecol": "path_file2.jpg" }, {"m1": "v31", "m2": "v32", "m3": "v33", "filecol": "path_file3.jpg" }, {"m1": "v41", "m2": "v42", "m3": "v43", "filecol": "path_file4.jpg" }] kwargs: vector_index=name_of_vector_index file_column=name_of_file_column Returns: List of ids from adding the texts into the vectorstore """ vcol = self._vector_index filecol = kwargs.get('file_column', '') podstorevcol = self._pod + '.' + self._store + '.' + vcol q = 'textcol ' + podstorevcol js = self.run(q) if js == '': return [] textcol = js['data'] embeddings = self._embedding.embed_documents(list(texts)) ids = [] if metadatas is None: i = 0 for vec in embeddings: str_vec = [str(x) for x in vec] values_comma = ','.join(str_vec) podstore = self._pod + '.' + self._store q = 'insert into ' + podstore + ' (' q += vcol + ',' + textcol + ") values ('" + values_comma txt = texts[i].replace("'", "\\'") q += "','" + txt + "')" js = self.run(q, False) ids.append(js['zid']) i += 1 else: i = 0 for vec in embeddings: str_vec = [str(x) for x in vec] nvec, vvec, filepath = self._parseMeta(metadatas[i], filecol) if filecol != '': rc = self._jag.postFile(self._token, filepath, 1) if not rc: return [] names_comma = ','.join(nvec) names_comma += ',' + vcol values_comma = "'" + "','".join(vvec) + "'" values_comma += ",'" + ','.join(str_vec) + "'" podstore = self._pod + '.' + self._store q = 'insert into ' + podstore + ' (' q += names_comma + ',' + textcol + ') values (' + values_comma txt = texts[i].replace("'", "\\'") q += ",'" + txt + "')" if filecol != '': js = self.run(q, True) else: js = self.run(q, False) ids.append(js['zid']) i += 1 return ids
Add texts through the embeddings and add to the vectorstore. Args: texts: list of text strings to add to the jaguar vector store. metadatas: Optional list of metadatas associated with the texts. [{"m1": "v11", "m2": "v12", "m3": "v13", "filecol": "path_file1.jpg" }, {"m1": "v21", "m2": "v22", "m3": "v23", "filecol": "path_file2.jpg" }, {"m1": "v31", "m2": "v32", "m3": "v33", "filecol": "path_file3.jpg" }, {"m1": "v41", "m2": "v42", "m3": "v43", "filecol": "path_file4.jpg" }] kwargs: vector_index=name_of_vector_index file_column=name_of_file_column Returns: List of ids from adding the texts into the vectorstore
__init__
def __init__(self, session_id: str = 'default', db_file: str = 'entities.db',
             table_name: str = 'memory_store', *args: Any, **kwargs: Any):
    try:
        import sqlite3
    except ImportError:
        raise ImportError(
            'Could not import sqlite3 python package. '
            'Please install it with `pip install sqlite3`.')
    super().__init__(*args, **kwargs)
    self.conn = sqlite3.connect(db_file)
    self.session_id = session_id
    self.table_name = table_name
    self._create_table_if_not_exists()
on_chain_start
"""Run when chain starts running.""" self.step += 1 self.chain_starts += 1 self.starts += 1 resp = self._init_resp() resp.update({'action': 'on_chain_start'}) resp.update(flatten_dict(serialized)) resp.update(self.get_custom_callback_meta()) for chain_input_key, chain_input_val in inputs.items(): if isinstance(chain_input_val, str): input_resp = deepcopy(resp) if self.stream_logs: self._log_stream(chain_input_val, resp, self.step) input_resp.update({chain_input_key: chain_input_val}) self.action_records.append(input_resp) else: self.comet_ml.LOGGER.warning( f'Unexpected data format provided! Input Value for {chain_input_key} will not be logged' )
def on_chain_start(self, serialized: Dict[str, Any], inputs: Dict[str, Any], **kwargs: Any) ->None: """Run when chain starts running.""" self.step += 1 self.chain_starts += 1 self.starts += 1 resp = self._init_resp() resp.update({'action': 'on_chain_start'}) resp.update(flatten_dict(serialized)) resp.update(self.get_custom_callback_meta()) for chain_input_key, chain_input_val in inputs.items(): if isinstance(chain_input_val, str): input_resp = deepcopy(resp) if self.stream_logs: self._log_stream(chain_input_val, resp, self.step) input_resp.update({chain_input_key: chain_input_val}) self.action_records.append(input_resp) else: self.comet_ml.LOGGER.warning( f'Unexpected data format provided! Input Value for {chain_input_key} will not be logged' )
Run when chain starts running.
test_octoai_embedding_query
"""Test octoai embeddings.""" document = 'foo bar' embedding = OctoAIEmbeddings(endpoint_url='<endpoint_url>', octoai_api_token='<octoai_api_token>', embed_instruction= 'Represent this input: ', query_instruction='Represent this input: ', model_kwargs=None) output = embedding.embed_query(document) assert len(output) == 768
def test_octoai_embedding_query() ->None: """Test octoai embeddings.""" document = 'foo bar' embedding = OctoAIEmbeddings(endpoint_url='<endpoint_url>', octoai_api_token='<octoai_api_token>', embed_instruction= 'Represent this input: ', query_instruction= 'Represent this input: ', model_kwargs=None) output = embedding.embed_query(document) assert len(output) == 768
Test octoai embeddings.
test_meilisearch
"""Test end to end construction and search.""" texts = ['foo', 'bar', 'baz'] vectorstore = Meilisearch.from_texts(texts=texts, embedding=FakeEmbeddings( ), url=TEST_MEILI_HTTP_ADDR, api_key=TEST_MEILI_MASTER_KEY, index_name= INDEX_NAME) self._wait_last_task() output = vectorstore.similarity_search('foo', k=1) assert output == [Document(page_content='foo')]
def test_meilisearch(self) ->None: """Test end to end construction and search.""" texts = ['foo', 'bar', 'baz'] vectorstore = Meilisearch.from_texts(texts=texts, embedding= FakeEmbeddings(), url=TEST_MEILI_HTTP_ADDR, api_key= TEST_MEILI_MASTER_KEY, index_name=INDEX_NAME) self._wait_last_task() output = vectorstore.similarity_search('foo', k=1) assert output == [Document(page_content='foo')]
Test end to end construction and search.
_url_to_pil
def _url_to_pil(image_source: str) -> Image:
    if PIL is None:
        raise ImportError(
            'PIL is required to load images. Please install it with '
            '`pip install pillow`')
    try:
        if isinstance(image_source, IMAGE_TYPES):
            return image_source
        elif _is_url(image_source):
            if image_source.startswith('gs://'):
                return _load_image_from_gcs(image_source)
            response = requests.get(image_source)
            response.raise_for_status()
            return PIL.Image.open(BytesIO(response.content))
        elif _is_b64(image_source):
            _, encoded = image_source.split(',', 1)
            data = base64.b64decode(encoded)
            return PIL.Image.open(BytesIO(data))
        elif os.path.exists(image_source):
            return PIL.Image.open(image_source)
        else:
            raise ValueError(
                'The provided string is not a valid URL, base64, or file path.')
    except Exception as e:
        raise ValueError(f'Unable to process the provided image source: {e}')
_parse_list
"""Parse a newline-separated string into a list of strings.""" lines = re.split('\\n', text.strip()) lines = [line for line in lines if line.strip()] return [re.sub('^\\s*\\d+\\.\\s*', '', line).strip() for line in lines]
@staticmethod def _parse_list(text: str) ->List[str]: """Parse a newline-separated string into a list of strings.""" lines = re.split('\\n', text.strip()) lines = [line for line in lines if line.strip()] return [re.sub('^\\s*\\d+\\.\\s*', '', line).strip() for line in lines]
Parse a newline-separated string into a list of strings.
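A runnable copy showing how numbered-list prefixes are stripped:

import re

def parse_list(text: str):  # standalone copy for illustration
    lines = re.split('\\n', text.strip())
    lines = [line for line in lines if line.strip()]
    # Strip a leading "1. ", "2. ", ... prefix from each remaining line.
    return [re.sub('^\\s*\\d+\\.\\s*', '', line).strip() for line in lines]

print(parse_list('1. first task\n2. second task\n\n3. third task'))
# ['first task', 'second task', 'third task']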
_select_relevance_score_fn
""" The 'correct' relevance function may differ depending on a few things, including: - the distance / similarity metric used by the VectorStore - the scale of your embeddings (OpenAI's are unit normed. Many others are not!) - embedding dimensionality - etc. """ if self.override_relevance_score_fn is not None: return self.override_relevance_score_fn if self.distance_strategy == DistanceStrategy.MAX_INNER_PRODUCT: return self._max_inner_product_relevance_score_fn elif self.distance_strategy == DistanceStrategy.EUCLIDEAN_DISTANCE: return self._euclidean_relevance_score_fn else: raise ValueError( 'Unknown distance strategy, must be cosine, max_inner_product, or euclidean' )
def _select_relevance_score_fn(self) ->Callable[[float], float]: """ The 'correct' relevance function may differ depending on a few things, including: - the distance / similarity metric used by the VectorStore - the scale of your embeddings (OpenAI's are unit normed. Many others are not!) - embedding dimensionality - etc. """ if self.override_relevance_score_fn is not None: return self.override_relevance_score_fn if self.distance_strategy == DistanceStrategy.MAX_INNER_PRODUCT: return self._max_inner_product_relevance_score_fn elif self.distance_strategy == DistanceStrategy.EUCLIDEAN_DISTANCE: return self._euclidean_relevance_score_fn else: raise ValueError( 'Unknown distance strategy, must be cosine, max_inner_product, or euclidean' )
The 'correct' relevance function may differ depending on a few things, including: - the distance / similarity metric used by the VectorStore - the scale of your embeddings (OpenAI's are unit normed. Many others are not!) - embedding dimensionality - etc.
test_summary_buffer_memory_summary
"""Test ConversationSummaryBufferMemory when only buffer.""" memory = ConversationSummaryBufferMemory(llm=FakeLLM(), memory_key='baz', max_token_limit=13) memory.save_context({'input': 'bar'}, {'output': 'foo'}) memory.save_context({'input': 'bar1'}, {'output': 'foo1'}) assert memory.buffer == ["""Human: bar1 AI: foo1"""] output = memory.load_memory_variables({}) assert output == {'baz': """foo Human: bar1 AI: foo1"""}
def test_summary_buffer_memory_summary() ->None: """Test ConversationSummaryBufferMemory when only buffer.""" memory = ConversationSummaryBufferMemory(llm=FakeLLM(), memory_key= 'baz', max_token_limit=13) memory.save_context({'input': 'bar'}, {'output': 'foo'}) memory.save_context({'input': 'bar1'}, {'output': 'foo1'}) assert memory.buffer == ['Human: bar1\nAI: foo1'] output = memory.load_memory_variables({}) assert output == {'baz': 'foo\nHuman: bar1\nAI: foo1'}
Test ConversationSummaryBufferMemory when only buffer.
run_async
"""Run an async coroutine. Args: coro: The coroutine to run. Coroutine[Any, Any, T] Returns: T: The result of the coroutine. """ event_loop = asyncio.get_event_loop() return event_loop.run_until_complete(coro)
def run_async(coro: Coroutine[Any, Any, T]) ->T: """Run an async coroutine. Args: coro: The coroutine to run. Coroutine[Any, Any, T] Returns: T: The result of the coroutine. """ event_loop = asyncio.get_event_loop() return event_loop.run_until_complete(coro)
Run an async coroutine. Args: coro: The coroutine to run. Coroutine[Any, Any, T] Returns: T: The result of the coroutine.
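A runnable copy with a trivial coroutine. Note that asyncio.get_event_loop() is deprecated outside a running loop on newer Python versions; asyncio.run() is the modern equivalent for this pattern:

import asyncio
from typing import Any, Coroutine, TypeVar

T = TypeVar('T')

def run_async(coro: Coroutine[Any, Any, T]) -> T:  # standalone copy for illustration
    event_loop = asyncio.get_event_loop()
    return event_loop.run_until_complete(coro)

async def add(a: int, b: int) -> int:
    return a + b

print(run_async(add(1, 2)))  # 3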
_construct_path
"""Construct the path from the deserialized input.""" path = self.api_operation.base_url + self.api_operation.path for param in self.param_mapping.path_params: path = path.replace(f'{{{param}}}', str(args.pop(param, ''))) return path
def _construct_path(self, args: Dict[str, str]) ->str: """Construct the path from the deserialized input.""" path = self.api_operation.base_url + self.api_operation.path for param in self.param_mapping.path_params: path = path.replace(f'{{{param}}}', str(args.pop(param, ''))) return path
Construct the path from the deserialized input.
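The same substitution loop, with hypothetical stand-ins for the operation and parameter mapping (neither is shown in the snippet):

# Hypothetical values standing in for self.api_operation / self.param_mapping.
base_url = 'https://api.example.com'
path_template = '/users/{user_id}/posts/{post_id}'
path_params = ['user_id', 'post_id']

args = {'user_id': '42', 'post_id': '7', 'q': 'x'}
path = base_url + path_template
for param in path_params:
    # Replace each "{param}" placeholder and consume the arg as we go.
    path = path.replace(f'{{{param}}}', str(args.pop(param, '')))

print(path)  # https://api.example.com/users/42/posts/7
print(args)  # {'q': 'x'}  (path params consumed, other args remain)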
_load_generations_from_json
"""Load generations from json. Args: generations_json (str): A string of json representing a list of generations. Raises: ValueError: Could not decode json string to list of generations. Returns: RETURN_VAL_TYPE: A list of generations. Warning: would not work well with arbitrary subclasses of `Generation` """ try: results = json.loads(generations_json) return [Generation(**generation_dict) for generation_dict in results] except json.JSONDecodeError: raise ValueError( f'Could not decode json to list of generations: {generations_json}')
def _load_generations_from_json(generations_json: str) ->RETURN_VAL_TYPE: """Load generations from json. Args: generations_json (str): A string of json representing a list of generations. Raises: ValueError: Could not decode json string to list of generations. Returns: RETURN_VAL_TYPE: A list of generations. Warning: would not work well with arbitrary subclasses of `Generation` """ try: results = json.loads(generations_json) return [Generation(**generation_dict) for generation_dict in results] except json.JSONDecodeError: raise ValueError( f'Could not decode json to list of generations: {generations_json}' )
Load generations from json. Args: generations_json (str): A string of json representing a list of generations. Raises: ValueError: Could not decode json string to list of generations. Returns: RETURN_VAL_TYPE: A list of generations. Warning: would not work well with arbitrary subclasses of `Generation`
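A self-contained sketch of the round-trip, with a dataclass standing in for langchain's Generation class:

import json
from dataclasses import dataclass

@dataclass
class Generation:  # stand-in for langchain's Generation, for illustration only
    text: str

def load_generations_from_json(generations_json: str):
    try:
        results = json.loads(generations_json)
        return [Generation(**generation_dict) for generation_dict in results]
    except json.JSONDecodeError:
        raise ValueError(
            f'Could not decode json to list of generations: {generations_json}')

print(load_generations_from_json('[{"text": "hello"}, {"text": "world"}]'))
# [Generation(text='hello'), Generation(text='world')]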
message_history
@pytest.fixture(scope='function')
def message_history() -> Iterator[MomentoChatMessageHistory]:
    from momento import CacheClient, Configurations, CredentialProvider
    cache_name = f'langchain-test-cache-{random_string()}'
    client = CacheClient(
        Configurations.Laptop.v1(),
        CredentialProvider.from_environment_variable('MOMENTO_API_KEY'),
        default_ttl=timedelta(seconds=30),
    )
    try:
        chat_message_history = MomentoChatMessageHistory(
            session_id='my-test-session', cache_client=client,
            cache_name=cache_name)
        yield chat_message_history
    finally:
        client.delete_cache(cache_name)
get_result_from_sqldb
def get_result_from_sqldb(db: SQLDatabase, cmd: str) -> Sequence[Dict[str, Any]]:
    result = db._execute(cmd, fetch='all')
    return result
test_message_chunks
def test_message_chunks() -> None:
    assert (
        AIMessageChunk(content='I am') + AIMessageChunk(content=' indeed.')
        == AIMessageChunk(content='I am indeed.')
    ), 'MessageChunk + MessageChunk should be a MessageChunk'
    assert (
        AIMessageChunk(content='I am') + HumanMessageChunk(content=' indeed.')
        == AIMessageChunk(content='I am indeed.')
    ), 'MessageChunk + MessageChunk should be a MessageChunk of same class as the left side'
    assert (
        AIMessageChunk(content='', additional_kwargs={'foo': 'bar'})
        + AIMessageChunk(content='', additional_kwargs={'baz': 'foo'})
        == AIMessageChunk(content='', additional_kwargs={'foo': 'bar', 'baz': 'foo'})
    ), 'MessageChunk + MessageChunk should be a MessageChunk with merged additional_kwargs'
    assert (
        AIMessageChunk(content='',
                       additional_kwargs={'function_call': {'name': 'web_search'}})
        + AIMessageChunk(content='',
                         additional_kwargs={'function_call': {'arguments': None}})
        + AIMessageChunk(content='',
                         additional_kwargs={'function_call': {'arguments': '{\n'}})
        + AIMessageChunk(
            content='',
            additional_kwargs={'function_call': {'arguments': '  "query": "turtles"\n}'}})
        == AIMessageChunk(
            content='',
            additional_kwargs={'function_call': {'name': 'web_search',
                                                 'arguments': '{\n  "query": "turtles"\n}'}})
    ), 'MessageChunk + MessageChunk should be a MessageChunk with merged additional_kwargs'
_load
"""Load the collection if available.""" from pymilvus import Collection if isinstance(self.col, Collection) and self._get_index() is not None: self.col.load()
def _load(self) ->None: """Load the collection if available.""" from pymilvus import Collection if isinstance(self.col, Collection) and self._get_index() is not None: self.col.load()
Load the collection if available.
__init__
def __init__(self,
             __steps: Optional[Mapping[str, Union[
                 Runnable[Input, Any],
                 Callable[[Input], Any],
                 Mapping[str, Union[Runnable[Input, Any],
                                    Callable[[Input], Any]]]]]] = None,
             **kwargs: Union[Runnable[Input, Any], Callable[[Input], Any],
                             Mapping[str, Union[Runnable[Input, Any],
                                                Callable[[Input], Any]]]]) -> None:
    merged = {**__steps} if __steps is not None else {}
    merged.update(kwargs)
    super().__init__(
        steps={key: coerce_to_runnable(r) for key, r in merged.items()})
test_chat_openai_extra_kwargs
"""Test extra kwargs to chat openai.""" llm = ChatOpenAI(foo=3, max_tokens=10) assert llm.max_tokens == 10 assert llm.model_kwargs == {'foo': 3} llm = ChatOpenAI(foo=3, model_kwargs={'bar': 2}) assert llm.model_kwargs == {'foo': 3, 'bar': 2} with pytest.raises(ValueError): ChatOpenAI(foo=3, model_kwargs={'foo': 2}) with pytest.raises(ValueError): ChatOpenAI(model_kwargs={'temperature': 0.2}) with pytest.raises(ValueError): ChatOpenAI(model_kwargs={'model': 'gpt-3.5-turbo-instruct'})
def test_chat_openai_extra_kwargs() ->None: """Test extra kwargs to chat openai.""" llm = ChatOpenAI(foo=3, max_tokens=10) assert llm.max_tokens == 10 assert llm.model_kwargs == {'foo': 3} llm = ChatOpenAI(foo=3, model_kwargs={'bar': 2}) assert llm.model_kwargs == {'foo': 3, 'bar': 2} with pytest.raises(ValueError): ChatOpenAI(foo=3, model_kwargs={'foo': 2}) with pytest.raises(ValueError): ChatOpenAI(model_kwargs={'temperature': 0.2}) with pytest.raises(ValueError): ChatOpenAI(model_kwargs={'model': 'gpt-3.5-turbo-instruct'})
Test extra kwargs to chat openai.
test__get_hours_passed
def test__get_hours_passed() -> None:
    time1 = datetime(2023, 4, 14, 14, 30)
    time2 = datetime(2023, 4, 14, 12, 0)
    expected_hours_passed = 2.5
    hours_passed = _get_hours_passed(time1, time2)
    assert hours_passed == expected_hours_passed
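The implementation of _get_hours_passed is not shown here; a plausible one consistent with this test is simply the elapsed timedelta converted to hours:

from datetime import datetime

def _get_hours_passed(time: datetime, ref_time: datetime) -> float:
    # Assumed implementation: elapsed seconds converted to hours.
    return (time - ref_time).total_seconds() / 3600

assert _get_hours_passed(datetime(2023, 4, 14, 14, 30),
                         datetime(2023, 4, 14, 12, 0)) == 2.5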
_llm_type
"""Return type of llm.""" return 'anthropic-llm'
@property def _llm_type(self) ->str: """Return type of llm.""" return 'anthropic-llm'
Return type of llm.
test_qdrant_add_texts_stores_duplicated_texts
"""Test end to end Qdrant.add_texts stores duplicated texts separately.""" from qdrant_client import QdrantClient from qdrant_client.http import models as rest client = QdrantClient(':memory:') collection_name = uuid.uuid4().hex vectors_config = rest.VectorParams(size=10, distance=rest.Distance.COSINE) if vector_name is not None: vectors_config = {vector_name: vectors_config} client.recreate_collection(collection_name, vectors_config=vectors_config) vec_store = Qdrant(client, collection_name, embeddings= ConsistentFakeEmbeddings(), vector_name=vector_name) ids = vec_store.add_texts(['abc', 'abc'], [{'a': 1}, {'a': 2}]) assert 2 == len(set(ids)) assert 2 == client.count(collection_name).count
@pytest.mark.parametrize('vector_name', [None, 'my-vector']) def test_qdrant_add_texts_stores_duplicated_texts(vector_name: Optional[str] ) ->None: """Test end to end Qdrant.add_texts stores duplicated texts separately.""" from qdrant_client import QdrantClient from qdrant_client.http import models as rest client = QdrantClient(':memory:') collection_name = uuid.uuid4().hex vectors_config = rest.VectorParams(size=10, distance=rest.Distance.COSINE) if vector_name is not None: vectors_config = {vector_name: vectors_config} client.recreate_collection(collection_name, vectors_config=vectors_config) vec_store = Qdrant(client, collection_name, embeddings= ConsistentFakeEmbeddings(), vector_name=vector_name) ids = vec_store.add_texts(['abc', 'abc'], [{'a': 1}, {'a': 2}]) assert 2 == len(set(ids)) assert 2 == client.count(collection_name).count
Test end to end Qdrant.add_texts stores duplicated texts separately.
test_find_all_links_drop_fragment
html = 'href="foobar.com/woah#section_one"' actual = find_all_links(html) assert actual == ['foobar.com/woah']
def test_find_all_links_drop_fragment() ->None: html = 'href="foobar.com/woah#section_one"' actual = find_all_links(html) assert actual == ['foobar.com/woah']
null
messages
"""Messages in this chat history.""" return messages_from_dict(self._query( f""" SELECT * FROM UNNEST (( SELECT "{self.messages_key}" FROM {self.location} WHERE _id = :session_id )) """ , session_id=self.session_id))
@property def messages(self) ->List[BaseMessage]: """Messages in this chat history.""" return messages_from_dict(self._query( f""" SELECT * FROM UNNEST (( SELECT "{self.messages_key}" FROM {self.location} WHERE _id = :session_id )) """ , session_id=self.session_id))
Messages in this chat history.
_import_gmail_GmailGetMessage
from langchain_community.tools.gmail import GmailGetMessage return GmailGetMessage
def _import_gmail_GmailGetMessage() ->Any: from langchain_community.tools.gmail import GmailGetMessage return GmailGetMessage
null
test_unstructured_loader_with_post_processor
def add_the_end(text: str) ->str: return text + 'THE END!' file_path = os.path.join(EXAMPLE_DOCS_DIRECTORY, 'layout-parser-paper.pdf') loader = UnstructuredFileLoader(file_path=file_path, post_processors=[ add_the_end], strategy='fast', mode='elements') docs = loader.load() assert len(docs) > 1 assert docs[0].page_content.endswith('THE END!')
def test_unstructured_loader_with_post_processor() ->None: def add_the_end(text: str) ->str: return text + 'THE END!' file_path = os.path.join(EXAMPLE_DOCS_DIRECTORY, 'layout-parser-paper.pdf') loader = UnstructuredFileLoader(file_path=file_path, post_processors=[ add_the_end], strategy='fast', mode='elements') docs = loader.load() assert len(docs) > 1 assert docs[0].page_content.endswith('THE END!')
null
fake_llm_sudoku
"""This is a fake LLM that responds to the sudoku problem.""" queries = {i: next_step.strip() for i, next_step in enumerate(solutions)} return FakeLLM(queries=queries, sequential_responses=True)
@pytest.fixture def fake_llm_sudoku() ->FakeLLM: """This is a fake LLM that responds to the sudoku problem.""" queries = {i: next_step.strip() for i, next_step in enumerate(solutions)} return FakeLLM(queries=queries, sequential_responses=True)
This is a fake LLM that responds to the sudoku problem.
_llm_type
"""Return type of llm.""" return 'stochasticai'
@property def _llm_type(self) ->str: """Return type of llm.""" return 'stochasticai'
Return type of llm.
get_history
input.pop('question') window = 3 data = graph.query( """ MATCH (u:User {id:$user_id})-[:HAS_SESSION]->(s:Session {id:$session_id}), (s)-[:LAST_MESSAGE]->(last_message) MATCH p=(last_message)<-[:NEXT*0..""" + str(window) + """]-() WITH p, length(p) AS length ORDER BY length DESC LIMIT 1 UNWIND reverse(nodes(p)) AS node MATCH (node)-[:HAS_ANSWER]->(answer) RETURN {question:node.text, answer:answer.text} AS result """ , params=input) history = convert_messages(data) return history.messages
def get_history(input: Dict[str, Any]) ->List[BaseMessage]:
    input.pop('question')
    window = 3
    data = graph.query(
        """
    MATCH (u:User {id:$user_id})-[:HAS_SESSION]->(s:Session {id:$session_id}),
                (s)-[:LAST_MESSAGE]->(last_message)
    MATCH p=(last_message)<-[:NEXT*0..""" + str(window) + """]-()
    WITH p, length(p) AS length
    ORDER BY length DESC LIMIT 1
    UNWIND reverse(nodes(p)) AS node
    MATCH (node)-[:HAS_ANSWER]->(answer)
    RETURN {question:node.text, answer:answer.text} AS result
    """
        , params=input)
    history = convert_messages(data)
    return history.messages
null
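A hedged invocation sketch: get_history depends on a module-level graph (a connected Neo4j wrapper) and a convert_messages helper, and expects user_id and session_id alongside the question it discards. The identifiers below are illustrative.

messages = get_history({
    'question': 'What did I ask before?',
    'user_id': 'user-123',
    'session_id': 'session-456',
})
# returns the last `window` question/answer turns, oldest first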
run
"""Run query through GoogleSearch and parse result.""" results = self._google_serper_api_results(query, gl=self.gl, hl=self.hl, num=self.k, tbs=self.tbs, search_type=self.type, **kwargs) return self._parse_results(results)
def run(self, query: str, **kwargs: Any) ->str: """Run query through GoogleSearch and parse result.""" results = self._google_serper_api_results(query, gl=self.gl, hl=self.hl, num=self.k, tbs=self.tbs, search_type=self.type, **kwargs) return self._parse_results(results)
Run query through GoogleSearch and parse result.
fn
return attrdict({'resources': {'123': attrdict({'fields': {'456': attrdict( {'paragraphs': {'123/t/text/0-14': attrdict({'text': 'This is a test', 'order': 0})}})}, 'data': {'texts': {'text': {'body': 'This is a test'} }}, 'extra': attrdict({'metadata': {'some': 'metadata'}})})}})
def fn(self: Any, **kwargs: Any) ->Any: return attrdict({'resources': {'123': attrdict({'fields': {'456': attrdict({'paragraphs': {'123/t/text/0-14': attrdict({'text': 'This is a test', 'order': 0})}})}, 'data': {'texts': {'text': { 'body': 'This is a test'}}}, 'extra': attrdict({'metadata': {'some': 'metadata'}})})}})
null
add_texts
"""Add more texts to the vectorstore. Args: texts (Iterable[str]): Iterable of strings/text to add to the vectorstore. metadatas (Optional[List[dict]], optional): Optional list of metadatas. Defaults to None. embeddings (Optional[List[List[float]]], optional): Optional pre-generated embeddings. Defaults to None. Returns: List[str]: empty list """ conn = self.connection_pool.connect() try: cur = conn.cursor() try: for i, text in enumerate(texts): metadata = metadatas[i] if metadatas else {} embedding = embeddings[i ] if embeddings else self.embedding.embed_documents([text])[0] cur.execute('INSERT INTO {} VALUES (%s, JSON_ARRAY_PACK(%s), %s)' .format(self.table_name), (text, '[{}]'.format(','.join(map (str, embedding))), json.dumps(metadata))) finally: cur.close() finally: conn.close() return []
def add_texts(self, texts: Iterable[str], metadatas: Optional[List[dict]]= None, embeddings: Optional[List[List[float]]]=None, **kwargs: Any) ->List[ str]: """Add more texts to the vectorstore. Args: texts (Iterable[str]): Iterable of strings/text to add to the vectorstore. metadatas (Optional[List[dict]], optional): Optional list of metadatas. Defaults to None. embeddings (Optional[List[List[float]]], optional): Optional pre-generated embeddings. Defaults to None. Returns: List[str]: empty list """ conn = self.connection_pool.connect() try: cur = conn.cursor() try: for i, text in enumerate(texts): metadata = metadatas[i] if metadatas else {} embedding = embeddings[i ] if embeddings else self.embedding.embed_documents([text] )[0] cur.execute( 'INSERT INTO {} VALUES (%s, JSON_ARRAY_PACK(%s), %s)'. format(self.table_name), (text, '[{}]'.format(','.join( map(str, embedding))), json.dumps(metadata))) finally: cur.close() finally: conn.close() return []
Add more texts to the vectorstore. Args: texts (Iterable[str]): Iterable of strings/text to add to the vectorstore. metadatas (Optional[List[dict]], optional): Optional list of metadatas. Defaults to None. embeddings (Optional[List[List[float]]], optional): Optional pre-generated embeddings. Defaults to None. Returns: List[str]: empty list
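An illustrative call, with `store` standing in for a constructed instance of this vectorstore. Passing pre-computed embeddings skips the per-text embed_documents round-trip; either way the method deliberately returns an empty list rather than row IDs.

store.add_texts(
    texts=['first document', 'second document'],
    metadatas=[{'source': 'a'}, {'source': 'b'}],
    embeddings=None,  # computed one text at a time via self.embedding
)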
load_memory_variables
"""Return history buffer.""" if self.return_messages: buffer: Any = [self.summary_message_cls(content=self.buffer)] else: buffer = self.buffer return {self.memory_key: buffer}
def load_memory_variables(self, inputs: Dict[str, Any]) ->Dict[str, Any]: """Return history buffer.""" if self.return_messages: buffer: Any = [self.summary_message_cls(content=self.buffer)] else: buffer = self.buffer return {self.memory_key: buffer}
Return history buffer.
test_zilliz_with_score
"""Test end to end construction and search with scores and IDs.""" texts = ['foo', 'bar', 'baz'] metadatas = [{'page': i} for i in range(len(texts))] docsearch = _zilliz_from_texts(metadatas=metadatas) output = docsearch.similarity_search_with_score('foo', k=3) docs = [o[0] for o in output] scores = [o[1] for o in output] assert docs == [Document(page_content='foo', metadata={'page': 0}), Document(page_content='bar', metadata={'page': 1}), Document( page_content='baz', metadata={'page': 2})] assert scores[0] < scores[1] < scores[2]
def test_zilliz_with_score() ->None: """Test end to end construction and search with scores and IDs.""" texts = ['foo', 'bar', 'baz'] metadatas = [{'page': i} for i in range(len(texts))] docsearch = _zilliz_from_texts(metadatas=metadatas) output = docsearch.similarity_search_with_score('foo', k=3) docs = [o[0] for o in output] scores = [o[1] for o in output] assert docs == [Document(page_content='foo', metadata={'page': 0}), Document(page_content='bar', metadata={'page': 1}), Document( page_content='baz', metadata={'page': 2})] assert scores[0] < scores[1] < scores[2]
Test end to end construction and search with scores and IDs.
InputType
return self.first.InputType
@property def InputType(self) ->Type[Input]: return self.first.InputType
null
test_modelscope_embedding_query
"""Test modelscope embeddings for query.""" document = 'foo bar' embedding = ModelScopeEmbeddings() output = embedding.embed_query(document) assert len(output) == 512
def test_modelscope_embedding_query() ->None: """Test modelscope embeddings for query.""" document = 'foo bar' embedding = ModelScopeEmbeddings() output = embedding.embed_query(document) assert len(output) == 512
Test modelscope embeddings for query.
on_chain_error
"""Handle an error for a chain run.""" chain_run = self._get_run(run_id) chain_run.error = self._get_stacktrace(error) chain_run.end_time = datetime.now(timezone.utc) chain_run.events.append({'name': 'error', 'time': chain_run.end_time}) if inputs is not None: chain_run.inputs = inputs if isinstance(inputs, dict) else {'input': inputs } self._end_trace(chain_run) self._on_chain_error(chain_run) return chain_run
def on_chain_error(self, error: BaseException, *, inputs: Optional[Dict[str, Any]]=None, run_id: UUID, **kwargs: Any) ->Run: """Handle an error for a chain run.""" chain_run = self._get_run(run_id) chain_run.error = self._get_stacktrace(error) chain_run.end_time = datetime.now(timezone.utc) chain_run.events.append({'name': 'error', 'time': chain_run.end_time}) if inputs is not None: chain_run.inputs = inputs if isinstance(inputs, dict) else {'input': inputs} self._end_trace(chain_run) self._on_chain_error(chain_run) return chain_run
Handle an error for a chain run.
embed_documents
""" Generates embeddings for a list of documents. Args: texts (List[str]): The documents to generate embeddings for. Returns: A list of embeddings, one for each document. """ return [self.nlp(text).vector.tolist() for text in texts]
def embed_documents(self, texts: List[str]) ->List[List[float]]: """ Generates embeddings for a list of documents. Args: texts (List[str]): The documents to generate embeddings for. Returns: A list of embeddings, one for each document. """ return [self.nlp(text).vector.tolist() for text in texts]
Generates embeddings for a list of documents. Args: texts (List[str]): The documents to generate embeddings for. Returns: A list of embeddings, one for each document.
test_qdrant_similarity_search_by_vector
"""Test end to end construction and search.""" texts = ['foo', 'bar', 'baz'] docsearch = Qdrant.from_texts(texts, ConsistentFakeEmbeddings(), location= ':memory:', content_payload_key=content_payload_key, metadata_payload_key=metadata_payload_key, batch_size=batch_size, vector_name=vector_name) embeddings = ConsistentFakeEmbeddings().embed_query('foo') output = docsearch.similarity_search_by_vector(embeddings, k=1) assert output == [Document(page_content='foo')]
@pytest.mark.parametrize('batch_size', [1, 64]) @pytest.mark.parametrize('content_payload_key', [Qdrant.CONTENT_KEY, 'foo']) @pytest.mark.parametrize('metadata_payload_key', [Qdrant.METADATA_KEY, 'bar']) @pytest.mark.parametrize('vector_name', [None, 'my-vector']) def test_qdrant_similarity_search_by_vector(batch_size: int, content_payload_key: str, metadata_payload_key: str, vector_name: Optional[str]) ->None: """Test end to end construction and search.""" texts = ['foo', 'bar', 'baz'] docsearch = Qdrant.from_texts(texts, ConsistentFakeEmbeddings(), location=':memory:', content_payload_key=content_payload_key, metadata_payload_key=metadata_payload_key, batch_size=batch_size, vector_name=vector_name) embeddings = ConsistentFakeEmbeddings().embed_query('foo') output = docsearch.similarity_search_by_vector(embeddings, k=1) assert output == [Document(page_content='foo')]
Test end to end construction and search.
similarity_search_with_score
"""Return docs most similar to query. Uses cosine similarity. Args: query: Text to look up documents similar to. k: Number of Documents to return. Defaults to 4. filter: A dictionary of metadata fields and values to filter by. Defaults to None. Returns: List of Documents most similar to the query and score for each """ embedding = self.embedding.embed_query(query) conn = self.connection_pool.connect() result = [] where_clause: str = '' where_clause_values: List[Any] = [] if filter: where_clause = 'WHERE ' arguments = [] def build_where_clause(where_clause_values: List[Any], sub_filter: dict, prefix_args: Optional[List[str]]=None) ->None: prefix_args = prefix_args or [] for key in sub_filter.keys(): if isinstance(sub_filter[key], dict): build_where_clause(where_clause_values, sub_filter[key], prefix_args + [key]) else: arguments.append('JSON_EXTRACT_JSON({}, {}) = %s'.format( self.metadata_field, ', '.join(['%s'] * (len( prefix_args) + 1)))) where_clause_values += prefix_args + [key] where_clause_values.append(json.dumps(sub_filter[key])) build_where_clause(where_clause_values, filter) where_clause += ' AND '.join(arguments) try: cur = conn.cursor() try: cur.execute( """SELECT {}, {}, {}({}, JSON_ARRAY_PACK(%s)) as __score FROM {} {} ORDER BY __score {} LIMIT %s""" .format(self.content_field, self.metadata_field, self. distance_strategy.name if isinstance(self.distance_strategy, DistanceStrategy) else self.distance_strategy, self. vector_field, self.table_name, where_clause, ORDERING_DIRECTIVE [self.distance_strategy]), ('[{}]'.format(','.join(map(str, embedding))),) + tuple(where_clause_values) + (k,)) for row in cur.fetchall(): doc = Document(page_content=row[0], metadata=row[1]) result.append((doc, float(row[2]))) finally: cur.close() finally: conn.close() return result
def similarity_search_with_score(self, query: str, k: int=4, filter: Optional[dict]=None) ->List[Tuple[Document, float]]: """Return docs most similar to query. Uses cosine similarity. Args: query: Text to look up documents similar to. k: Number of Documents to return. Defaults to 4. filter: A dictionary of metadata fields and values to filter by. Defaults to None. Returns: List of Documents most similar to the query and score for each """ embedding = self.embedding.embed_query(query) conn = self.connection_pool.connect() result = [] where_clause: str = '' where_clause_values: List[Any] = [] if filter: where_clause = 'WHERE ' arguments = [] def build_where_clause(where_clause_values: List[Any], sub_filter: dict, prefix_args: Optional[List[str]]=None) ->None: prefix_args = prefix_args or [] for key in sub_filter.keys(): if isinstance(sub_filter[key], dict): build_where_clause(where_clause_values, sub_filter[key], prefix_args + [key]) else: arguments.append('JSON_EXTRACT_JSON({}, {}) = %s'. format(self.metadata_field, ', '.join(['%s'] * (len (prefix_args) + 1)))) where_clause_values += prefix_args + [key] where_clause_values.append(json.dumps(sub_filter[key])) build_where_clause(where_clause_values, filter) where_clause += ' AND '.join(arguments) try: cur = conn.cursor() try: cur.execute( """SELECT {}, {}, {}({}, JSON_ARRAY_PACK(%s)) as __score FROM {} {} ORDER BY __score {} LIMIT %s""" .format(self.content_field, self.metadata_field, self. distance_strategy.name if isinstance(self.distance_strategy, DistanceStrategy) else self.distance_strategy, self. vector_field, self.table_name, where_clause, ORDERING_DIRECTIVE[self.distance_strategy]), ('[{}]'.format (','.join(map(str, embedding))),) + tuple( where_clause_values) + (k,)) for row in cur.fetchall(): doc = Document(page_content=row[0], metadata=row[1]) result.append((doc, float(row[2]))) finally: cur.close() finally: conn.close() return result
Return docs most similar to query. Uses cosine similarity. Args: query: Text to look up documents similar to. k: Number of Documents to return. Defaults to 4. filter: A dictionary of metadata fields and values to filter by. Defaults to None. Returns: List of Documents most similar to the query and score for each
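A self-contained sketch of the nested-filter flattening performed above, extracted for readability (the literal column name 'metadata' is an assumption standing in for self.metadata_field): each leaf of the filter dict becomes one JSON_EXTRACT_JSON predicate whose key path is bound as consecutive placeholders.

import json
from typing import Any, Dict, List, Optional, Tuple


def flatten_filter(sub_filter: Dict[str, Any], prefix: Optional[List[str]]=None
    ) ->Tuple[List[str], List[Any]]:
    """Turn a nested filter dict into SQL predicates plus bound values."""
    prefix = prefix or []
    clauses: List[str] = []
    values: List[Any] = []
    for key, val in sub_filter.items():
        if isinstance(val, dict):
            sub_clauses, sub_values = flatten_filter(val, prefix + [key])
            clauses.extend(sub_clauses)
            values.extend(sub_values)
        else:
            placeholders = ', '.join(['%s'] * (len(prefix) + 1))
            clauses.append('JSON_EXTRACT_JSON(metadata, {}) = %s'.format(
                placeholders))
            values.extend(prefix + [key])
            values.append(json.dumps(val))
    return clauses, values


clauses, values = flatten_filter({'author': {'name': 'Ada'}})
# clauses == ['JSON_EXTRACT_JSON(metadata, %s, %s) = %s']
# values == ['author', 'name', '"Ada"']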
__init__
""" Initialize with a ready session and a keyspace name. Args: session (cassandra.cluster.Session): an open Cassandra session keyspace (str): the keyspace to use for storing the cache table_name (str): name of the Cassandra table to use as cache ttl_seconds (optional int): time-to-live for cache entries (default: None, i.e. forever) """ try: from cassio.table import ElasticCassandraTable except (ImportError, ModuleNotFoundError): raise ValueError( 'Could not import cassio python package. Please install it with `pip install cassio`.' ) self.session = session self.keyspace = keyspace self.table_name = table_name self.ttl_seconds = ttl_seconds self.kv_cache = ElasticCassandraTable(session=self.session, keyspace=self. keyspace, table=self.table_name, keys=['llm_string', 'prompt'], primary_key_type=['TEXT', 'TEXT'], ttl_seconds=self.ttl_seconds, skip_provisioning=skip_provisioning)
def __init__(self, session: Optional[CassandraSession]=None, keyspace: Optional[str]=None, table_name: str=CASSANDRA_CACHE_DEFAULT_TABLE_NAME, ttl_seconds: Optional[int]=CASSANDRA_CACHE_DEFAULT_TTL_SECONDS, skip_provisioning: bool=False): """ Initialize with a ready session and a keyspace name. Args: session (cassandra.cluster.Session): an open Cassandra session keyspace (str): the keyspace to use for storing the cache table_name (str): name of the Cassandra table to use as cache ttl_seconds (optional int): time-to-live for cache entries (default: None, i.e. forever) """ try: from cassio.table import ElasticCassandraTable except (ImportError, ModuleNotFoundError): raise ValueError( 'Could not import cassio python package. Please install it with `pip install cassio`.' ) self.session = session self.keyspace = keyspace self.table_name = table_name self.ttl_seconds = ttl_seconds self.kv_cache = ElasticCassandraTable(session=self.session, keyspace= self.keyspace, table=self.table_name, keys=['llm_string', 'prompt'], primary_key_type=['TEXT', 'TEXT'], ttl_seconds=self.ttl_seconds, skip_provisioning=skip_provisioning)
Initialize with a ready session and a keyspace name. Args: session (cassandra.cluster.Session): an open Cassandra session keyspace (str): the keyspace to use for storing the cache table_name (str): name of the Cassandra table to use as cache ttl_seconds (optional int): time-to-live for cache entries (default: None, i.e. forever)
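A usage sketch under stated assumptions: the class name CassandraCache is inferred from the default-constant names above, a Cassandra node is reachable at 127.0.0.1, and the keyspace name is illustrative.

from cassandra.cluster import Cluster

session = Cluster(['127.0.0.1']).connect()
cache = CassandraCache(session=session, keyspace='demo_keyspace',
    ttl_seconds=3600)  # cached entries expire after one hour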
to_document
"""Convert the DocumentWithState to a Document.""" return Document(page_content=self.page_content, metadata=self.metadata)
def to_document(self) ->Document: """Convert the DocumentWithState to a Document.""" return Document(page_content=self.page_content, metadata=self.metadata)
Convert the DocumentWithState to a Document.
mock_feature_layer
feature_layer = MagicMock() feature_layer.query.return_value = [MagicMock(as_dict={'attributes': { 'field': 'value'}})] feature_layer.url = 'https://example.com/layer_url' feature_layer.properties = {'description': '<html><body>Some HTML content</body></html>', 'name': 'test', 'serviceItemId': 'testItemId'} return feature_layer
@pytest.fixture
def mock_feature_layer() ->MagicMock:
    feature_layer = MagicMock()
    feature_layer.query.return_value = [MagicMock(as_dict={'attributes': {
        'field': 'value'}})]
    feature_layer.url = 'https://example.com/layer_url'
    feature_layer.properties = {'description':
        '<html><body>Some HTML content</body></html>', 'name': 'test',
        'serviceItemId': 'testItemId'}
    return feature_layer
null
_headers
return {'Authorization': f'Bearer {self.api_key}'}
@property def _headers(self) ->dict: return {'Authorization': f'Bearer {self.api_key}'}
null
create_react_agent
"""Create an agent that uses ReAct prompting. Examples: .. code-block:: python from langchain import hub from langchain_community.llms import OpenAI from langchain.agents import AgentExecutor, create_react_agent prompt = hub.pull("hwchase17/react") model = OpenAI() tools = ... agent = create_react_agent(model, tools, prompt) agent_executor = AgentExecutor(agent=agent, tools=tools) agent_executor.invoke({"input": "hi"}) # Use with chat history from langchain_core.messages import AIMessage, HumanMessage agent_executor.invoke( { "input": "what's my name?", # Notice that chat_history is a string # since this prompt is aimed at LLMs, not chat models "chat_history": "Human: My name is Bob AI: Hello Bob!", } ) Args: llm: LLM to use as the agent. tools: Tools this agent has access to. prompt: The prompt to use, must have input keys of `tools`, `tool_names`, and `agent_scratchpad`. Returns: A runnable sequence representing an agent. It takes as input all the same input variables as the prompt passed in does. It returns as output either an AgentAction or AgentFinish. """ missing_vars = {'tools', 'tool_names', 'agent_scratchpad'}.difference(prompt .input_variables) if missing_vars: raise ValueError(f'Prompt missing required variables: {missing_vars}') prompt = prompt.partial(tools=render_text_description(list(tools)), tool_names=', '.join([t.name for t in tools])) llm_with_stop = llm.bind(stop=['\nObservation']) agent = RunnablePassthrough.assign(agent_scratchpad=lambda x: format_log_to_str(x['intermediate_steps']) ) | prompt | llm_with_stop | ReActSingleInputOutputParser() return agent
def create_react_agent(llm: BaseLanguageModel, tools: Sequence[BaseTool], prompt: BasePromptTemplate) ->Runnable: """Create an agent that uses ReAct prompting. Examples: .. code-block:: python from langchain import hub from langchain_community.llms import OpenAI from langchain.agents import AgentExecutor, create_react_agent prompt = hub.pull("hwchase17/react") model = OpenAI() tools = ... agent = create_react_agent(model, tools, prompt) agent_executor = AgentExecutor(agent=agent, tools=tools) agent_executor.invoke({"input": "hi"}) # Use with chat history from langchain_core.messages import AIMessage, HumanMessage agent_executor.invoke( { "input": "what's my name?", # Notice that chat_history is a string # since this prompt is aimed at LLMs, not chat models "chat_history": "Human: My name is Bob AI: Hello Bob!", } ) Args: llm: LLM to use as the agent. tools: Tools this agent has access to. prompt: The prompt to use, must have input keys of `tools`, `tool_names`, and `agent_scratchpad`. Returns: A runnable sequence representing an agent. It takes as input all the same input variables as the prompt passed in does. It returns as output either an AgentAction or AgentFinish. """ missing_vars = {'tools', 'tool_names', 'agent_scratchpad'}.difference( prompt.input_variables) if missing_vars: raise ValueError(f'Prompt missing required variables: {missing_vars}') prompt = prompt.partial(tools=render_text_description(list(tools)), tool_names=', '.join([t.name for t in tools])) llm_with_stop = llm.bind(stop=['\nObservation']) agent = RunnablePassthrough.assign(agent_scratchpad=lambda x: format_log_to_str(x['intermediate_steps']) ) | prompt | llm_with_stop | ReActSingleInputOutputParser() return agent
Create an agent that uses ReAct prompting. Examples: .. code-block:: python from langchain import hub from langchain_community.llms import OpenAI from langchain.agents import AgentExecutor, create_react_agent prompt = hub.pull("hwchase17/react") model = OpenAI() tools = ... agent = create_react_agent(model, tools, prompt) agent_executor = AgentExecutor(agent=agent, tools=tools) agent_executor.invoke({"input": "hi"}) # Use with chat history from langchain_core.messages import AIMessage, HumanMessage agent_executor.invoke( { "input": "what's my name?", # Notice that chat_history is a string # since this prompt is aimed at LLMs, not chat models "chat_history": "Human: My name is Bob AI: Hello Bob!", } ) Args: llm: LLM to use as the agent. tools: Tools this agent has access to. prompt: The prompt to use, must have input keys of `tools`, `tool_names`, and `agent_scratchpad`. Returns: A runnable sequence representing an agent. It takes as input all the same input variables as the prompt passed in does. It returns as output either an AgentAction or AgentFinish.
test_nlpcloud_api_key
"""Test that nlpcloud api key is a secret key.""" assert isinstance(NLPCloud(nlpcloud_api_key='1').nlpcloud_api_key, SecretStr) monkeypatch.setenv('NLPCLOUD_API_KEY', 'secret-api-key') llm = NLPCloud() assert isinstance(llm.nlpcloud_api_key, SecretStr) assert cast(SecretStr, llm.nlpcloud_api_key).get_secret_value( ) == 'secret-api-key' print(llm.nlpcloud_api_key, end='') captured = capsys.readouterr() assert captured.out == '**********'
def test_nlpcloud_api_key(monkeypatch: MonkeyPatch, capsys: CaptureFixture ) ->None: """Test that nlpcloud api key is a secret key.""" assert isinstance(NLPCloud(nlpcloud_api_key='1').nlpcloud_api_key, SecretStr) monkeypatch.setenv('NLPCLOUD_API_KEY', 'secret-api-key') llm = NLPCloud() assert isinstance(llm.nlpcloud_api_key, SecretStr) assert cast(SecretStr, llm.nlpcloud_api_key).get_secret_value( ) == 'secret-api-key' print(llm.nlpcloud_api_key, end='') captured = capsys.readouterr() assert captured.out == '**********'
Test that nlpcloud api key is a secret key.
on_chat_model_start
assert all(isinstance(m, BaseMessage) for m in chain(*messages)) self.on_chat_model_start_common()
def on_chat_model_start(self, serialized: Dict[str, Any], messages: List[ List[BaseMessage]], *, run_id: UUID, parent_run_id: Optional[UUID]=None, **kwargs: Any) ->Any: assert all(isinstance(m, BaseMessage) for m in chain(*messages)) self.on_chat_model_start_common()
null
embedding_openai
if not os.environ.get('OPENAI_API_KEY'): raise ValueError('OPENAI_API_KEY is not set') return OpenAIEmbeddings()
@pytest.fixture(scope='module') def embedding_openai() ->OpenAIEmbeddings: if not os.environ.get('OPENAI_API_KEY'): raise ValueError('OPENAI_API_KEY is not set') return OpenAIEmbeddings()
null
test__convert_dict_to_message_ai
message_dict = {'role': 'assistant', 'content': 'foo'} result = _convert_dict_to_message(message_dict) expected_output = AIMessage(content='foo') assert result == expected_output
def test__convert_dict_to_message_ai() ->None: message_dict = {'role': 'assistant', 'content': 'foo'} result = _convert_dict_to_message(message_dict) expected_output = AIMessage(content='foo') assert result == expected_output
null
add_documents
"""Add documents to vectorstore.""" current_time = kwargs.get('current_time') if current_time is None: current_time = datetime.datetime.now() dup_docs = [deepcopy(d) for d in documents] for i, doc in enumerate(dup_docs): if 'last_accessed_at' not in doc.metadata: doc.metadata['last_accessed_at'] = current_time if 'created_at' not in doc.metadata: doc.metadata['created_at'] = current_time doc.metadata['buffer_idx'] = len(self.memory_stream) + i self.memory_stream.extend(dup_docs) return self.vectorstore.add_documents(dup_docs, **kwargs)
def add_documents(self, documents: List[Document], **kwargs: Any) ->List[str]: """Add documents to vectorstore.""" current_time = kwargs.get('current_time') if current_time is None: current_time = datetime.datetime.now() dup_docs = [deepcopy(d) for d in documents] for i, doc in enumerate(dup_docs): if 'last_accessed_at' not in doc.metadata: doc.metadata['last_accessed_at'] = current_time if 'created_at' not in doc.metadata: doc.metadata['created_at'] = current_time doc.metadata['buffer_idx'] = len(self.memory_stream) + i self.memory_stream.extend(dup_docs) return self.vectorstore.add_documents(dup_docs, **kwargs)
Add documents to vectorstore.
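A hedged example of the stamping behavior, with `retriever` standing in for an instance of the class this method belongs to: timestamps already present on a document are preserved, and the inputs are deep-copied so callers' documents are never mutated.

import datetime

from langchain_core.documents import Document

t0 = datetime.datetime(2024, 1, 1)
docs = [Document(page_content='fresh'), Document(page_content='old',
    metadata={'created_at': t0})]
retriever.add_documents(docs, current_time=datetime.datetime.now())
# 'fresh' is stamped with created_at/last_accessed_at = now; 'old' keeps t0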
lc_attributes
attributes: Dict[str, Any] = {} if self.region_name: attributes['region_name'] = self.region_name return attributes
@property def lc_attributes(self) ->Dict[str, Any]: attributes: Dict[str, Any] = {} if self.region_name: attributes['region_name'] = self.region_name return attributes
null
_import_analyzer_engine
try: from presidio_analyzer import AnalyzerEngine except ImportError as e: raise ImportError( 'Could not import presidio_analyzer, please install with `pip install presidio-analyzer`. You will also need to download a spaCy model to use the analyzer, e.g. `python -m spacy download en_core_web_lg`.' ) from e return AnalyzerEngine
def _import_analyzer_engine() ->'AnalyzerEngine': try: from presidio_analyzer import AnalyzerEngine except ImportError as e: raise ImportError( 'Could not import presidio_analyzer, please install with `pip install presidio-analyzer`. You will also need to download a spaCy model to use the analyzer, e.g. `python -m spacy download en_core_web_lg`.' ) from e return AnalyzerEngine
null
on_retry_common
self.retries += 1
def on_retry_common(self) ->None: self.retries += 1
null
test_pairwise_embedding_distance_eval_chain_embedding_distance
"""Test the embedding distance.""" result = pairwise_embedding_distance_eval_chain.evaluate_string_pairs( prediction='A single cat', prediction_b='A single cat') assert np.isclose(result['score'], 0.0)
@pytest.mark.requires('openai', 'tiktoken') def test_pairwise_embedding_distance_eval_chain_embedding_distance( pairwise_embedding_distance_eval_chain: PairwiseEmbeddingDistanceEvalChain ) ->None: """Test the embedding distance.""" result = pairwise_embedding_distance_eval_chain.evaluate_string_pairs( prediction='A single cat', prediction_b='A single cat') assert np.isclose(result['score'], 0.0)
Test the embedding distance.
test_whole_class_deprecation
"""Test whole class deprecation.""" @deprecated(since='2.0.0', removal='3.0.0') class DeprecatedClass: def __init__(self) ->None: """original doc""" pass @deprecated(since='2.0.0', removal='3.0.0') def deprecated_method(self) ->str: """original doc""" return 'This is a deprecated method.' with warnings.catch_warnings(record=True) as warning_list: warnings.simplefilter('always') obj = DeprecatedClass() assert obj.deprecated_method() == 'This is a deprecated method.' assert len(warning_list) == 2 warning = warning_list[0].message assert str(warning ) == 'The class `DeprecatedClass` was deprecated in LangChain 2.0.0 and will be removed in 3.0.0' warning = warning_list[1].message assert str(warning ) == 'The function `deprecated_method` was deprecated in LangChain 2.0.0 and will be removed in 3.0.0'
def test_whole_class_deprecation() ->None: """Test whole class deprecation.""" @deprecated(since='2.0.0', removal='3.0.0') class DeprecatedClass: def __init__(self) ->None: """original doc""" pass @deprecated(since='2.0.0', removal='3.0.0') def deprecated_method(self) ->str: """original doc""" return 'This is a deprecated method.' with warnings.catch_warnings(record=True) as warning_list: warnings.simplefilter('always') obj = DeprecatedClass() assert obj.deprecated_method() == 'This is a deprecated method.' assert len(warning_list) == 2 warning = warning_list[0].message assert str(warning ) == 'The class `DeprecatedClass` was deprecated in LangChain 2.0.0 and will be removed in 3.0.0' warning = warning_list[1].message assert str(warning ) == 'The function `deprecated_method` was deprecated in LangChain 2.0.0 and will be removed in 3.0.0'
Test whole class deprecation.
test_md_header_text_splitter_preserve_headers_2
"""Test markdown splitter by header: Preserve Headers.""" markdown_document = """# Foo ## Bar Hi this is Jim Hi this is Joe ### Boo Hi this is Lance ## Baz Hi this is Molly ## Buz # Bop""" headers_to_split_on = [('#', 'Header 1'), ('##', 'Header 2'), ('###', 'Header 3')] markdown_splitter = MarkdownHeaderTextSplitter(headers_to_split_on= headers_to_split_on, strip_headers=False) output = markdown_splitter.split_text(markdown_document) expected_output = [Document(page_content= """# Foo ## Bar Hi this is Jim Hi this is Joe""", metadata={ 'Header 1': 'Foo', 'Header 2': 'Bar'}), Document(page_content= """### Boo Hi this is Lance""", metadata={'Header 1': 'Foo', 'Header 2': 'Bar', 'Header 3': 'Boo'}), Document(page_content= """## Baz Hi this is Molly""", metadata={'Header 1': 'Foo', 'Header 2': 'Baz'}), Document(page_content='## Buz', metadata={ 'Header 1': 'Foo', 'Header 2': 'Buz'}), Document(page_content='# Bop', metadata={'Header 1': 'Bop'})] assert output == expected_output
def test_md_header_text_splitter_preserve_headers_2() ->None: """Test markdown splitter by header: Preserve Headers.""" markdown_document = """# Foo ## Bar Hi this is Jim Hi this is Joe ### Boo Hi this is Lance ## Baz Hi this is Molly ## Buz # Bop""" headers_to_split_on = [('#', 'Header 1'), ('##', 'Header 2'), ('###', 'Header 3')] markdown_splitter = MarkdownHeaderTextSplitter(headers_to_split_on= headers_to_split_on, strip_headers=False) output = markdown_splitter.split_text(markdown_document) expected_output = [Document(page_content= '# Foo \n## Bar \nHi this is Jim \nHi this is Joe', metadata={ 'Header 1': 'Foo', 'Header 2': 'Bar'}), Document(page_content= """### Boo Hi this is Lance""", metadata={'Header 1': 'Foo', 'Header 2': 'Bar', 'Header 3': 'Boo'}), Document(page_content= """## Baz Hi this is Molly""", metadata={'Header 1': 'Foo', 'Header 2': 'Baz'}), Document(page_content='## Buz', metadata={ 'Header 1': 'Foo', 'Header 2': 'Buz'}), Document(page_content= '# Bop', metadata={'Header 1': 'Bop'})] assert output == expected_output
Test markdown splitter by header: Preserve Headers.
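For contrast with strip_headers=False above, a small sketch of the assumed default behavior (strip_headers=True), where header lines survive only in metadata:

splitter = MarkdownHeaderTextSplitter(headers_to_split_on=[('#', 'Header 1')])
docs = splitter.split_text('# Foo\nHi this is Jim')
# docs[0].page_content == 'Hi this is Jim'
# docs[0].metadata == {'Header 1': 'Foo'}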
run
"""Get the current weather information for a specified location.""" mgr = self.owm.weather_manager() observation = mgr.weather_at_place(location) w = observation.weather return self._format_weather_info(location, w)
def run(self, location: str) ->str: """Get the current weather information for a specified location.""" mgr = self.owm.weather_manager() observation = mgr.weather_at_place(location) w = observation.weather return self._format_weather_info(location, w)
Get the current weather information for a specified location.
get_resized_images
""" Resize images from base64-encoded strings. :param docs: A list of base64-encoded image to be resized. :return: Dict containing a list of resized base64-encoded strings. """ b64_images = [] for doc in docs: if isinstance(doc, Document): doc = doc.page_content b64_images.append(doc) return {'images': b64_images}
def get_resized_images(docs):
    """
    Resize images from base64-encoded strings.

    :param docs: A list of base64-encoded images to be resized.
    :return: Dict containing a list of resized base64-encoded strings.
    """
    b64_images = []
    for doc in docs:
        if isinstance(doc, Document):
            doc = doc.page_content
        b64_images.append(doc)
    return {'images': b64_images}
Resize images from base64-encoded strings.

:param docs: A list of base64-encoded images to be resized.
:return: Dict containing a list of resized base64-encoded strings.
visit_operation
"""Translate an Operation."""
@abstractmethod def visit_operation(self, operation: Operation) ->Any: """Translate an Operation."""
Translate an Operation.
__init__
"""Initialize with necessary components.""" try: import qdrant_client except ImportError: raise ImportError( 'Could not import qdrant-client python package. Please install it with `pip install qdrant-client`.' ) if not isinstance(client, qdrant_client.QdrantClient): raise ValueError( f'client should be an instance of qdrant_client.QdrantClient, got {type(client)}' ) if async_client is not None and not isinstance(async_client, qdrant_client. AsyncQdrantClient): raise ValueError( f'async_client should be an instance of qdrant_client.AsyncQdrantClientgot {type(async_client)}' ) if embeddings is None and embedding_function is None: raise ValueError( "`embeddings` value can't be None. Pass `Embeddings` instance.") if embeddings is not None and embedding_function is not None: raise ValueError( 'Both `embeddings` and `embedding_function` are passed. Use `embeddings` only.' ) self._embeddings = embeddings self._embeddings_function = embedding_function self.client: qdrant_client.QdrantClient = client self.async_client: Optional[qdrant_client.AsyncQdrantClient] = async_client self.collection_name = collection_name self.content_payload_key = content_payload_key or self.CONTENT_KEY self.metadata_payload_key = metadata_payload_key or self.METADATA_KEY self.vector_name = vector_name or self.VECTOR_NAME if embedding_function is not None: warnings.warn( 'Using `embedding_function` is deprecated. Pass `Embeddings` instance to `embeddings` instead.' ) if not isinstance(embeddings, Embeddings): warnings.warn( '`embeddings` should be an instance of `Embeddings`.Using `embeddings` as `embedding_function` which is deprecated' ) self._embeddings_function = embeddings self._embeddings = None self.distance_strategy = distance_strategy.upper()
def __init__(self, client: Any, collection_name: str, embeddings: Optional[
    Embeddings]=None, content_payload_key: str=CONTENT_KEY,
    metadata_payload_key: str=METADATA_KEY, distance_strategy: str='COSINE',
    vector_name: Optional[str]=VECTOR_NAME, async_client: Optional[Any]=
    None, embedding_function: Optional[Callable]=None):
    """Initialize with necessary components."""
    try:
        import qdrant_client
    except ImportError:
        raise ImportError(
            'Could not import qdrant-client python package. Please install it with `pip install qdrant-client`.'
            )
    if not isinstance(client, qdrant_client.QdrantClient):
        raise ValueError(
            f'client should be an instance of qdrant_client.QdrantClient, got {type(client)}'
            )
    if async_client is not None and not isinstance(async_client,
        qdrant_client.AsyncQdrantClient):
        raise ValueError(
            f'async_client should be an instance of qdrant_client.AsyncQdrantClient, got {type(async_client)}'
            )
    if embeddings is None and embedding_function is None:
        raise ValueError(
            "`embeddings` value can't be None. Pass `Embeddings` instance.")
    if embeddings is not None and embedding_function is not None:
        raise ValueError(
            'Both `embeddings` and `embedding_function` are passed. Use `embeddings` only.'
            )
    self._embeddings = embeddings
    self._embeddings_function = embedding_function
    self.client: qdrant_client.QdrantClient = client
    self.async_client: Optional[qdrant_client.AsyncQdrantClient] = async_client
    self.collection_name = collection_name
    self.content_payload_key = content_payload_key or self.CONTENT_KEY
    self.metadata_payload_key = metadata_payload_key or self.METADATA_KEY
    self.vector_name = vector_name or self.VECTOR_NAME
    if embedding_function is not None:
        warnings.warn(
            'Using `embedding_function` is deprecated. Pass `Embeddings` instance to `embeddings` instead.'
            )
    if not isinstance(embeddings, Embeddings):
        warnings.warn(
            '`embeddings` should be an instance of `Embeddings`. Using `embeddings` as `embedding_function`, which is deprecated.'
            )
        self._embeddings_function = embeddings
        self._embeddings = None
    self.distance_strategy = distance_strategy.upper()
Initialize with necessary components.
get_boolean_env_var
"""Retrieve the boolean value of an environment variable. Args: var_name (str): The name of the environment variable to retrieve. default_value (bool): The default value to return if the variable is not found. Returns: bool: The value of the environment variable, interpreted as a boolean. """ true_values = {'true', '1', 't', 'y', 'yes'} false_values = {'false', '0', 'f', 'n', 'no'} value = os.getenv(var_name, '').lower() if value in true_values: return True elif value in false_values: return False else: return default_value
def get_boolean_env_var(var_name: str, default_value: bool=False) ->bool:
    """Retrieve the boolean value of an environment variable.

    Args:
    var_name (str): The name of the environment variable to retrieve.
    default_value (bool): The default value to return if the variable
    is not found.

    Returns:
    bool: The value of the environment variable, interpreted as a boolean.
    """
    true_values = {'true', '1', 't', 'y', 'yes'}
    false_values = {'false', '0', 'f', 'n', 'no'}
    value = os.getenv(var_name, '').lower()
    if value in true_values:
        return True
    elif value in false_values:
        return False
    else:
        return default_value
Retrieve the boolean value of an environment variable. Args: var_name (str): The name of the environment variable to retrieve. default_value (bool): The default value to return if the variable is not found. Returns: bool: The value of the environment variable, interpreted as a boolean.
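Illustrative calls (the variable names are arbitrary); matching is case-insensitive, and an unset or unrecognized value falls back to default_value.

import os

os.environ['ENABLE_FEATURE'] = 'Yes'
assert get_boolean_env_var('ENABLE_FEATURE') is True
assert get_boolean_env_var('MISSING_FLAG', default_value=True) is True
os.environ['ENABLE_FEATURE'] = 'maybe'  # unrecognized -> default (False)
assert get_boolean_env_var('ENABLE_FEATURE') is False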
_to_args_and_kwargs
"""Convert tool input to pydantic model.""" args, kwargs = super()._to_args_and_kwargs(tool_input) all_args = list(args) + list(kwargs.values()) if len(all_args) != 1: raise ToolException( f'Too many arguments to single-input tool {self.name}. Args: {all_args}' ) return tuple(all_args), {}
def _to_args_and_kwargs(self, tool_input: Union[str, Dict]) ->Tuple[Tuple, Dict ]: """Convert tool input to pydantic model.""" args, kwargs = super()._to_args_and_kwargs(tool_input) all_args = list(args) + list(kwargs.values()) if len(all_args) != 1: raise ToolException( f'Too many arguments to single-input tool {self.name}. Args: {all_args}' ) return tuple(all_args), {}
Convert tool input to pydantic model.
_extract_code
start = node.lineno - 1 end = node.end_lineno return '\n'.join(self.source_lines[start:end])
def _extract_code(self, node: Any) ->str: start = node.lineno - 1 end = node.end_lineno return '\n'.join(self.source_lines[start:end])
null
delete
"""Deleting IDs from in memory dictionary.""" overlapping = set(ids).intersection(self._dict) if not overlapping: raise ValueError(f'Tried to delete ids that does not exist: {ids}') for _id in ids: self._dict.pop(_id)
def delete(self, ids: List) ->None:
    """Deleting IDs from in memory dictionary."""
    overlapping = set(ids).intersection(self._dict)
    if not overlapping:
        raise ValueError(f'Tried to delete ids that do not exist: {ids}')
    for _id in ids:
        self._dict.pop(_id)
Deleting IDs from in memory dictionary.
__init__
zone = os.environ.get('NUCLIA_ZONE', 'europe-1') self._config['BACKEND'] = f'https://{zone}.nuclia.cloud/api/v1' key = os.environ.get('NUCLIA_NUA_KEY') if not key: raise ValueError('NUCLIA_NUA_KEY environment variable not set') else: self._config['NUA_KEY'] = key self._config['enable_ml'] = enable_ml super().__init__()
def __init__(self, enable_ml: bool=False) ->None: zone = os.environ.get('NUCLIA_ZONE', 'europe-1') self._config['BACKEND'] = f'https://{zone}.nuclia.cloud/api/v1' key = os.environ.get('NUCLIA_NUA_KEY') if not key: raise ValueError('NUCLIA_NUA_KEY environment variable not set') else: self._config['NUA_KEY'] = key self._config['enable_ml'] = enable_ml super().__init__()
null
test_load_returns_limited_docs
"""Test that returns several docs""" expected_docs = 2 api_client = ArxivAPIWrapper(load_max_docs=expected_docs) docs = api_client.load('ChatGPT') assert len(docs) == expected_docs assert_docs(docs)
def test_load_returns_limited_docs() ->None:
    """Test that several docs are returned."""
    expected_docs = 2
    api_client = ArxivAPIWrapper(load_max_docs=expected_docs)
    docs = api_client.load('ChatGPT')
    assert len(docs) == expected_docs
    assert_docs(docs)
Test that several docs are returned.
test_nvai_play_embedding_documents_multiple
"""Test NVIDIA embeddings for multiple documents.""" documents = ['foo bar', 'bar foo', 'foo'] embedding = NVIDIAEmbeddings(model='nvolveqa_40k') output = embedding.embed_documents(documents) assert len(output) == 3 assert all(len(doc) == 1024 for doc in output)
def test_nvai_play_embedding_documents_multiple() ->None: """Test NVIDIA embeddings for multiple documents.""" documents = ['foo bar', 'bar foo', 'foo'] embedding = NVIDIAEmbeddings(model='nvolveqa_40k') output = embedding.embed_documents(documents) assert len(output) == 3 assert all(len(doc) == 1024 for doc in output)
Test NVIDIA embeddings for multiple documents.