method_name (string, 1-78 chars) | method_body (string, 3-9.66k chars) | full_code (string, 31-10.7k chars) | docstring (string, 4-4.74k chars, may be null ⌀)
---|---|---|---|
similarity_search | docs_and_scores = self.similarity_search_with_score(query, k=k)
return [doc for doc, _ in docs_and_scores] | def similarity_search(self, query: str, k: int=4, **kwargs: Any) ->List[
Document]:
docs_and_scores = self.similarity_search_with_score(query, k=k)
return [doc for doc, _ in docs_and_scores] | null |
__init__ | """Initialize with a file path."""
try:
import pdfplumber
except ImportError:
raise ImportError(
'pdfplumber package not found, please install it with `pip install pdfplumber`'
)
super().__init__(file_path, headers=headers)
self.text_kwargs = text_kwargs or {}
self.dedupe = dedupe
self.extract_images = extract_images | def __init__(self, file_path: str, text_kwargs: Optional[Mapping[str, Any]]
=None, dedupe: bool=False, headers: Optional[Dict]=None, extract_images:
bool=False) ->None:
"""Initialize with a file path."""
try:
import pdfplumber
except ImportError:
raise ImportError(
'pdfplumber package not found, please install it with `pip install pdfplumber`'
)
super().__init__(file_path, headers=headers)
self.text_kwargs = text_kwargs or {}
self.dedupe = dedupe
self.extract_images = extract_images | Initialize with a file path. |
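The row above shows the initializer of what appears to be the community pdfplumber-based PDF loader. A minimal usage sketch, assuming it is exposed as `PDFPlumberLoader` in `langchain_community.document_loaders` and that a local `example.pdf` exists (both assumptions, not shown in the row):

```python
# Sketch only: assumes langchain_community and pdfplumber are installed,
# and that "example.pdf" is a readable PDF in the working directory.
from langchain_community.document_loaders import PDFPlumberLoader

loader = PDFPlumberLoader(
    "example.pdf",
    dedupe=True,           # collapse duplicated characters pdfplumber can emit
    extract_images=False,  # skip OCR of embedded images
)
docs = loader.load()       # typically one Document per page
print(len(docs), docs[0].metadata)
```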
store | """BigQueryVectorStore tests context.
In order to run this test, you must define the PROJECT environment variable
with a GCP project id.
Example:
export PROJECT=...
"""
from google.cloud import bigquery
bigquery.Client(location='US').create_dataset(TestBigQueryVectorStore.
dataset_name, exists_ok=True)
TestBigQueryVectorStore.store = BigQueryVectorSearch(project_id=os.environ.
get('PROJECT', None), embedding=FakeEmbeddings(), dataset_name=
TestBigQueryVectorStore.dataset_name, table_name=TEST_TABLE_NAME)
TestBigQueryVectorStore.store.add_texts(TestBigQueryVectorStore.texts,
TestBigQueryVectorStore.metadatas)
def teardown() ->None:
bigquery.Client(location='US').delete_dataset(TestBigQueryVectorStore.
dataset_name, delete_contents=True, not_found_ok=True)
request.addfinalizer(teardown)
return TestBigQueryVectorStore.store | @pytest.fixture(scope='class')
def store(request: pytest.FixtureRequest) ->BigQueryVectorSearch:
"""BigQueryVectorStore tests context.
In order to run this test, you must define the PROJECT environment variable
with a GCP project id.
Example:
export PROJECT=...
"""
from google.cloud import bigquery
bigquery.Client(location='US').create_dataset(TestBigQueryVectorStore.
dataset_name, exists_ok=True)
TestBigQueryVectorStore.store = BigQueryVectorSearch(project_id=os.
environ.get('PROJECT', None), embedding=FakeEmbeddings(),
dataset_name=TestBigQueryVectorStore.dataset_name, table_name=
TEST_TABLE_NAME)
TestBigQueryVectorStore.store.add_texts(TestBigQueryVectorStore.texts,
TestBigQueryVectorStore.metadatas)
def teardown() ->None:
bigquery.Client(location='US').delete_dataset(TestBigQueryVectorStore
.dataset_name, delete_contents=True, not_found_ok=True)
request.addfinalizer(teardown)
return TestBigQueryVectorStore.store | BigQueryVectorStore tests context.
In order to run this test, you must define the PROJECT environment variable
with a GCP project id.
Example:
export PROJECT=... |
invoke | """Transform a single input into an output. Override to implement.
Args:
input: The input to the runnable.
config: A config to use when invoking the runnable.
The config supports standard keys like 'tags', 'metadata' for tracing
purposes, 'max_concurrency' for controlling how much work to do
in parallel, and other keys. Please refer to the RunnableConfig
for more details.
Returns:
The output of the runnable.
""" | @abstractmethod
def invoke(self, input: Input, config: Optional[RunnableConfig]=None) ->Output:
"""Transform a single input into an output. Override to implement.
Args:
input: The input to the runnable.
config: A config to use when invoking the runnable.
The config supports standard keys like 'tags', 'metadata' for tracing
purposes, 'max_concurrency' for controlling how much work to do
in parallel, and other keys. Please refer to the RunnableConfig
for more details.
Returns:
The output of the runnable.
""" | Transform a single input into an output. Override to implement.
Args:
input: The input to the runnable.
config: A config to use when invoking the runnable.
The config supports standard keys like 'tags', 'metadata' for tracing
purposes, 'max_concurrency' for controlling how much work to do
in parallel, and other keys. Please refer to the RunnableConfig
for more details.
Returns:
The output of the runnable. |
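Because `invoke` is the abstract entry point of the Runnable interface described above, a small illustrative sketch may help; it uses `RunnableLambda` from `langchain_core`, which implements this contract for a plain function:

```python
# Sketch: RunnableLambda wraps an ordinary function in the Runnable interface,
# so invoke() takes the input plus an optional RunnableConfig-style dict.
from langchain_core.runnables import RunnableLambda

double = RunnableLambda(lambda x: x * 2)

# keys such as "tags" and "metadata" are forwarded to tracing callbacks
result = double.invoke(3, config={"tags": ["demo"], "metadata": {"run": "example"}})
print(result)  # 6
```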
test_python_ast_repl_print | program = """python
string = "racecar"
if string == string[::-1]:
print(string, "is a palindrome")
else:
print(string, "is not a palindrome")"""
tool = PythonAstREPLTool()
assert tool.run(program) == 'racecar is a palindrome\n' | @pytest.mark.skipif(sys.version_info < (3, 9), reason=
'Requires python version >= 3.9 to run.')
def test_python_ast_repl_print() ->None:
program = """python
string = "racecar"
if string == string[::-1]:
print(string, "is a palindrome")
else:
print(string, "is not a palindrome")"""
tool = PythonAstREPLTool()
assert tool.run(program) == 'racecar is a palindrome\n' | null |
get_action_and_input | output = output_parser.parse_folder(text)
if isinstance(output, AgentAction):
return output.tool, str(output.tool_input)
elif isinstance(output, AgentFinish):
return output.return_values['output'], output.log
else:
raise ValueError('Unexpected output type') | def get_action_and_input(text: str) ->Tuple[str, str]:
output = output_parser.parse(text)
if isinstance(output, AgentAction):
return output.tool, str(output.tool_input)
elif isinstance(output, AgentFinish):
return output.return_values['output'], output.log
else:
raise ValueError('Unexpected output type') | null |
from_text_similarity_search_test | """Test end to end construction and search."""
embeddings: OpenAIEmbeddings = OpenAIEmbeddings(model=model, chunk_size=1)
vector_store: AzureSearch = AzureSearch.from_texts(azure_search_endpoint=
vector_store_address, azure_search_key=vector_store_password,
index_name=index_name, texts=['Test 1', 'Test 2', 'Test 3'], embedding=
embeddings)
time.sleep(1)
res = vector_store.similarity_search(query='Test 1', k=3)
assert len(res) == 3 | def from_text_similarity_search_test() ->None:
"""Test end to end construction and search."""
embeddings: OpenAIEmbeddings = OpenAIEmbeddings(model=model, chunk_size=1)
vector_store: AzureSearch = AzureSearch.from_texts(azure_search_endpoint
=vector_store_address, azure_search_key=vector_store_password,
index_name=index_name, texts=['Test 1', 'Test 2', 'Test 3'],
embedding=embeddings)
time.sleep(1)
res = vector_store.similarity_search(query='Test 1', k=3)
assert len(res) == 3 | Test end to end construction and search. |
_import_oci_md_vllm | from langchain_community.llms.oci_data_science_model_deployment_endpoint import OCIModelDeploymentVLLM
return OCIModelDeploymentVLLM | def _import_oci_md_vllm() ->Any:
from langchain_community.llms.oci_data_science_model_deployment_endpoint import OCIModelDeploymentVLLM
return OCIModelDeploymentVLLM | null |
test_all_imports | assert set(__all__) == set(EXPECTED_ALL) | def test_all_imports() ->None:
assert set(__all__) == set(EXPECTED_ALL) | null |
test_copy_file_with_root_dir | """Test the FileCopy tool when a root dir is specified."""
with TemporaryDirectory() as temp_dir:
tool = CopyFileTool(root_dir=temp_dir)
source_file = Path(temp_dir) / 'source.txt'
destination_file = Path(temp_dir) / 'destination.txt'
source_file.write_text('Hello, world!')
tool.run({'source_path': 'source.txt', 'destination_path':
'destination.txt'})
assert source_file.exists()
assert destination_file.exists()
assert source_file.read_text() == 'Hello, world!'
assert destination_file.read_text() == 'Hello, world!' | def test_copy_file_with_root_dir() ->None:
"""Test the FileCopy tool when a root dir is specified."""
with TemporaryDirectory() as temp_dir:
tool = CopyFileTool(root_dir=temp_dir)
source_file = Path(temp_dir) / 'source.txt'
destination_file = Path(temp_dir) / 'destination.txt'
source_file.write_text('Hello, world!')
tool.run({'source_path': 'source.txt', 'destination_path':
'destination.txt'})
assert source_file.exists()
assert destination_file.exists()
assert source_file.read_text() == 'Hello, world!'
assert destination_file.read_text() == 'Hello, world!' | Test the FileCopy tool when a root dir is specified. |
remove_exact_and_partial_keys | """Recursively removes exact and partial keys from a dictionary.
:param obj: The dictionary to remove keys from.
:return: The modified dictionary.
"""
if isinstance(obj, dict):
obj = {k: v for k, v in obj.items() if k not in exact_keys and not any(
partial in k for partial in partial_keys)}
for k, v in obj.items():
obj[k] = remove_exact_and_partial_keys(v)
elif isinstance(obj, list):
obj = [remove_exact_and_partial_keys(x) for x in obj]
return obj | def remove_exact_and_partial_keys(obj: Dict[str, Any]) ->Dict[str, Any]:
"""Recursively removes exact and partial keys from a dictionary.
:param obj: The dictionary to remove keys from.
:return: The modified dictionary.
"""
if isinstance(obj, dict):
obj = {k: v for k, v in obj.items() if k not in exact_keys and not
any(partial in k for partial in partial_keys)}
for k, v in obj.items():
obj[k] = remove_exact_and_partial_keys(v)
elif isinstance(obj, list):
obj = [remove_exact_and_partial_keys(x) for x in obj]
return obj | Recursively removes exact and partial keys from a dictionary.
:param obj: The dictionary to remove keys from.
:return: The modified dictionary. |
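The helper above closes over `exact_keys` and `partial_keys` from its enclosing scope. A self-contained sketch of the same recursion, with those collections passed explicitly (the names and sample data below are illustrative, not from the original module):

```python
from typing import Any, List

def strip_keys(obj: Any, exact_keys: List[str], partial_keys: List[str]) -> Any:
    """Recursively drop keys that match exactly or contain a partial match."""
    if isinstance(obj, dict):
        return {
            k: strip_keys(v, exact_keys, partial_keys)
            for k, v in obj.items()
            if k not in exact_keys and not any(p in k for p in partial_keys)
        }
    if isinstance(obj, list):
        return [strip_keys(x, exact_keys, partial_keys) for x in obj]
    return obj

data = {"id": 1, "api_token": "secret", "items": [{"session_id": "x", "name": "ok"}]}
print(strip_keys(data, exact_keys=["id"], partial_keys=["token", "session"]))
# -> {'items': [{'name': 'ok'}]}
```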
output_keys | """Return output key.
:meta private:
"""
return self.combine_docs_chain.output_keys | @property
def output_keys(self) ->List[str]:
"""Return output key.
:meta private:
"""
return self.combine_docs_chain.output_keys | Return output key.
:meta private: |
validate_prompt_input_variables | """Validate that prompt input variables are consistent."""
prompt_variables = values['prompt'].input_variables
expected_keys = {'summary', 'new_lines'}
if expected_keys != set(prompt_variables):
raise ValueError(
f'Got unexpected prompt input variables. The prompt expects {prompt_variables}, but it should have {expected_keys}.'
)
return values | @root_validator()
def validate_prompt_input_variables(cls, values: Dict) ->Dict:
"""Validate that prompt input variables are consistent."""
prompt_variables = values['prompt'].input_variables
expected_keys = {'summary', 'new_lines'}
if expected_keys != set(prompt_variables):
raise ValueError(
f'Got unexpected prompt input variables. The prompt expects {prompt_variables}, but it should have {expected_keys}.'
)
return values | Validate that prompt input variables are consistent. |
test_sitemap_metadata_extraction | def sitemap_metadata_two(meta: dict, content: Any) ->dict:
title = content.find('title')
if title:
return {**meta, 'title': title.get_text()}
return meta
"""Test sitemap loader."""
loader = SitemapLoader('https://api.python.langchain.com/sitemap.xml',
meta_function=sitemap_metadata_two)
documents = loader.load()
assert len(documents) > 1
assert 'title' in documents[0].metadata
assert 'LangChain' in documents[0].metadata['title'] | def test_sitemap_metadata_extraction() ->None:
def sitemap_metadata_two(meta: dict, content: Any) ->dict:
title = content.find('title')
if title:
return {**meta, 'title': title.get_text()}
return meta
"""Test sitemap loader."""
loader = SitemapLoader('https://api.python.langchain.com/sitemap.xml',
meta_function=sitemap_metadata_two)
documents = loader.load()
assert len(documents) > 1
assert 'title' in documents[0].metadata
assert 'LangChain' in documents[0].metadata['title'] | null |
_import_human | from langchain_community.llms.human import HumanInputLLM
return HumanInputLLM | def _import_human() ->Any:
from langchain_community.llms.human import HumanInputLLM
return HumanInputLLM | null |
_default_knn_mapping | return {'properties': {'text': {'type': 'text'}, 'vector': {'type':
'dense_vector', 'dims': dims, 'index': True, 'similarity': similarity}}} | @staticmethod
def _default_knn_mapping(dims: int, similarity: Optional[str]='dot_product'
) ->Dict:
return {'properties': {'text': {'type': 'text'}, 'vector': {'type':
'dense_vector', 'dims': dims, 'index': True, 'similarity': similarity}}
} | null |
test__signature | secret_key = SecretStr('YOUR_SECRET_KEY')
result = _signature(secret_key=secret_key, payload={'model':
'Baichuan2-53B', 'messages': [{'role': 'user', 'content': 'Hi'}]},
timestamp=1697734335)
expected_output = '24a50b2db1648e25a244c67c5ab57d3f'
assert result == expected_output | def test__signature() ->None:
secret_key = SecretStr('YOUR_SECRET_KEY')
result = _signature(secret_key=secret_key, payload={'model':
'Baichuan2-53B', 'messages': [{'role': 'user', 'content': 'Hi'}]},
timestamp=1697734335)
expected_output = '24a50b2db1648e25a244c67c5ab57d3f'
assert result == expected_output | null |
similarity_search_by_vector | """Return docs most similar to embedding vector.
Args:
embedding: Embedding to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None.
Returns:
List of Documents most similar to the query vector.
"""
docs_and_scores = self.similarity_search_with_score_by_vector(embedding=
embedding, k=k, filter=filter, predicates=predicates, **kwargs)
return [doc for doc, _ in docs_and_scores] | def similarity_search_by_vector(self, embedding: Optional[List[float]], k:
int=4, filter: Optional[Union[dict, list]]=None, predicates: Optional[
Predicates]=None, **kwargs: Any) ->List[Document]:
"""Return docs most similar to embedding vector.
Args:
embedding: Embedding to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None.
Returns:
List of Documents most similar to the query vector.
"""
docs_and_scores = self.similarity_search_with_score_by_vector(embedding
=embedding, k=k, filter=filter, predicates=predicates, **kwargs)
return [doc for doc, _ in docs_and_scores] | Return docs most similar to embedding vector.
Args:
embedding: Embedding to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None.
Returns:
List of Documents most similar to the query vector. |
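The `predicates` argument suggests the row above comes from a specific store integration, but searching by a raw embedding vector is part of the shared VectorStore interface. A runnable sketch of that shared call, using FAISS and fake embeddings purely for illustration (requires the `faiss-cpu` package):

```python
# Sketch: embed the query yourself, then search by the raw vector.
from langchain_community.embeddings import FakeEmbeddings
from langchain_community.vectorstores import FAISS

embeddings = FakeEmbeddings(size=8)                     # stand-in for a real model
store = FAISS.from_texts(["alpha", "beta", "gamma"], embedding=embeddings)

query_vector = embeddings.embed_query("alpha")
docs = store.similarity_search_by_vector(query_vector, k=2)
print([d.page_content for d in docs])
```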
get_current_table | """Get the current table."""
return self.using_table_name | def get_current_table(self, **kwargs: Any) ->str:
"""Get the current table."""
return self.using_table_name | Get the current table. |
test_promptlayer_chat_openai_system_message | """Test PromptLayerChatOpenAI wrapper with system message."""
chat = PromptLayerChatOpenAI(max_tokens=10)
system_message = SystemMessage(content='You are to chat with the user.')
human_message = HumanMessage(content='Hello')
response = chat([system_message, human_message])
assert isinstance(response, BaseMessage)
assert isinstance(response.content, str) | def test_promptlayer_chat_openai_system_message() ->None:
"""Test PromptLayerChatOpenAI wrapper with system message."""
chat = PromptLayerChatOpenAI(max_tokens=10)
system_message = SystemMessage(content='You are to chat with the user.')
human_message = HumanMessage(content='Hello')
response = chat([system_message, human_message])
assert isinstance(response, BaseMessage)
assert isinstance(response.content, str) | Test PromptLayerChatOpenAI wrapper with system message. |
delete | """Delete by vector ID or other criteria.
Args:
ids: List of ids to delete.
**kwargs: Other keyword arguments that subclasses might use.
Returns:
Optional[bool]: True if deletion is successful,
False otherwise, None if not implemented.
"""
raise NotImplementedError('Deletions are not available in ScaNN, yet.') | def delete(self, ids: Optional[List[str]]=None, **kwargs: Any) ->Optional[bool
]:
"""Delete by vector ID or other criteria.
Args:
ids: List of ids to delete.
**kwargs: Other keyword arguments that subclasses might use.
Returns:
Optional[bool]: True if deletion is successful,
False otherwise, None if not implemented.
"""
raise NotImplementedError('Deletions are not available in ScaNN, yet.') | Delete by vector ID or other criteria.
Args:
ids: List of ids to delete.
**kwargs: Other keyword arguments that subclasses might use.
Returns:
Optional[bool]: True if deletion is successful,
False otherwise, None if not implemented. |
clear | """Empty the collection."""
self.table.clear() | def clear(self) ->None:
"""Empty the collection."""
self.table.clear() | Empty the collection. |
similarity_search_with_score | """Run similarity search with distance."""
raise NotImplementedError | def similarity_search_with_score(self, *args: Any, **kwargs: Any) ->List[Tuple
[Document, float]]:
"""Run similarity search with distance."""
raise NotImplementedError | Run similarity search with distance. |
similarity_search_with_score | embeddings = self._embedding.embed_query(query)
docs = self._similarity_search_with_score(embeddings=embeddings, k=k)
return docs | def similarity_search_with_score(self, query: str, k: int=4) ->List[Tuple[
Document, float]]:
embeddings = self._embedding.embed_query(query)
docs = self._similarity_search_with_score(embeddings=embeddings, k=k)
return docs | null |
fetch_list_id | """Fetch the list id."""
if folder_id:
url = f'{DEFAULT_URL}/folder/{folder_id}/list'
else:
url = f'{DEFAULT_URL}/space/{space_id}/list'
data = fetch_data(url, access_token, query={'archived': 'false'})
if folder_id and 'id' in data:
return data['id']
else:
return fetch_first_id(data, 'lists') | def fetch_list_id(space_id: int, folder_id: int, access_token: str) ->Optional[
int]:
"""Fetch the list id."""
if folder_id:
url = f'{DEFAULT_URL}/folder/{folder_id}/list'
else:
url = f'{DEFAULT_URL}/space/{space_id}/list'
data = fetch_data(url, access_token, query={'archived': 'false'})
if folder_id and 'id' in data:
return data['id']
else:
return fetch_first_id(data, 'lists') | Fetch the list id. |
test_limit_question | """Test question about limits that needs sympy"""
question = 'What is the limit of sin(x) / x as x goes to 0?'
output = fake_llm_symbolic_math_chain.run(question)
assert output == 'Answer: 1' | def test_limit_question(fake_llm_symbolic_math_chain: LLMSymbolicMathChain
) ->None:
"""Test question about limits that needs sympy"""
question = 'What is the limit of sin(x) / x as x goes to 0?'
output = fake_llm_symbolic_math_chain.run(question)
assert output == 'Answer: 1' | Test question about limits that needs sympy |
test_rss_loader_with_opml | file_path = Path(__file__).parent.parent / 'examples'
with open(file_path.joinpath('sample_rss_feeds.opml'), 'r') as f:
loader = RSSFeedLoader(opml=f.read())
docs = loader.load()
assert docs[0] is not None
assert hasattr(docs[0], 'page_content')
assert hasattr(docs[0], 'metadata')
metadata = docs[0].metadata
assert 'feed' in metadata
assert 'title' in metadata
assert 'link' in metadata
assert 'authors' in metadata
assert 'language' in metadata
assert 'description' in metadata
assert 'publish_date' in metadata | def test_rss_loader_with_opml() ->None:
file_path = Path(__file__).parent.parent / 'examples'
with open(file_path.joinpath('sample_rss_feeds.opml'), 'r') as f:
loader = RSSFeedLoader(opml=f.read())
docs = loader.load()
assert docs[0] is not None
assert hasattr(docs[0], 'page_content')
assert hasattr(docs[0], 'metadata')
metadata = docs[0].metadata
assert 'feed' in metadata
assert 'title' in metadata
assert 'link' in metadata
assert 'authors' in metadata
assert 'language' in metadata
assert 'description' in metadata
assert 'publish_date' in metadata | null |
get_payload | """Generates payload for the _NVIDIAClient API to send to service."""
return {**self.preprocess(inputs=inputs, labels=labels), **kwargs} | def get_payload(self, inputs: Sequence[Dict], labels: Optional[dict]=None,
**kwargs: Any) ->dict:
"""Generates payload for the _NVIDIAClient API to send to service."""
return {**self.preprocess(inputs=inputs, labels=labels), **kwargs} | Generates payload for the _NVIDIAClient API to send to service. |
test_all_imports | """Simple test to make sure all things can be imported."""
for cls in vectorstores.__all__:
if cls not in ['AlibabaCloudOpenSearchSettings', 'ClickhouseSettings',
'MyScaleSettings']:
assert issubclass(getattr(vectorstores, cls), VectorStore) | def test_all_imports() ->None:
"""Simple test to make sure all things can be imported."""
for cls in vectorstores.__all__:
if cls not in ['AlibabaCloudOpenSearchSettings',
'ClickhouseSettings', 'MyScaleSettings']:
assert issubclass(getattr(vectorstores, cls), VectorStore) | Simple test to make sure all things can be imported. |
test_tracer_llm_run_on_error | """Test tracer on an LLM run with an error."""
exception = Exception('test')
uuid = uuid4()
compare_run = Run(id=str(uuid), start_time=datetime.now(timezone.utc),
end_time=datetime.now(timezone.utc), events=[{'name': 'start', 'time':
datetime.now(timezone.utc)}, {'name': 'error', 'time': datetime.now(
timezone.utc)}], extra={}, execution_order=1, child_execution_order=1,
serialized=SERIALIZED, inputs=dict(prompts=[]), outputs=None, error=
repr(exception), run_type='llm', trace_id=uuid, dotted_order=
f'20230101T000000000000Z{uuid}')
tracer = FakeTracer()
tracer.on_llm_start(serialized=SERIALIZED, prompts=[], run_id=uuid)
tracer.on_llm_error(exception, run_id=uuid)
assert len(tracer.runs) == 1
_compare_run_with_error(tracer.runs[0], compare_run) | @freeze_time('2023-01-01')
def test_tracer_llm_run_on_error() ->None:
"""Test tracer on an LLM run with an error."""
exception = Exception('test')
uuid = uuid4()
compare_run = Run(id=str(uuid), start_time=datetime.now(timezone.utc),
end_time=datetime.now(timezone.utc), events=[{'name': 'start',
'time': datetime.now(timezone.utc)}, {'name': 'error', 'time':
datetime.now(timezone.utc)}], extra={}, execution_order=1,
child_execution_order=1, serialized=SERIALIZED, inputs=dict(prompts
=[]), outputs=None, error=repr(exception), run_type='llm', trace_id
=uuid, dotted_order=f'20230101T000000000000Z{uuid}')
tracer = FakeTracer()
tracer.on_llm_start(serialized=SERIALIZED, prompts=[], run_id=uuid)
tracer.on_llm_error(exception, run_id=uuid)
assert len(tracer.runs) == 1
_compare_run_with_error(tracer.runs[0], compare_run) | Test tracer on an LLM run with an error. |
__init__ | """
Language parser that splits code using the respective language syntax.
Args:
language: If None (default), it will try to infer language from source.
parser_threshold: Minimum lines needed to activate parsing (0 by default).
"""
self.language = language
self.parser_threshold = parser_threshold | def __init__(self, language: Optional[Language]=None, parser_threshold: int=0):
"""
Language parser that splits code using the respective language syntax.
Args:
language: If None (default), it will try to infer language from source.
parser_threshold: Minimum lines needed to activate parsing (0 by default).
"""
self.language = language
self.parser_threshold = parser_threshold | Language parser that splits code using the respective language syntax.
Args:
language: If None (default), it will try to infer language from source.
parser_threshold: Minimum lines needed to activate parsing (0 by default). |
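A usage sketch for the code-splitting parser above, assuming it is the `LanguageParser` from `langchain_community.document_loaders.parsers` used through `GenericLoader` (the path and threshold below are illustrative):

```python
# Sketch: parse a source tree into per-function / per-class Documents.
# With language=None the language is inferred per file; files shorter than
# parser_threshold lines are kept whole instead of being split.
from langchain_community.document_loaders.generic import GenericLoader
from langchain_community.document_loaders.parsers import LanguageParser

loader = GenericLoader.from_filesystem(
    "./my_project",                  # placeholder path
    glob="**/*",
    suffixes=[".py"],
    parser=LanguageParser(language=None, parser_threshold=30),
)
docs = loader.load()
print(len(docs), docs[0].metadata.get("content_type"))
```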
test_call | """Test that call gives the correct answer."""
search = BingSearchAPIWrapper()
output = search.run("Obama's first name")
assert 'Barack Hussein Obama' in output | def test_call() ->None:
"""Test that call gives the correct answer."""
search = BingSearchAPIWrapper()
output = search.run("Obama's first name")
assert 'Barack Hussein Obama' in output | Test that call gives the correct answer. |
__repr__ | from pprint import pformat
return f'RunLog({pformat(self.state)})' | def __repr__(self) ->str:
from pprint import pformat
return f'RunLog({pformat(self.state)})' | null |
test_generate_stream | """Test valid call to qianfan."""
llm = QianfanLLMEndpoint()
output = llm.stream('write a joke')
assert isinstance(output, Generator) | def test_generate_stream() ->None:
"""Test valid call to qianfan."""
llm = QianfanLLMEndpoint()
output = llm.stream('write a joke')
assert isinstance(output, Generator) | Test valid call to qianfan. |
_embed_documents | """Inference function to send to the remote hardware.
Accepts a sentence_transformer model_id and
returns a list of embeddings for each document in the batch.
"""
return pipeline(*args, **kwargs) | def _embed_documents(pipeline: Any, *args: Any, **kwargs: Any) ->List[List[
float]]:
"""Inference function to send to the remote hardware.
Accepts a sentence_transformer model_id and
returns a list of embeddings for each document in the batch.
"""
return pipeline(*args, **kwargs) | Inference function to send to the remote hardware.
Accepts a sentence_transformer model_id and
returns a list of embeddings for each document in the batch. |
_process_supported_media_type | """Process the media type of the request body."""
from openapi_pydantic import Reference
references_used = []
schema = media_type_obj.media_type_schema
if isinstance(schema, Reference):
references_used.append(schema.ref.split('/')[-1])
schema = spec.get_referenced_schema(schema)
if schema is None:
raise ValueError(
f'Could not resolve schema for media type: {media_type_obj}')
api_request_body_properties = []
required_properties = schema.required or []
if schema.type == 'object' and schema.properties:
for prop_name, prop_schema in schema.properties.items():
if isinstance(prop_schema, Reference):
prop_schema = spec.get_referenced_schema(prop_schema)
api_request_body_properties.append(APIRequestBodyProperty.
from_schema(schema=prop_schema, name=prop_name, required=
prop_name in required_properties, spec=spec))
else:
api_request_body_properties.append(APIRequestBodyProperty(name='body',
required=True, type=schema.type, default=schema.default,
description=schema.description, properties=[], references_used=
references_used))
return api_request_body_properties | @classmethod
def _process_supported_media_type(cls, media_type_obj: MediaType, spec:
OpenAPISpec) ->List[APIRequestBodyProperty]:
"""Process the media type of the request body."""
from openapi_pydantic import Reference
references_used = []
schema = media_type_obj.media_type_schema
if isinstance(schema, Reference):
references_used.append(schema.ref.split('/')[-1])
schema = spec.get_referenced_schema(schema)
if schema is None:
raise ValueError(
f'Could not resolve schema for media type: {media_type_obj}')
api_request_body_properties = []
required_properties = schema.required or []
if schema.type == 'object' and schema.properties:
for prop_name, prop_schema in schema.properties.items():
if isinstance(prop_schema, Reference):
prop_schema = spec.get_referenced_schema(prop_schema)
api_request_body_properties.append(APIRequestBodyProperty.
from_schema(schema=prop_schema, name=prop_name, required=
prop_name in required_properties, spec=spec))
else:
api_request_body_properties.append(APIRequestBodyProperty(name=
'body', required=True, type=schema.type, default=schema.default,
description=schema.description, properties=[], references_used=
references_used))
return api_request_body_properties | Process the media type of the request body. |
_invoke_triton | if not self.client.is_model_ready(model_name):
raise RuntimeError('Cannot request streaming, model is not loaded')
request_id = str(random.randint(1, 9999999))
result_queue = StreamingResponseGenerator(self, request_id, force_batch=
False, stop_words=stop_words)
self.client.start_stream(callback=partial(self._stream_callback,
result_queue, stop_words=stop_words))
self.client.async_stream_infer(model_name=model_name, inputs=inputs,
outputs=outputs, request_id=request_id)
return result_queue | def _invoke_triton(self, model_name, inputs, outputs, stop_words):
if not self.client.is_model_ready(model_name):
raise RuntimeError('Cannot request streaming, model is not loaded')
request_id = str(random.randint(1, 9999999))
result_queue = StreamingResponseGenerator(self, request_id, force_batch
=False, stop_words=stop_words)
self.client.start_stream(callback=partial(self._stream_callback,
result_queue, stop_words=stop_words))
self.client.async_stream_infer(model_name=model_name, inputs=inputs,
outputs=outputs, request_id=request_id)
return result_queue | null |
_run | """Use the tool."""
all_params = {'text': query, 'language': self.language, 'option': self.
voice, 'return_type': self.return_type, 'rate': self.rate, 'pitch':
self.pitch, 'volume': self.volume, 'audio_format': self.audio_format,
'sampling_rate': self.sampling_rate, 'settings': self.voice_models}
query_params = {k: v for k, v in all_params.items() if v is not None}
return self._call_eden_ai(query_params) | def _run(self, query: str, run_manager: Optional[CallbackManagerForToolRun]
=None) ->str:
"""Use the tool."""
all_params = {'text': query, 'language': self.language, 'option': self.
voice, 'return_type': self.return_type, 'rate': self.rate, 'pitch':
self.pitch, 'volume': self.volume, 'audio_format': self.
audio_format, 'sampling_rate': self.sampling_rate, 'settings': self
.voice_models}
query_params = {k: v for k, v in all_params.items() if v is not None}
return self._call_eden_ai(query_params) | Use the tool. |
date | item = str(item).strip('"\'')
try:
datetime.datetime.strptime(item, '%Y-%m-%d')
except ValueError:
warnings.warn(
'Dates are expected to be provided in ISO 8601 date format (YYYY-MM-DD).'
)
return {'date': item, 'type': 'date'} | def date(self, item: Any) ->ISO8601Date:
item = str(item).strip('"\'')
try:
datetime.datetime.strptime(item, '%Y-%m-%d')
except ValueError:
warnings.warn(
'Dates are expected to be provided in ISO 8601 date format (YYYY-MM-DD).'
)
return {'date': item, 'type': 'date'} | null |
test_presigned_loading | mocker.register_uri('GET', requests_mock.ANY, text='data')
loader = LakeFSLoader(self.lakefs_access_key, self.lakefs_secret_key, self.
endpoint)
loader.set_repo(self.repo)
loader.set_ref(self.ref)
loader.set_path(self.path)
loader.load() | @requests_mock.Mocker()
@pytest.mark.usefixtures('mock_lakefs_client')
def test_presigned_loading(self, mocker: Mocker) ->None:
mocker.register_uri('GET', requests_mock.ANY, text='data')
loader = LakeFSLoader(self.lakefs_access_key, self.lakefs_secret_key,
self.endpoint)
loader.set_repo(self.repo)
loader.set_ref(self.ref)
loader.set_path(self.path)
loader.load() | null |
get_available_models | """Get available models from EverlyAI API."""
return set(['meta-llama/Llama-2-7b-chat-hf',
'meta-llama/Llama-2-13b-chat-hf-quantized']) | @staticmethod
def get_available_models() ->Set[str]:
"""Get available models from EverlyAI API."""
return set(['meta-llama/Llama-2-7b-chat-hf',
'meta-llama/Llama-2-13b-chat-hf-quantized']) | Get available models from EverlyAI API. |
_res_to_str | return '<' + str(res[var]) + '> (' + self._get_local_name(res[var]
) + ', ' + str(res['com']) + ')' | def _res_to_str(self, res: rdflib.query.ResultRow, var: str) ->str:
return '<' + str(res[var]) + '> (' + self._get_local_name(res[var]
) + ', ' + str(res['com']) + ')' | null |
__init__ | self.api_key = api_key | def __init__(self, api_key: str):
self.api_key = api_key | null |
load | """Load documents."""
if self.folder_id:
return self._load_documents_from_folder(self.folder_id, file_types=self
.file_types)
elif self.document_ids:
return self._load_documents_from_ids()
else:
return self._load_file_from_ids() | def load(self) ->List[Document]:
"""Load documents."""
if self.folder_id:
return self._load_documents_from_folder(self.folder_id, file_types=
self.file_types)
elif self.document_ids:
return self._load_documents_from_ids()
else:
return self._load_file_from_ids() | Load documents. |
requires_reference | """Return whether the chain requires a reference.
Returns:
bool: True if the chain requires a reference, False otherwise.
"""
return False | @property
def requires_reference(self) ->bool:
"""Return whether the chain requires a reference.
Returns:
bool: True if the chain requires a reference, False otherwise.
"""
return False | Return whether the chain requires a reference.
Returns:
bool: True if the chain requires a reference, False otherwise. |
on_text | """
Run when agent is ending.
"""
self.step += 1
self.text_ctr += 1
resp = self._init_resp()
resp.update({'action': 'on_text'})
resp.update(self.get_custom_callback_meta())
if self.stream_logs:
self._log_stream(text, resp, self.step)
resp.update({'text': text})
self.action_records.append(resp) | def on_text(self, text: str, **kwargs: Any) ->None:
"""
Run when agent is ending.
"""
self.step += 1
self.text_ctr += 1
resp = self._init_resp()
resp.update({'action': 'on_text'})
resp.update(self.get_custom_callback_meta())
if self.stream_logs:
self._log_stream(text, resp, self.step)
resp.update({'text': text})
self.action_records.append(resp) | Run when agent is ending. |
remove_newlines | """Recursively remove newlines, no matter the data structure they are stored in."""
import pandas as pd
if isinstance(x, str):
return x.replace('\n', '')
elif isinstance(x, list):
return [remove_newlines(elem) for elem in x]
elif isinstance(x, pd.DataFrame):
return x.applymap(remove_newlines)
else:
return x | def remove_newlines(x: Any) ->Any:
"""Recursively remove newlines, no matter the data structure they are stored in."""
import pandas as pd
if isinstance(x, str):
return x.replace('\n', '')
elif isinstance(x, list):
return [remove_newlines(elem) for elem in x]
elif isinstance(x, pd.DataFrame):
return x.applymap(remove_newlines)
else:
return x | Recursively remove newlines, no matter the data structure they are stored in. |
_call | _run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager()
input_text = f"""{inputs[self.input_key]}
ESQuery:"""
_run_manager.on_text(input_text, verbose=self.verbose)
indices = self._list_indices()
indices_info = self._get_indices_infos(indices)
query_inputs: dict = {'input': input_text, 'top_k': str(self.top_k),
'indices_info': indices_info, 'stop': ['\nESResult:']}
intermediate_steps: List = []
try:
intermediate_steps.append(query_inputs)
es_cmd = self.query_chain.run(callbacks=_run_manager.get_child(), **
query_inputs)
_run_manager.on_text(es_cmd, color='green', verbose=self.verbose)
intermediate_steps.append(es_cmd)
intermediate_steps.append({'es_cmd': es_cmd})
result = self._search(indices=indices, query=es_cmd)
intermediate_steps.append(str(result))
_run_manager.on_text('\nESResult: ', verbose=self.verbose)
_run_manager.on_text(result, color='yellow', verbose=self.verbose)
_run_manager.on_text('\nAnswer:', verbose=self.verbose)
answer_inputs: dict = {'data': result, 'input': input_text}
intermediate_steps.append(answer_inputs)
final_result = self.answer_chain.run(callbacks=_run_manager.get_child(),
**answer_inputs)
intermediate_steps.append(final_result)
_run_manager.on_text(final_result, color='green', verbose=self.verbose)
chain_result: Dict[str, Any] = {self.output_key: final_result}
if self.return_intermediate_steps:
chain_result[INTERMEDIATE_STEPS_KEY] = intermediate_steps
return chain_result
except Exception as exc:
exc.intermediate_steps = intermediate_steps
raise exc | def _call(self, inputs: Dict[str, Any], run_manager: Optional[
CallbackManagerForChainRun]=None) ->Dict[str, Any]:
_run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager()
input_text = f'{inputs[self.input_key]}\nESQuery:'
_run_manager.on_text(input_text, verbose=self.verbose)
indices = self._list_indices()
indices_info = self._get_indices_infos(indices)
query_inputs: dict = {'input': input_text, 'top_k': str(self.top_k),
'indices_info': indices_info, 'stop': ['\nESResult:']}
intermediate_steps: List = []
try:
intermediate_steps.append(query_inputs)
es_cmd = self.query_chain.run(callbacks=_run_manager.get_child(),
**query_inputs)
_run_manager.on_text(es_cmd, color='green', verbose=self.verbose)
intermediate_steps.append(es_cmd)
intermediate_steps.append({'es_cmd': es_cmd})
result = self._search(indices=indices, query=es_cmd)
intermediate_steps.append(str(result))
_run_manager.on_text('\nESResult: ', verbose=self.verbose)
_run_manager.on_text(result, color='yellow', verbose=self.verbose)
_run_manager.on_text('\nAnswer:', verbose=self.verbose)
answer_inputs: dict = {'data': result, 'input': input_text}
intermediate_steps.append(answer_inputs)
final_result = self.answer_chain.run(callbacks=_run_manager.
get_child(), **answer_inputs)
intermediate_steps.append(final_result)
_run_manager.on_text(final_result, color='green', verbose=self.verbose)
chain_result: Dict[str, Any] = {self.output_key: final_result}
if self.return_intermediate_steps:
chain_result[INTERMEDIATE_STEPS_KEY] = intermediate_steps
return chain_result
except Exception as exc:
exc.intermediate_steps = intermediate_steps
raise exc | null |
test_agent_iterator_with_callbacks | """Test react chain iterator with callbacks by setting verbose globally."""
handler1 = FakeCallbackHandler()
handler2 = FakeCallbackHandler()
bad_action_name = 'BadAction'
responses = [
f"""I'm turning evil
Action: {bad_action_name}
Action Input: misalignment"""
, """Oh well
Final Answer: curses foiled again"""]
fake_llm = FakeListLLM(cache=False, responses=responses, callbacks=[handler2])
tools = [Tool(name='Search', func=lambda x: x, description=
'Useful for searching'), Tool(name='Lookup', func=lambda x: x,
description='Useful for looking up things in a table')]
agent = initialize_agent(tools, fake_llm, agent=AgentType.
ZERO_SHOT_REACT_DESCRIPTION, verbose=True)
agent_iter = agent.iter(inputs='when was langchain made', callbacks=[
handler1], include_run_info=True)
outputs = []
for step in agent_iter:
outputs.append(step)
assert isinstance(outputs[-1], dict)
assert outputs[-1]['output'] == 'curses foiled again'
assert isinstance(outputs[-1][RUN_KEY].run_id, UUID)
assert handler1.chain_starts == handler1.chain_ends == 3
assert handler1.llm_starts == handler1.llm_ends == 2
assert handler1.tool_starts == 1
assert handler1.tool_ends == 1
assert handler1.starts == 7
assert handler1.ends == 7
print('h:', handler1)
assert handler1.errors == 0
assert handler1.text == 2
assert handler2.llm_starts == 2
assert handler2.llm_ends == 2
assert handler2.chain_starts == handler2.tool_starts == handler2.tool_ends == handler2.chain_ends == 0 | def test_agent_iterator_with_callbacks() ->None:
"""Test react chain iterator with callbacks by setting verbose globally."""
handler1 = FakeCallbackHandler()
handler2 = FakeCallbackHandler()
bad_action_name = 'BadAction'
responses = [
f"I'm turning evil\nAction: {bad_action_name}\nAction Input: misalignment"
, """Oh well
Final Answer: curses foiled again"""]
fake_llm = FakeListLLM(cache=False, responses=responses, callbacks=[
handler2])
tools = [Tool(name='Search', func=lambda x: x, description=
'Useful for searching'), Tool(name='Lookup', func=lambda x: x,
description='Useful for looking up things in a table')]
agent = initialize_agent(tools, fake_llm, agent=AgentType.
ZERO_SHOT_REACT_DESCRIPTION, verbose=True)
agent_iter = agent.iter(inputs='when was langchain made', callbacks=[
handler1], include_run_info=True)
outputs = []
for step in agent_iter:
outputs.append(step)
assert isinstance(outputs[-1], dict)
assert outputs[-1]['output'] == 'curses foiled again'
assert isinstance(outputs[-1][RUN_KEY].run_id, UUID)
assert handler1.chain_starts == handler1.chain_ends == 3
assert handler1.llm_starts == handler1.llm_ends == 2
assert handler1.tool_starts == 1
assert handler1.tool_ends == 1
assert handler1.starts == 7
assert handler1.ends == 7
print('h:', handler1)
assert handler1.errors == 0
assert handler1.text == 2
assert handler2.llm_starts == 2
assert handler2.llm_ends == 2
assert handler2.chain_starts == handler2.tool_starts == handler2.tool_ends == handler2.chain_ends == 0 | Test react chain iterator with callbacks by setting verbose globally. |
create_tagging_chain | """Creates a chain that extracts information from a passage
based on a schema.
Args:
schema: The schema of the entities to extract.
llm: The language model to use.
Returns:
Chain (LLMChain) that can be used to extract information from a passage.
"""
function = _get_tagging_function(schema)
prompt = prompt or ChatPromptTemplate.from_template(_TAGGING_TEMPLATE)
output_parser = JsonOutputFunctionsParser()
llm_kwargs = get_llm_kwargs(function)
chain = LLMChain(llm=llm, prompt=prompt, llm_kwargs=llm_kwargs,
output_parser=output_parser, **kwargs)
return chain | def create_tagging_chain(schema: dict, llm: BaseLanguageModel, prompt:
Optional[ChatPromptTemplate]=None, **kwargs: Any) ->Chain:
"""Creates a chain that extracts information from a passage
based on a schema.
Args:
schema: The schema of the entities to extract.
llm: The language model to use.
Returns:
Chain (LLMChain) that can be used to extract information from a passage.
"""
function = _get_tagging_function(schema)
prompt = prompt or ChatPromptTemplate.from_template(_TAGGING_TEMPLATE)
output_parser = JsonOutputFunctionsParser()
llm_kwargs = get_llm_kwargs(function)
chain = LLMChain(llm=llm, prompt=prompt, llm_kwargs=llm_kwargs,
output_parser=output_parser, **kwargs)
return chain | Creates a chain that extracts information from a passage
based on a schema.
Args:
schema: The schema of the entities to extract.
llm: The language model to use.
Returns:
Chain (LLMChain) that can be used to extract information from a passage. |
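A usage sketch for the tagging-chain constructor above. The schema is a JSON-Schema-style dict, and the model must support OpenAI function calling; the `langchain_openai` import, model name, and schema fields are placeholders, not part of the row:

```python
# Sketch: requires an OpenAI-compatible chat model with function calling
# and an API key in the environment.
from langchain_openai import ChatOpenAI

schema = {
    "properties": {
        "sentiment": {"type": "string"},
        "language": {"type": "string"},
    },
    "required": ["sentiment", "language"],
}

llm = ChatOpenAI(model="gpt-3.5-turbo", temperature=0)
chain = create_tagging_chain(schema, llm)
print(chain.run("Estoy muy contento de haber conocido esta librería."))
# e.g. {'sentiment': 'positive', 'language': 'Spanish'}
```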
_create_retry_decorator | import cohere
min_seconds = 4
max_seconds = 10
return retry(reraise=True, stop=stop_after_attempt(llm.max_retries), wait=
wait_exponential(multiplier=1, min=min_seconds, max=max_seconds), retry
=retry_if_exception_type(cohere.error.CohereError), before_sleep=
before_sleep_log(logger, logging.WARNING)) | def _create_retry_decorator(llm: Cohere) ->Callable[[Any], Any]:
import cohere
min_seconds = 4
max_seconds = 10
return retry(reraise=True, stop=stop_after_attempt(llm.max_retries),
wait=wait_exponential(multiplier=1, min=min_seconds, max=
max_seconds), retry=retry_if_exception_type(cohere.error.
CohereError), before_sleep=before_sleep_log(logger, logging.WARNING)) | null |
__init__ | self.embedding = embedding
self.index_name = index_name
self.query_field = kwargs.get('query_field', 'text')
self.vector_query_field = kwargs.get('vector_query_field', 'vector')
self.space_type = kwargs.get('space_type', 'cosine')
self.index_type = kwargs.get('index_type', 'linear')
self.index_params = kwargs.get('index_params') or {}
if bes_url is not None:
self.client = BESVectorStore.bes_client(bes_url=bes_url, username=user,
password=password)
else:
raise ValueError('Please specify a bes connection url.') | def __init__(self, index_name: str, bes_url: str, user: Optional[str]=None,
password: Optional[str]=None, embedding: Optional[Embeddings]=None, **
kwargs: Optional[dict]) ->None:
self.embedding = embedding
self.index_name = index_name
self.query_field = kwargs.get('query_field', 'text')
self.vector_query_field = kwargs.get('vector_query_field', 'vector')
self.space_type = kwargs.get('space_type', 'cosine')
self.index_type = kwargs.get('index_type', 'linear')
self.index_params = kwargs.get('index_params') or {}
if bes_url is not None:
self.client = BESVectorStore.bes_client(bes_url=bes_url, username=
user, password=password)
else:
raise ValueError('Please specify a bes connection url.') | null |
buffer_as_str | """Exposes the buffer as a string in case return_messages is False."""
return get_buffer_string(self.chat_memory.messages, human_prefix=self.
human_prefix, ai_prefix=self.ai_prefix) | @property
def buffer_as_str(self) ->str:
"""Exposes the buffer as a string in case return_messages is False."""
return get_buffer_string(self.chat_memory.messages, human_prefix=self.
human_prefix, ai_prefix=self.ai_prefix) | Exposes the buffer as a string in case return_messages is False. |
__post_init__ | """Initialize the store."""
self.create_vector_extension()
EmbeddingStore, CollectionStore = _get_embedding_collection_store()
self.CollectionStore = CollectionStore
self.EmbeddingStore = EmbeddingStore
self.create_tables_if_not_exists()
self.create_collection() | def __post_init__(self) ->None:
"""Initialize the store."""
self.create_vector_extension()
EmbeddingStore, CollectionStore = _get_embedding_collection_store()
self.CollectionStore = CollectionStore
self.EmbeddingStore = EmbeddingStore
self.create_tables_if_not_exists()
self.create_collection() | Initialize the store. |
from_llm | """Initialize from llm using default template.
The prompt used here expects a single input: `question`
Args:
retriever: retriever to query documents from
llm: llm for query generation using DEFAULT_QUERY_PROMPT
prompt: prompt template for query generation
Returns:
RePhraseQueryRetriever
"""
llm_chain = LLMChain(llm=llm, prompt=prompt)
return cls(retriever=retriever, llm_chain=llm_chain) | @classmethod
def from_llm(cls, retriever: BaseRetriever, llm: BaseLLM, prompt:
PromptTemplate=DEFAULT_QUERY_PROMPT) ->'RePhraseQueryRetriever':
"""Initialize from llm using default template.
The prompt used here expects a single input: `question`
Args:
retriever: retriever to query documents from
llm: llm for query generation using DEFAULT_QUERY_PROMPT
prompt: prompt template for query generation
Returns:
RePhraseQueryRetriever
"""
llm_chain = LLMChain(llm=llm, prompt=prompt)
return cls(retriever=retriever, llm_chain=llm_chain) | Initialize from llm using default template.
The prompt used here expects a single input: `question`
Args:
retriever: retriever to query documents from
llm: llm for query generation using DEFAULT_QUERY_PROMPT
prompt: prompt template for query generation
Returns:
RePhraseQueryRetriever |
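A runnable sketch for the constructor above, wrapping an existing retriever so that user questions are re-phrased by an LLM before hitting the vector store. The fake LLM, fake embeddings, and FAISS store below are stand-ins (and FAISS needs `faiss-cpu`):

```python
from langchain.retrievers import RePhraseQueryRetriever
from langchain_community.embeddings import FakeEmbeddings
from langchain_community.llms.fake import FakeListLLM
from langchain_community.vectorstores import FAISS

vectorstore = FAISS.from_texts(
    ["Memory is covered in the how-to guides."], FakeEmbeddings(size=8)
)
llm = FakeListLLM(responses=["memory documentation"])   # canned "re-phrased" query

retriever = RePhraseQueryRetriever.from_llm(
    retriever=vectorstore.as_retriever(), llm=llm
)
docs = retriever.get_relevant_documents(
    "Hi, I'm new here! Where in the docs is memory explained?"
)
print([d.page_content for d in docs])
```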
test_confident_deepeval | """Test valid call to DeepEval."""
from deepeval.metrics.answer_relevancy import AnswerRelevancy
from langchain_community.callbacks.confident_callback import DeepEvalCallbackHandler
from langchain_community.llms import OpenAI
answer_relevancy = AnswerRelevancy(minimum_score=0.3)
deepeval_callback = DeepEvalCallbackHandler(implementation_name=
'exampleImplementation', metrics=[answer_relevancy])
llm = OpenAI(temperature=0, callbacks=[deepeval_callback], verbose=True,
openai_api_key='<YOUR_API_KEY>')
llm.generate(['What is the best evaluation tool out there? (no bias at all)'])
assert answer_relevancy.is_successful(), 'Answer not relevant' | def test_confident_deepeval() ->None:
"""Test valid call to Beam."""
from deepeval.metrics.answer_relevancy import AnswerRelevancy
from langchain_community.callbacks.confident_callback import DeepEvalCallbackHandler
from langchain_community.llms import OpenAI
answer_relevancy = AnswerRelevancy(minimum_score=0.3)
deepeval_callback = DeepEvalCallbackHandler(implementation_name=
'exampleImplementation', metrics=[answer_relevancy])
llm = OpenAI(temperature=0, callbacks=[deepeval_callback], verbose=True,
openai_api_key='<YOUR_API_KEY>')
llm.generate([
'What is the best evaluation tool out there? (no bias at all)'])
assert answer_relevancy.is_successful(), 'Answer not relevant' | Test valid call to DeepEval. |
__init__ | super().__init__(function=print, **kwargs) | def __init__(self, **kwargs: Any) ->None:
super().__init__(function=print, **kwargs) | null |
_chat_with_retry | try:
return generation_method(**kwargs)
except InvalidArgument as e:
raise ChatGoogleGenerativeAIError(
f'Invalid argument provided to Gemini: {e}') from e
except Exception as e:
raise e | @retry_decorator
def _chat_with_retry(**kwargs: Any) ->Any:
try:
return generation_method(**kwargs)
except InvalidArgument as e:
raise ChatGoogleGenerativeAIError(
f'Invalid argument provided to Gemini: {e}') from e
except Exception as e:
raise e | null |
_coerce_set_value | if not isinstance(value, Runnable) and not callable(value):
return coerce_to_runnable(lambda _: value)
return coerce_to_runnable(value) | def _coerce_set_value(value: SetValue) ->Runnable[Input, Output]:
if not isinstance(value, Runnable) and not callable(value):
return coerce_to_runnable(lambda _: value)
return coerce_to_runnable(value) | null |
from_llm | if llm is not None:
base_parser = base_parser or StructuredChatOutputParser()
output_fixing_parser: OutputFixingParser = OutputFixingParser.from_llm(llm
=llm, parser=base_parser)
return cls(output_fixing_parser=output_fixing_parser)
elif base_parser is not None:
return cls(base_parser=base_parser)
else:
return cls() | @classmethod
def from_llm(cls, llm: Optional[BaseLanguageModel]=None, base_parser:
Optional[StructuredChatOutputParser]=None
) ->StructuredChatOutputParserWithRetries:
if llm is not None:
base_parser = base_parser or StructuredChatOutputParser()
output_fixing_parser: OutputFixingParser = OutputFixingParser.from_llm(
llm=llm, parser=base_parser)
return cls(output_fixing_parser=output_fixing_parser)
elif base_parser is not None:
return cls(base_parser=base_parser)
else:
return cls() | null |
from_db_credentials | """Convenience constructor that builds Arango DB from credentials.
Args:
url: Arango DB url. Can be passed in as named arg or set as environment
var ``ARANGODB_URL``. Defaults to "http://localhost:8529".
dbname: Arango DB name. Can be passed in as named arg or set as
environment var ``ARANGODB_DBNAME``. Defaults to "_system".
username: Can be passed in as named arg or set as environment var
``ARANGODB_USERNAME``. Defaults to "root".
password: Can be passed in as named arg or set as environment var
``ARANGODB_PASSWORD``. Defaults to "".
Returns:
An arango.database.StandardDatabase.
"""
db = get_arangodb_client(url=url, dbname=dbname, username=username,
password=password)
return cls(db) | @classmethod
def from_db_credentials(cls, url: Optional[str]=None, dbname: Optional[str]
=None, username: Optional[str]=None, password: Optional[str]=None) ->Any:
"""Convenience constructor that builds Arango DB from credentials.
Args:
url: Arango DB url. Can be passed in as named arg or set as environment
var ``ARANGODB_URL``. Defaults to "http://localhost:8529".
dbname: Arango DB name. Can be passed in as named arg or set as
environment var ``ARANGODB_DBNAME``. Defaults to "_system".
username: Can be passed in as named arg or set as environment var
``ARANGODB_USERNAME``. Defaults to "root".
password: Can be passed in as named arg or set as environment var
``ARANGODB_PASSWORD``. Defaults to "".
Returns:
An arango.database.StandardDatabase.
"""
db = get_arangodb_client(url=url, dbname=dbname, username=username,
password=password)
return cls(db) | Convenience constructor that builds Arango DB from credentials.
Args:
url: Arango DB url. Can be passed in as named arg or set as environment
var ``ARANGODB_URL``. Defaults to "http://localhost:8529".
dbname: Arango DB name. Can be passed in as named arg or set as
environment var ``ARANGODB_DBNAME``. Defaults to "_system".
username: Can be passed in as named arg or set as environment var
``ARANGODB_USERNAME``. Defaults to "root".
password: Can be passed in as named arg or set as environment var
``ARANGODB_PASSWORD``. Defaults to "".
Returns:
An arango.database.StandardDatabase. |
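A usage sketch for the credential-based constructor above, which appears to belong to the ArangoDB graph wrapper; the import path, URL, and credentials below are placeholders, and a reachable ArangoDB instance plus the `python-arango` package are assumed:

```python
import os

# Credentials can be supplied via environment variables instead of kwargs.
os.environ["ARANGODB_URL"] = "http://localhost:8529"   # placeholder
os.environ["ARANGODB_USERNAME"] = "root"                # placeholder
os.environ["ARANGODB_PASSWORD"] = ""                    # placeholder

from langchain_community.graphs import ArangoGraph      # assumed home of this classmethod

graph = ArangoGraph.from_db_credentials(dbname="_system")
print(type(graph))
```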
_run | """Use the tool."""
return str(self.wrapper.results(query, self.num_results, **self.kwargs)) | def _run(self, query: str, run_manager: Optional[CallbackManagerForToolRun]
=None) ->str:
"""Use the tool."""
return str(self.wrapper.results(query, self.num_results, **self.kwargs)) | Use the tool. |
convert_pydantic_to_openai_function | """Converts a Pydantic model to a function description for the OpenAI API."""
schema = dereference_refs(model.schema())
schema.pop('definitions', None)
return {'name': name or schema['title'], 'description': description or
schema['description'], 'parameters': schema} | def convert_pydantic_to_openai_function(model: Type[BaseModel], *, name:
Optional[str]=None, description: Optional[str]=None) ->FunctionDescription:
"""Converts a Pydantic model to a function description for the OpenAI API."""
schema = dereference_refs(model.schema())
schema.pop('definitions', None)
return {'name': name or schema['title'], 'description': description or
schema['description'], 'parameters': schema} | Converts a Pydantic model to a function description for the OpenAI API. |
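A usage sketch for the converter above, with a small Pydantic model standing in for a real tool schema. The import path shown is one common home for this helper; adjust it to your installed version:

```python
# Sketch: turn a Pydantic model (with a docstring and typed fields) into an
# OpenAI function-calling description.
from pydantic import BaseModel, Field
from langchain_core.utils.function_calling import convert_pydantic_to_openai_function

class WeatherQuery(BaseModel):
    """Look up the current weather for a city."""
    city: str = Field(description="City name")
    unit: str = Field(default="celsius", description="celsius or fahrenheit")

fn = convert_pydantic_to_openai_function(WeatherQuery)
print(fn["name"])                            # 'WeatherQuery'
print(list(fn["parameters"]["properties"]))  # ['city', 'unit']
```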
_chunk | for i in range(0, len(texts), size):
yield texts[i:i + size] | def _chunk(texts: List[str], size: int) ->Iterator[List[str]]:
for i in range(0, len(texts), size):
yield texts[i:i + size] | null |
__init__ | """
Initialize with client_id, client_secret, user_agent, search_queries, mode,
categories, number_posts.
Example: https://www.reddit.com/r/learnpython/
Args:
client_id: Reddit client id.
client_secret: Reddit client secret.
user_agent: Reddit user agent.
search_queries: The search queries.
mode: The mode.
categories: The categories. Default: ["new"]
number_posts: The number of posts. Default: 10
"""
self.client_id = client_id
self.client_secret = client_secret
self.user_agent = user_agent
self.search_queries = search_queries
self.mode = mode
self.categories = categories
self.number_posts = number_posts | def __init__(self, client_id: str, client_secret: str, user_agent: str,
search_queries: Sequence[str], mode: str, categories: Sequence[str]=[
'new'], number_posts: Optional[int]=10):
"""
Initialize with client_id, client_secret, user_agent, search_queries, mode,
categories, number_posts.
Example: https://www.reddit.com/r/learnpython/
Args:
client_id: Reddit client id.
client_secret: Reddit client secret.
user_agent: Reddit user agent.
search_queries: The search queries.
mode: The mode.
categories: The categories. Default: ["new"]
number_posts: The number of posts. Default: 10
"""
self.client_id = client_id
self.client_secret = client_secret
self.user_agent = user_agent
self.search_queries = search_queries
self.mode = mode
self.categories = categories
self.number_posts = number_posts | Initialize with client_id, client_secret, user_agent, search_queries, mode,
categories, number_posts.
Example: https://www.reddit.com/r/learnpython/
Args:
client_id: Reddit client id.
client_secret: Reddit client secret.
user_agent: Reddit user agent.
search_queries: The search queries.
mode: The mode.
categories: The categories. Default: ["new"]
number_posts: The number of posts. Default: 10 |
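A usage sketch for the Reddit loader initializer above (credentials are placeholders; `mode` is typically `'subreddit'` or `'username'`, matching what the search queries name, and the `praw` package is assumed):

```python
from langchain_community.document_loaders import RedditPostsLoader

loader = RedditPostsLoader(
    client_id="YOUR_CLIENT_ID",            # placeholder
    client_secret="YOUR_CLIENT_SECRET",    # placeholder
    user_agent="extractor by u/yourname",  # placeholder
    search_queries=["learnpython"],
    mode="subreddit",
    categories=["new", "hot"],
    number_posts=5,
)
docs = loader.load()
print(len(docs))
```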
test_func_call_oldstyle | parser = OpenAIFunctionsAgentOutputParser()
msg = AIMessage(content='LLM thoughts.', additional_kwargs={'function_call':
{'name': 'foo', 'arguments': '{"__arg1": "42"}'}})
result = parser.invoke(msg)
assert isinstance(result, AgentActionMessageLog)
assert result.tool == 'foo'
assert result.tool_input == '42'
assert result.log == """
Invoking: `foo` with `42`
responded: LLM thoughts.
"""
assert result.message_log == [msg] | def test_func_call_oldstyle() ->None:
parser = OpenAIFunctionsAgentOutputParser()
msg = AIMessage(content='LLM thoughts.', additional_kwargs={
'function_call': {'name': 'foo', 'arguments': '{"__arg1": "42"}'}})
result = parser.invoke(msg)
assert isinstance(result, AgentActionMessageLog)
assert result.tool == 'foo'
assert result.tool_input == '42'
assert result.log == '\nInvoking: `foo` with `42`\nresponded: LLM thoughts.\n\n'
assert result.message_log == [msg] | null |
_combine_documents | doc_strings = [format_document(doc, document_prompt) for doc in docs]
return document_separator.join(doc_strings) | def _combine_documents(docs: List[Document], document_prompt:
PromptTemplate=DEFAULT_DOCUMENT_PROMPT, document_separator: str='\n\n'):
doc_strings = [format_document(doc, document_prompt) for doc in docs]
return document_separator.join(doc_strings) | null |
test_messages_to_prompt_dict_raises_with_example_after_real | pytest.importorskip('google.generativeai')
with pytest.raises(ChatGooglePalmError) as e:
_messages_to_prompt_dict([HumanMessage(example=False, content=
'Real message'), HumanMessage(example=True, content=
'Human example #1'), AIMessage(example=True, content='AI example #1')])
assert 'Message examples must come before other' in str(e) | def test_messages_to_prompt_dict_raises_with_example_after_real() ->None:
pytest.importorskip('google.generativeai')
with pytest.raises(ChatGooglePalmError) as e:
_messages_to_prompt_dict([HumanMessage(example=False, content=
'Real message'), HumanMessage(example=True, content=
'Human example #1'), AIMessage(example=True, content=
'AI example #1')])
assert 'Message examples must come before other' in str(e) | null |
test_custom | loader = ReadTheDocsLoader(PARENT_DIR / 'custom', custom_html_tag=(
'article', {'role': 'main'}))
documents = loader.load()
assert len(documents[0].page_content) != 0 | @pytest.mark.requires('bs4')
def test_custom() ->None:
loader = ReadTheDocsLoader(PARENT_DIR / 'custom', custom_html_tag=(
'article', {'role': 'main'}))
documents = loader.load()
assert len(documents[0].page_content) != 0 | null |
setup | aim = import_aim()
if not self._run:
if self._run_hash:
self._run = aim.Run(self._run_hash, repo=self.repo,
system_tracking_interval=self.system_tracking_interval)
else:
self._run = aim.Run(repo=self.repo, experiment=self.experiment_name,
system_tracking_interval=self.system_tracking_interval,
log_system_params=self.log_system_params)
self._run_hash = self._run.hash
if kwargs:
for key, value in kwargs.items():
self._run.set(key, value, strict=False) | def setup(self, **kwargs: Any) ->None:
aim = import_aim()
if not self._run:
if self._run_hash:
self._run = aim.Run(self._run_hash, repo=self.repo,
system_tracking_interval=self.system_tracking_interval)
else:
self._run = aim.Run(repo=self.repo, experiment=self.
experiment_name, system_tracking_interval=self.
system_tracking_interval, log_system_params=self.
log_system_params)
self._run_hash = self._run.hash
if kwargs:
for key, value in kwargs.items():
self._run.set(key, value, strict=False) | null |
on_llm_new_token | """Do nothing when a new token is generated."""
pass | def on_llm_new_token(self, token: str, **kwargs: Any) ->None:
"""Do nothing when a new token is generated."""
pass | Do nothing when a new token is generated. |
from_llm | """Convenience constructor."""
llm_chain = LLMChain(llm=llm, prompt=prompt)
return cls(llm_chain=llm_chain, **kwargs) | @classmethod
def from_llm(cls, llm: BaseLanguageModel, prompt: BasePromptTemplate, **
kwargs: Any) ->LLMRouterChain:
"""Convenience constructor."""
llm_chain = LLMChain(llm=llm, prompt=prompt)
return cls(llm_chain=llm_chain, **kwargs) | Convenience constructor. |
_generate | response = self.responses[self.i]
if self.i < len(self.responses) - 1:
self.i += 1
else:
self.i = 0
generation = ChatGeneration(message=response)
return ChatResult(generations=[generation]) | def _generate(self, messages: List[BaseMessage], stop: Optional[List[str]]=
None, run_manager: Optional[CallbackManagerForLLMRun]=None, **kwargs: Any
) ->ChatResult:
response = self.responses[self.i]
if self.i < len(self.responses) - 1:
self.i += 1
else:
self.i = 0
generation = ChatGeneration(message=response)
return ChatResult(generations=[generation]) | null |
format_document | """Format a document into a string based on a prompt template.
First, this pulls information from the document from two sources:
1. `page_content`:
This takes the information from the `document.page_content`
and assigns it to a variable named `page_content`.
2. metadata:
This takes information from `document.metadata` and assigns
it to variables of the same name.
Those variables are then passed into the `prompt` to produce a formatted string.
Args:
doc: Document, the page_content and metadata will be used to create
the final string.
prompt: BasePromptTemplate, will be used to format the page_content
and metadata into the final string.
Returns:
string of the document formatted.
Example:
.. code-block:: python
from langchain_core.documents import Document
from langchain_core.prompts import PromptTemplate
doc = Document(page_content="This is a joke", metadata={"page": "1"})
prompt = PromptTemplate.from_template("Page {page}: {page_content}")
format_document(doc, prompt)
>>> "Page 1: This is a joke"
"""
base_info = {'page_content': doc.page_content, **doc.metadata}
missing_metadata = set(prompt.input_variables).difference(base_info)
if len(missing_metadata) > 0:
required_metadata = [iv for iv in prompt.input_variables if iv !=
'page_content']
raise ValueError(
f'Document prompt requires documents to have metadata variables: {required_metadata}. Received document with missing metadata: {list(missing_metadata)}.'
)
document_info = {k: base_info[k] for k in prompt.input_variables}
return prompt.format(**document_info) | def format_document(doc: Document, prompt: BasePromptTemplate) ->str:
"""Format a document into a string based on a prompt template.
First, this pulls information from the document from two sources:
1. `page_content`:
This takes the information from the `document.page_content`
and assigns it to a variable named `page_content`.
2. metadata:
This takes information from `document.metadata` and assigns
it to variables of the same name.
Those variables are then passed into the `prompt` to produce a formatted string.
Args:
doc: Document, the page_content and metadata will be used to create
the final string.
prompt: BasePromptTemplate, will be used to format the page_content
and metadata into the final string.
Returns:
string of the document formatted.
Example:
.. code-block:: python
from langchain_core.documents import Document
from langchain_core.prompts import PromptTemplate
doc = Document(page_content="This is a joke", metadata={"page": "1"})
prompt = PromptTemplate.from_template("Page {page}: {page_content}")
format_document(doc, prompt)
>>> "Page 1: This is a joke"
"""
base_info = {'page_content': doc.page_content, **doc.metadata}
missing_metadata = set(prompt.input_variables).difference(base_info)
if len(missing_metadata) > 0:
required_metadata = [iv for iv in prompt.input_variables if iv !=
'page_content']
raise ValueError(
f'Document prompt requires documents to have metadata variables: {required_metadata}. Received document with missing metadata: {list(missing_metadata)}.'
)
document_info = {k: base_info[k] for k in prompt.input_variables}
return prompt.format(**document_info) | Format a document into a string based on a prompt template.
First, this pulls information from the document from two sources:
1. `page_content`:
This takes the information from the `document.page_content`
and assigns it to a variable named `page_content`.
2. metadata:
This takes information from `document.metadata` and assigns
it to variables of the same name.
Those variables are then passed into the `prompt` to produce a formatted string.
Args:
doc: Document, the page_content and metadata will be used to create
the final string.
prompt: BasePromptTemplate, will be used to format the page_content
and metadata into the final string.
Returns:
string of the document formatted.
Example:
.. code-block:: python
from langchain_core.documents import Document
from langchain_core.prompts import PromptTemplate
doc = Document(page_content="This is a joke", metadata={"page": "1"})
prompt = PromptTemplate.from_template("Page {page}: {page_content}")
format_document(doc, prompt)
>>> "Page 1: This is a joke" |
map_to_database | result = ''
for entity in entities.names:
response = graph.query(
"CALL db.index.fulltext.queryNodes('entity', $entity + '*', {limit:1}) YIELD node,score RETURN node.name AS result"
, {'entity': entity})
try:
result += f"{entity} maps to {response[0]['result']} in database\n"
except IndexError:
pass
return result | def map_to_database(entities: Entities) ->Optional[str]:
result = ''
for entity in entities.names:
response = graph.query(
"CALL db.index.fulltext.queryNodes('entity', $entity + '*', {limit:1}) YIELD node,score RETURN node.name AS result"
, {'entity': entity})
try:
result += f"{entity} maps to {response[0]['result']} in database\n"
except IndexError:
pass
return result | null |
json_validity_evaluator | return JsonValidityEvaluator() | @pytest.fixture
def json_validity_evaluator() ->JsonValidityEvaluator:
return JsonValidityEvaluator() | null |
_similarity_search_with_relevance_scores | return self.similarity_search_with_score(query, k, **kwargs) | def _similarity_search_with_relevance_scores(self, query: str, k: int=4, **
kwargs: Any) ->List[Tuple[Document, float]]:
return self.similarity_search_with_score(query, k, **kwargs) | null |
_run | """Use the tool."""
return self.api_wrapper.run(query) | def _run(self, query: str, run_manager: Optional[CallbackManagerForToolRun]
=None) ->str:
"""Use the tool."""
return self.api_wrapper.run(query) | Use the tool. |
detect_node_variables | """
Args:
query: cypher query
"""
nodes = re.findall(self.node_pattern, query)
nodes = [self.clean_node(node) for node in nodes]
res: Dict[str, Any] = {}
for node in nodes:
parts = node.split(':')
if parts == '':
continue
variable = parts[0]
if variable not in res:
res[variable] = []
res[variable] += parts[1:]
return res | def detect_node_variables(self, query: str) ->Dict[str, List[str]]:
"""
Args:
query: cypher query
"""
nodes = re.findall(self.node_pattern, query)
nodes = [self.clean_node(node) for node in nodes]
res: Dict[str, Any] = {}
for node in nodes:
parts = node.split(':')
if parts == '':
continue
variable = parts[0]
if variable not in res:
res[variable] = []
res[variable] += parts[1:]
return res | Args:
query: cypher query |
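The detect_node_variables row relies on a node_pattern regex and a clean_node helper defined elsewhere on the class. A self-contained sketch of the idea, with an assumed pattern and cleanup step, so the variable-to-label extraction is visible end to end:
import re
from typing import Dict, List

# Assumed node pattern and cleanup, loosely mirroring the class above:
# grab "(...)" node groups, drop property maps, keep the "variable:Label" part.
NODE_PATTERN = re.compile(r"\(([^()]*)\)")

def detect_node_variables(query: str) -> Dict[str, List[str]]:
    res: Dict[str, List[str]] = {}
    for node in NODE_PATTERN.findall(query):
        node = node.split("{")[0].strip()   # strip property maps like {year: 1999}
        if not node:
            continue
        variable, *labels = node.split(":")
        res.setdefault(variable, []).extend(labels)
    return res

query = "MATCH (p:Person)-[:ACTED_IN]->(m:Movie {year: 1999}) RETURN p, m"
print(detect_node_variables(query))
# {'p': ['Person'], 'm': ['Movie']}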
__init__ | try:
import redis
except ImportError:
raise ImportError(
'Could not import redis python package. Please install it with `pip install redis`.'
)
try:
self.redis_client = get_client(redis_url=url)
except redis.exceptions.ConnectionError as error:
logger.error(error)
self.session_id = session_id
self.key_prefix = key_prefix
self.ttl = ttl | def __init__(self, session_id: str, url: str='redis://localhost:6379/0',
key_prefix: str='message_store:', ttl: Optional[int]=None):
try:
import redis
except ImportError:
raise ImportError(
'Could not import redis python package. Please install it with `pip install redis`.'
)
try:
self.redis_client = get_client(redis_url=url)
except redis.exceptions.ConnectionError as error:
logger.error(error)
self.session_id = session_id
self.key_prefix = key_prefix
self.ttl = ttl | null |
_create_retry_decorator | import openai
min_seconds = 1
max_seconds = 60
return retry(reraise=True, stop=stop_after_attempt(llm.max_retries), wait=
wait_exponential(multiplier=1, min=min_seconds, max=max_seconds), retry
=retry_if_exception_type(openai.error.Timeout) |
retry_if_exception_type(openai.error.APIError) |
retry_if_exception_type(openai.error.APIConnectionError) |
retry_if_exception_type(openai.error.RateLimitError) |
retry_if_exception_type(openai.error.ServiceUnavailableError),
before_sleep=before_sleep_log(logger, logging.WARNING)) | def _create_retry_decorator(llm: JinaChat) ->Callable[[Any], Any]:
import openai
min_seconds = 1
max_seconds = 60
return retry(reraise=True, stop=stop_after_attempt(llm.max_retries),
wait=wait_exponential(multiplier=1, min=min_seconds, max=
max_seconds), retry=retry_if_exception_type(openai.error.Timeout) |
retry_if_exception_type(openai.error.APIError) |
retry_if_exception_type(openai.error.APIConnectionError) |
retry_if_exception_type(openai.error.RateLimitError) |
retry_if_exception_type(openai.error.ServiceUnavailableError),
before_sleep=before_sleep_log(logger, logging.WARNING)) | null |
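The retry decorator above composes tenacity primitives: exponential backoff between 1 and 60 seconds, a cap on attempts, and a retry condition built by OR-ing exception types. A minimal sketch of the same pattern, with a placeholder exception type standing in for the openai error classes:
import logging
from tenacity import (before_sleep_log, retry, retry_if_exception_type,
                      stop_after_attempt, wait_exponential)

logger = logging.getLogger(__name__)

class TransientAPIError(Exception):
    """Placeholder for the provider-specific errors the real decorator targets."""

def create_retry_decorator(max_retries: int = 6):
    # Exponential backoff between 1s and 60s, capped attempts, and retries only
    # on the transient error type; several types can be OR-ed together as above.
    return retry(
        reraise=True,
        stop=stop_after_attempt(max_retries),
        wait=wait_exponential(multiplier=1, min=1, max=60),
        retry=retry_if_exception_type(TransientAPIError),
        before_sleep=before_sleep_log(logger, logging.WARNING),
    )

@create_retry_decorator(max_retries=3)
def call_flaky_api() -> str:
    raise TransientAPIError("temporarily unavailable")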
combined_exact_fuzzy_matching_strategy | """
RECOMMENDED STRATEGY.
Combined exact and fuzzy matching strategy for deanonymization.
Args:
text: text to deanonymize
deanonymizer_mapping: mapping between anonymized entities and original ones
max_l_dist: maximum Levenshtein distance between the anonymized entity and the
text segment to consider it a match
Examples of matching:
Kaenu Reves -> Keanu Reeves
John F. Kennedy -> John Kennedy
"""
text = exact_matching_strategy(text, deanonymizer_mapping)
text = fuzzy_matching_strategy(text, deanonymizer_mapping, max_l_dist)
return text | def combined_exact_fuzzy_matching_strategy(text: str, deanonymizer_mapping:
MappingDataType, max_l_dist: int=3) ->str:
"""
RECOMMENDED STRATEGY.
Combined exact and fuzzy matching strategy for deanonymization.
Args:
text: text to deanonymize
deanonymizer_mapping: mapping between anonymized entities and original ones
max_l_dist: maximum Levenshtein distance between the anonymized entity and the
text segment to consider it a match
Examples of matching:
Kaenu Reves -> Keanu Reeves
John F. Kennedy -> John Kennedy
"""
text = exact_matching_strategy(text, deanonymizer_mapping)
text = fuzzy_matching_strategy(text, deanonymizer_mapping, max_l_dist)
return text | RECOMMENDED STRATEGY.
Combined exact and fuzzy matching strategy for deanonymization.
Args:
text: text to deanonymize
deanonymizer_mapping: mapping between anonymized entities and original ones
max_l_dist: maximum Levenshtein distance between the anonymized entity and the
text segment to consider it a match
Examples of matching:
Kaenu Reves -> Keanu Reeves
John F. Kennedy -> John Kennedy |
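A short usage sketch of the combined strategy above. The import path and the mapping shape (entity type -> {anonymized value: original value}) are assumptions for illustration; the point is that the exact pass restores verbatim matches and the fuzzy pass then catches mentions within max_l_dist edits:
# Hypothetical import path -- adjust to wherever the strategy lives in your install.
from langchain_experimental.data_anonymizer.deanonymizer_matching_strategies import (
    combined_exact_fuzzy_matching_strategy,
)

# Assumed mapping shape: entity type -> {anonymized value: original value}.
mapping = {"PERSON": {"John Kowalski": "Keanu Reeves"}}

text = "John Kowalski starred in the film, and Jon Kowalski was praised for it."
restored = combined_exact_fuzzy_matching_strategy(text, mapping, max_l_dist=3)
# The exact pass restores "John Kowalski"; the fuzzy pass also catches "Jon Kowalski".
print(restored)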
from_texts | """Construct LLMRails wrapper from raw documents.
This is intended to be a quick way to get started.
Example:
.. code-block:: python
from langchain_community.vectorstores import LLMRails
llm_rails = LLMRails.from_texts(
texts,
datastore_id=datastore_id,
api_key=llm_rails_api_key
)
"""
llm_rails = cls(**kwargs)
llm_rails.add_texts(texts)
return llm_rails | @classmethod
def from_texts(cls, texts: List[str], embedding: Optional[Embeddings]=None,
metadatas: Optional[List[dict]]=None, **kwargs: Any) ->LLMRails:
"""Construct LLMRails wrapper from raw documents.
This is intended to be a quick way to get started.
Example:
.. code-block:: python
from langchain_community.vectorstores import LLMRails
llm_rails = LLMRails.from_texts(
texts,
datastore_id=datastore_id,
api_key=llm_rails_api_key
)
"""
llm_rails = cls(**kwargs)
llm_rails.add_texts(texts)
return llm_rails | Construct LLMRails wrapper from raw documents.
This is intended to be a quick way to get started.
Example:
.. code-block:: python
from langchain_community.vectorstores import LLMRails
llm_rails = LLMRails.from_texts(
texts,
datastore_id=datastore_id,
api_key=llm_rails_api_key
) |
_parse_output | if not raw_output:
return None
if not isinstance(raw_output, dict):
return _serialize(raw_output)
text_value = raw_output.get('text')
output_value = raw_output.get('output')
output_text_value = raw_output.get('output_text')
answer_value = raw_output.get('answer')
result_value = raw_output.get('result')
if text_value:
return text_value
if answer_value:
return answer_value
if output_value:
return output_value
if output_text_value:
return output_text_value
if result_value:
return result_value
return _serialize(raw_output) | def _parse_output(raw_output: dict) ->Any:
if not raw_output:
return None
if not isinstance(raw_output, dict):
return _serialize(raw_output)
text_value = raw_output.get('text')
output_value = raw_output.get('output')
output_text_value = raw_output.get('output_text')
answer_value = raw_output.get('answer')
result_value = raw_output.get('result')
if text_value:
return text_value
if answer_value:
return answer_value
if output_value:
return output_value
if output_text_value:
return output_text_value
if result_value:
return result_value
return _serialize(raw_output) | null |
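A compact, runnable replica of the precedence applied above (text, then answer, then output, then output_text, then result); the _serialize stand-in is an assumption, since the real one is defined elsewhere in the module:
import json
from typing import Any

def _serialize(value: Any) -> str:
    # Assumed stand-in for the real serializer defined elsewhere in the module.
    return json.dumps(value, default=str)

def parse_output(raw_output: Any) -> Any:
    # Same precedence as above: text > answer > output > output_text > result,
    # falling back to serialization for non-dicts and dicts without those keys.
    if not raw_output:
        return None
    if not isinstance(raw_output, dict):
        return _serialize(raw_output)
    for key in ("text", "answer", "output", "output_text", "result"):
        if raw_output.get(key):
            return raw_output[key]
    return _serialize(raw_output)

assert parse_output({"result": "ignored", "answer": "wins"}) == "wins"
assert parse_output(["not", "a", "dict"]) == '["not", "a", "dict"]'
assert parse_output({}) is None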
_stream | params = self._convert_prompt_msg_params(prompt, **{**kwargs, 'stream': True})
for res in self.client.do(**params):
if res:
chunk = GenerationChunk(text=res['result'])
yield chunk
if run_manager:
run_manager.on_llm_new_token(chunk.text) | def _stream(self, prompt: str, stop: Optional[List[str]]=None, run_manager:
Optional[CallbackManagerForLLMRun]=None, **kwargs: Any) ->Iterator[
GenerationChunk]:
params = self._convert_prompt_msg_params(prompt, **{**kwargs, 'stream':
True})
for res in self.client.do(**params):
if res:
chunk = GenerationChunk(text=res['result'])
yield chunk
if run_manager:
run_manager.on_llm_new_token(chunk.text) | null |
similarity_search | """Run similarity search with Hologres with distance.
Args:
query (str): Query text to search for.
k (int): Number of results to return. Defaults to 4.
filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None.
Returns:
List of Documents most similar to the query.
"""
embedding = self.embedding_function.embed_query(text=query)
return self.similarity_search_by_vector(embedding=embedding, k=k, filter=filter
) | def similarity_search(self, query: str, k: int=4, filter: Optional[dict]=
None, **kwargs: Any) ->List[Document]:
"""Run similarity search with Hologres with distance.
Args:
query (str): Query text to search for.
k (int): Number of results to return. Defaults to 4.
filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None.
Returns:
List of Documents most similar to the query.
"""
embedding = self.embedding_function.embed_query(text=query)
return self.similarity_search_by_vector(embedding=embedding, k=k,
filter=filter) | Run similarity search with Hologres with distance.
Args:
query (str): Query text to search for.
k (int): Number of results to return. Defaults to 4.
filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None.
Returns:
List of Documents most similar to the query. |
test_find_all_links_ignore_suffix | html = 'href="foobar{suffix}"'
for suffix in SUFFIXES_TO_IGNORE:
actual = find_all_links(html.format(suffix=suffix))
assert actual == []
html = 'href="foobar{suffix}more"'
for suffix in SUFFIXES_TO_IGNORE:
actual = find_all_links(html.format(suffix=suffix))
assert actual == [f'foobar{suffix}more'] | def test_find_all_links_ignore_suffix() ->None:
html = 'href="foobar{suffix}"'
for suffix in SUFFIXES_TO_IGNORE:
actual = find_all_links(html.format(suffix=suffix))
assert actual == []
html = 'href="foobar{suffix}more"'
for suffix in SUFFIXES_TO_IGNORE:
actual = find_all_links(html.format(suffix=suffix))
assert actual == [f'foobar{suffix}more'] | null |
_request_bodies_strict | """Get the request body or err."""
request_bodies = self._components_strict.requestBodies
if request_bodies is None:
raise ValueError('No request body found in spec. ')
return request_bodies | @property
def _request_bodies_strict(self) ->Dict[str, Union[RequestBody, Reference]]:
"""Get the request body or err."""
request_bodies = self._components_strict.requestBodies
if request_bodies is None:
raise ValueError('No request body found in spec. ')
return request_bodies | Get the request body or err. |
add | """Add texts to in memory dictionary.
Args:
texts: dictionary of id -> document.
Returns:
None
"""
overlapping = set(texts).intersection(self._dict)
if overlapping:
raise ValueError(f'Tried to add ids that already exist: {overlapping}')
self._dict = {**self._dict, **texts} | def add(self, texts: Dict[str, Document]) ->None:
"""Add texts to in memory dictionary.
Args:
texts: dictionary of id -> document.
Returns:
None
"""
overlapping = set(texts).intersection(self._dict)
if overlapping:
raise ValueError(f'Tried to add ids that already exist: {overlapping}')
self._dict = {**self._dict, **texts} | Add texts to in memory dictionary.
Args:
texts: dictionary of id -> document.
Returns:
None |
add_texts | """Turn texts into embedding and add it to the database
Args:
texts: Iterable of strings to add to the vectorstore.
metadatas: Optional list of metadatas associated with the texts.
ids: Optional list of ids to associate with the texts.
Returns:
List of ids of the added texts.
"""
docs = []
ids = ids or [str(uuid.uuid4()) for _ in texts]
embeddings = self._embedding.embed_documents(list(texts))
for idx, text in enumerate(texts):
embedding = embeddings[idx]
metadata = metadatas[idx] if metadatas else {}
docs.append({self._vector_key: embedding, self._id_key: ids[idx], self.
_text_key: text, **metadata})
self._connection.add(docs)
return ids | def add_texts(self, texts: Iterable[str], metadatas: Optional[List[dict]]=
None, ids: Optional[List[str]]=None, **kwargs: Any) ->List[str]:
"""Turn texts into embedding and add it to the database
Args:
texts: Iterable of strings to add to the vectorstore.
metadatas: Optional list of metadatas associated with the texts.
ids: Optional list of ids to associate with the texts.
Returns:
List of ids of the added texts.
"""
docs = []
ids = ids or [str(uuid.uuid4()) for _ in texts]
embeddings = self._embedding.embed_documents(list(texts))
for idx, text in enumerate(texts):
embedding = embeddings[idx]
metadata = metadatas[idx] if metadatas else {}
docs.append({self._vector_key: embedding, self._id_key: ids[idx],
self._text_key: text, **metadata})
self._connection.add(docs)
return ids | Turn texts into embedding and add it to the database
Args:
texts: Iterable of strings to add to the vectorstore.
metadatas: Optional list of metadatas associated with the texts.
ids: Optional list of ids to associate with the texts.
Returns:
List of ids of the added texts. |
__init__ | """Initialize by passing in the `init` GPTCache func
Args:
redis_url (str): URL to connect to Redis.
embedding (Embedding): Embedding provider for semantic encoding and search.
score_threshold (float, 0.2):
Example:
.. code-block:: python
from langchain_community.globals import set_llm_cache
from langchain_community.cache import RedisSemanticCache
from langchain_community.embeddings import OpenAIEmbeddings
set_llm_cache(RedisSemanticCache(
redis_url="redis://localhost:6379",
embedding=OpenAIEmbeddings()
))
"""
self._cache_dict: Dict[str, RedisVectorstore] = {}
self.redis_url = redis_url
self.embedding = embedding
self.score_threshold = score_threshold | def __init__(self, redis_url: str, embedding: Embeddings, score_threshold:
float=0.2):
"""Initialize by passing in the `init` GPTCache func
Args:
redis_url (str): URL to connect to Redis.
embedding (Embedding): Embedding provider for semantic encoding and search.
score_threshold (float, 0.2):
Example:
.. code-block:: python
from langchain_community.globals import set_llm_cache
from langchain_community.cache import RedisSemanticCache
from langchain_community.embeddings import OpenAIEmbeddings
set_llm_cache(RedisSemanticCache(
redis_url="redis://localhost:6379",
embedding=OpenAIEmbeddings()
))
"""
self._cache_dict: Dict[str, RedisVectorstore] = {}
self.redis_url = redis_url
self.embedding = embedding
self.score_threshold = score_threshold | Initialize by passing in the `init` GPTCache func
Args:
redis_url (str): URL to connect to Redis.
embedding (Embedding): Embedding provider for semantic encoding and search.
score_threshold (float, 0.2):
Example:
.. code-block:: python
from langchain_community.globals import set_llm_cache
from langchain_community.cache import RedisSemanticCache
from langchain_community.embeddings import OpenAIEmbeddings
set_llm_cache(RedisSemanticCache(
redis_url="redis://localhost:6379",
embedding=OpenAIEmbeddings()
)) |
max_marginal_relevance_search | """Return docs selected using the maximal marginal relevance.
Maximal marginal relevance optimizes for similarity to query AND diversity
among selected documents.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
fetch_k: Number of Documents to fetch before filtering (if needed) to
pass to MMR algorithm.
lambda_mult: Number between 0 and 1 that determines the degree
of diversity among the results with 0 corresponding
to maximum diversity and 1 to minimum diversity.
Defaults to 0.5.
Returns:
List of Documents selected by maximal marginal relevance.
"""
embedding = self.embedding_function(query)
docs = self.max_marginal_relevance_search_by_vector(embedding, k=k, fetch_k
=fetch_k, lambda_mult=lambda_mult, filter=filter, **kwargs)
return docs | def max_marginal_relevance_search(self, query: str, k: int=4, fetch_k: int=
20, lambda_mult: float=0.5, filter: Optional[Dict[str, Any]]=None, **
kwargs: Any) ->List[Document]:
"""Return docs selected using the maximal marginal relevance.
Maximal marginal relevance optimizes for similarity to query AND diversity
among selected documents.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
fetch_k: Number of Documents to fetch before filtering (if needed) to
pass to MMR algorithm.
lambda_mult: Number between 0 and 1 that determines the degree
of diversity among the results with 0 corresponding
to maximum diversity and 1 to minimum diversity.
Defaults to 0.5.
Returns:
List of Documents selected by maximal marginal relevance.
"""
embedding = self.embedding_function(query)
docs = self.max_marginal_relevance_search_by_vector(embedding, k=k,
fetch_k=fetch_k, lambda_mult=lambda_mult, filter=filter, **kwargs)
return docs | Return docs selected using the maximal marginal relevance.
Maximal marginal relevance optimizes for similarity to query AND diversity
among selected documents.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
fetch_k: Number of Documents to fetch before filtering (if needed) to
pass to MMR algorithm.
lambda_mult: Number between 0 and 1 that determines the degree
of diversity among the results with 0 corresponding
to maximum diversity and 1 to minimum diversity.
Defaults to 0.5.
Returns:
List of Documents selected by maximal marginal relevance. |
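The method above delegates the actual selection to its by-vector variant; the MMR idea itself can be sketched independently. A simplified greedy implementation (not the library's own) showing how lambda_mult trades similarity to the query against redundancy among already-selected candidates:
import numpy as np

def cosine(a: np.ndarray, b: np.ndarray) -> float:
    return float(a @ b / (np.linalg.norm(a) * np.linalg.norm(b)))

def mmr_select(query_vec: np.ndarray, candidate_vecs: np.ndarray,
               k: int = 4, lambda_mult: float = 0.5) -> list:
    # Greedy MMR over a pre-fetched candidate pool (the "fetch_k" documents).
    # lambda_mult=1 ranks purely by similarity to the query,
    # lambda_mult=0 purely by dissimilarity to already-selected results.
    selected: list = []
    remaining = list(range(len(candidate_vecs)))
    while remaining and len(selected) < k:
        best_i, best_score = remaining[0], -float("inf")
        for i in remaining:
            relevance = cosine(query_vec, candidate_vecs[i])
            redundancy = max(
                (cosine(candidate_vecs[i], candidate_vecs[j]) for j in selected),
                default=0.0,
            )
            score = lambda_mult * relevance - (1 - lambda_mult) * redundancy
            if score > best_score:
                best_i, best_score = i, score
        selected.append(best_i)
        remaining.remove(best_i)
    return selected

rng = np.random.default_rng(0)
query = rng.normal(size=8)
candidates = rng.normal(size=(20, 8))   # fetch_k = 20 candidate embeddings
print(mmr_select(query, candidates, k=4, lambda_mult=0.3))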
_import_plugin | from langchain_community.tools.plugin import AIPluginTool
return AIPluginTool | def _import_plugin() ->Any:
from langchain_community.tools.plugin import AIPluginTool
return AIPluginTool | null |
test_chat_google_palm | """Test Google PaLM Chat API wrapper."""
chat = ChatGooglePalm()
message = HumanMessage(content='Hello')
response = chat([message])
assert isinstance(response, BaseMessage)
assert isinstance(response.content, str) | def test_chat_google_palm() ->None:
"""Test Google PaLM Chat API wrapper."""
chat = ChatGooglePalm()
message = HumanMessage(content='Hello')
response = chat([message])
assert isinstance(response, BaseMessage)
assert isinstance(response.content, str) | Test Google PaLM Chat API wrapper. |
test_rockset_insert_and_search | """Test end to end vector search in Rockset"""
texts = ['foo', 'bar', 'baz']
metadatas = [{'metadata_index': i} for i in range(len(texts))]
ids = self.rockset_vectorstore.add_texts(texts=texts, metadatas=metadatas)
assert len(ids) == len(texts)
output = self.rockset_vectorstore.similarity_search(query='foo',
distance_func=Rockset.DistanceFunction.COSINE_SIM, k=1)
assert output == [Document(page_content='foo', metadata={'metadata_index': 0})]
output = self.rockset_vectorstore.similarity_search(query='foo',
distance_func=Rockset.DistanceFunction.COSINE_SIM, k=1, where_str=
'metadata_index != 0')
assert output == [Document(page_content='bar', metadata={'metadata_index': 1})] | def test_rockset_insert_and_search(self) ->None:
"""Test end to end vector search in Rockset"""
texts = ['foo', 'bar', 'baz']
metadatas = [{'metadata_index': i} for i in range(len(texts))]
ids = self.rockset_vectorstore.add_texts(texts=texts, metadatas=metadatas)
assert len(ids) == len(texts)
output = self.rockset_vectorstore.similarity_search(query='foo',
distance_func=Rockset.DistanceFunction.COSINE_SIM, k=1)
assert output == [Document(page_content='foo', metadata={
'metadata_index': 0})]
output = self.rockset_vectorstore.similarity_search(query='foo',
distance_func=Rockset.DistanceFunction.COSINE_SIM, k=1, where_str=
'metadata_index != 0')
assert output == [Document(page_content='bar', metadata={
'metadata_index': 1})] | Test end to end vector search in Rockset |
test__convert_message_to_dict_human | message = HumanMessage(content='foo')
result = _convert_message_to_dict(message)
expected_output = {'role': 'user', 'content': 'foo'}
assert result == expected_output | def test__convert_message_to_dict_human() ->None:
message = HumanMessage(content='foo')
result = _convert_message_to_dict(message)
expected_output = {'role': 'user', 'content': 'foo'}
assert result == expected_output | null |
__init__ | self.nodes: Dict[Tuple[Union[str, int], str], Any] = dict() | def __init__(self) ->None:
self.nodes: Dict[Tuple[Union[str, int], str], Any] = dict() | null |
similarity_search_by_vector | """Return docs most similar to embedding vector.
Args:
embedding: Embedding to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None.
Returns:
List of Documents most similar to the query vector.
"""
docs_and_scores = self.similarity_search_with_score_by_vector(embedding=
embedding, k=k, filter=filter)
return [doc for doc, _ in docs_and_scores] | def similarity_search_by_vector(self, embedding: List[float], k: int=4,
filter: Optional[dict]=None, **kwargs: Any) ->List[Document]:
"""Return docs most similar to embedding vector.
Args:
embedding: Embedding to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None.
Returns:
List of Documents most similar to the query vector.
"""
docs_and_scores = self.similarity_search_with_score_by_vector(embedding
=embedding, k=k, filter=filter)
return [doc for doc, _ in docs_and_scores] | Return docs most similar to embedding vector.
Args:
embedding: Embedding to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None.
Returns:
List of Documents most similar to the query vector. |
_extract_tokens_and_log_probs | """Extract tokens and log probs from response.""" | @abstractmethod
def _extract_tokens_and_log_probs(self, generations: List[Generation]) ->Tuple[
Sequence[str], Sequence[float]]:
"""Extract tokens and log probs from response.""" | Extract tokens and log probs from response. |
_dereference_refs_helper | if isinstance(obj, dict):
obj_out = {}
for k, v in obj.items():
if k in skip_keys:
obj_out[k] = v
elif k == '$ref':
ref = _retrieve_ref(v, full_schema)
return _dereference_refs_helper(ref, full_schema, skip_keys)
elif isinstance(v, (list, dict)):
obj_out[k] = _dereference_refs_helper(v, full_schema, skip_keys)
else:
obj_out[k] = v
return obj_out
elif isinstance(obj, list):
return [_dereference_refs_helper(el, full_schema, skip_keys) for el in obj]
else:
return obj | def _dereference_refs_helper(obj: Any, full_schema: dict, skip_keys:
Sequence[str]) ->Any:
if isinstance(obj, dict):
obj_out = {}
for k, v in obj.items():
if k in skip_keys:
obj_out[k] = v
elif k == '$ref':
ref = _retrieve_ref(v, full_schema)
return _dereference_refs_helper(ref, full_schema, skip_keys)
elif isinstance(v, (list, dict)):
obj_out[k] = _dereference_refs_helper(v, full_schema, skip_keys
)
else:
obj_out[k] = v
return obj_out
elif isinstance(obj, list):
return [_dereference_refs_helper(el, full_schema, skip_keys) for el in
obj]
else:
return obj | null |
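A self-contained sketch of the recursion above; _retrieve_ref here is an assumed stand-in that walks a local "#/..." JSON pointer, since the real helper is defined elsewhere:
from typing import Any, Sequence

def _retrieve_ref(path: str, schema: dict) -> Any:
    # Assumed stand-in: follow a local JSON pointer such as "#/definitions/Pet".
    out: Any = schema
    for part in path.lstrip("#/").split("/"):
        out = out[part]
    return out

def dereference(obj: Any, full_schema: dict, skip_keys: Sequence[str] = ()) -> Any:
    # Same shape as the helper above: dicts are rebuilt key by key, a "$ref"
    # node is replaced by the fragment it points at, and lists recurse element-wise.
    if isinstance(obj, dict):
        out = {}
        for k, v in obj.items():
            if k in skip_keys:
                out[k] = v
            elif k == "$ref":
                return dereference(_retrieve_ref(v, full_schema), full_schema, skip_keys)
            elif isinstance(v, (list, dict)):
                out[k] = dereference(v, full_schema, skip_keys)
            else:
                out[k] = v
        return out
    if isinstance(obj, list):
        return [dereference(el, full_schema, skip_keys) for el in obj]
    return obj

schema = {
    "definitions": {"Pet": {"type": "object", "properties": {"name": {"type": "string"}}}},
    "properties": {"pet": {"$ref": "#/definitions/Pet"}},
}
print(dereference(schema["properties"], schema))
# {'pet': {'type': 'object', 'properties': {'name': {'type': 'string'}}}}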