method_name (string · lengths 1–78) | method_body (string · lengths 3–9.66k) | full_code (string · lengths 31–10.7k) | docstring (string · lengths 4–4.74k · nullable ⌀)
---|---|---|---|
__init__
|
super().__init__(**kwargs)
self.example_id = UUID(example_id) if isinstance(example_id, str) else example_id
self.client = client or langchain_tracer.get_client()
self.evaluators = evaluators
if max_concurrency is None:
self.executor: Optional[ThreadPoolExecutor] = _get_executor()
elif max_concurrency > 0:
self.executor = ThreadPoolExecutor(max_workers=max_concurrency)
    weakref.finalize(self, lambda: cast(
        ThreadPoolExecutor, self.executor).shutdown(wait=True))
else:
self.executor = None
self.futures: weakref.WeakSet[Future] = weakref.WeakSet()
self.skip_unfinished = skip_unfinished
self.project_name = project_name
self.logged_eval_results: Dict[Tuple[str, str], List[EvaluationResult]] = {}
self.lock = threading.Lock()
global _TRACERS
_TRACERS.add(self)
|
def __init__(self, evaluators: Sequence[langsmith.RunEvaluator], client:
Optional[langsmith.Client]=None, example_id: Optional[Union[UUID, str]]
=None, skip_unfinished: bool=True, project_name: Optional[str]=
'evaluators', max_concurrency: Optional[int]=None, **kwargs: Any) ->None:
super().__init__(**kwargs)
    self.example_id = UUID(example_id) if isinstance(example_id, str) else example_id
self.client = client or langchain_tracer.get_client()
self.evaluators = evaluators
if max_concurrency is None:
self.executor: Optional[ThreadPoolExecutor] = _get_executor()
elif max_concurrency > 0:
self.executor = ThreadPoolExecutor(max_workers=max_concurrency)
        weakref.finalize(self, lambda: cast(
            ThreadPoolExecutor, self.executor).shutdown(wait=True))
else:
self.executor = None
self.futures: weakref.WeakSet[Future] = weakref.WeakSet()
self.skip_unfinished = skip_unfinished
self.project_name = project_name
    self.logged_eval_results: Dict[Tuple[str, str], List[EvaluationResult]] = {}
self.lock = threading.Lock()
global _TRACERS
_TRACERS.add(self)
| null |
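The constructor above lazily chooses between a shared executor, a bounded `ThreadPoolExecutor`, and no executor at all, and ties pool shutdown to garbage collection via `weakref.finalize`. A minimal sketch of that cleanup pattern in isolation (the `Worker` class and its names are illustrative, not from the source):

```python
import weakref
from concurrent.futures import ThreadPoolExecutor


class Worker:
    """Owns a thread pool that is shut down when the owner is collected."""

    def __init__(self, max_workers: int = 2) -> None:
        self.executor = ThreadPoolExecutor(max_workers=max_workers)
        # Register shutdown(wait=True) to run when `self` is garbage-collected,
        # mirroring the finalizer the tracer installs above.
        weakref.finalize(self, self.executor.shutdown, True)

    def submit(self, fn, *args):
        return self.executor.submit(fn, *args)


w = Worker()
print(w.submit(pow, 2, 10).result())  # 1024; the pool dies with `w`
```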
check_queries_required
|
if values.get('sequential_response') and not queries:
raise ValueError(
'queries is required when sequential_response is set to True')
return queries
|
@validator('queries', always=True)
def check_queries_required(cls, queries: Optional[Mapping], values: Mapping
[str, Any]) ->Optional[Mapping]:
if values.get('sequential_response') and not queries:
raise ValueError(
'queries is required when sequential_response is set to True')
return queries
| null |
embeddings
|
return self._embedding
|
@property
def embeddings(self) ->Optional[Embeddings]:
return self._embedding
| null |
__init__
|
"""Initialize with API token and the IDs for table and base"""
self.api_token = api_token
"""Airtable API token."""
self.table_id = table_id
"""Airtable table ID."""
self.base_id = base_id
"""Airtable base ID."""
|
def __init__(self, api_token: str, table_id: str, base_id: str):
"""Initialize with API token and the IDs for table and base"""
self.api_token = api_token
"""Airtable API token."""
self.table_id = table_id
"""Airtable table ID."""
self.base_id = base_id
"""Airtable base ID."""
|
Initialize with API token and the IDs for table and base
|
test_failed_request
|
"""Test that a failed request raises an error."""
path = 'chains/path/chain.json'
loader = Mock()
mocked_responses.get(urljoin(URL_BASE.format(ref=DEFAULT_REF), path),
status=500)
with pytest.raises(ValueError, match=re.compile('Could not find file at .*')):
try_load_from_hub(f'lc://{path}', loader, 'chains', {'json'})
loader.assert_not_called()
|
def test_failed_request(mocked_responses: responses.RequestsMock) ->None:
"""Test that a failed request raises an error."""
path = 'chains/path/chain.json'
loader = Mock()
mocked_responses.get(urljoin(URL_BASE.format(ref=DEFAULT_REF), path),
status=500)
with pytest.raises(ValueError, match=re.compile(
'Could not find file at .*')):
try_load_from_hub(f'lc://{path}', loader, 'chains', {'json'})
loader.assert_not_called()
|
Test that a failed request raises an error.
|
test_structured_tool_from_function_with_run_manager
|
"""Test args and schema of structured tool when using callbacks."""
def foo(bar: int, baz: str, callbacks: Optional[CallbackManagerForToolRun]=None
) ->str:
"""Docstring
Args:
bar: int
baz: str
"""
assert callbacks is not None
return str(bar) + baz
handler = FakeCallbackHandler()
structured_tool = StructuredTool.from_function(foo)
assert structured_tool.args == {'bar': {'title': 'Bar', 'type': 'integer'},
'baz': {'title': 'Baz', 'type': 'string'}}
assert structured_tool.args_schema.schema() == {'properties': {'bar': {
'title': 'Bar', 'type': 'integer'}, 'baz': {'title': 'Baz', 'type':
'string'}}, 'title': 'fooSchemaSchema', 'type': 'object', 'required': [
'bar', 'baz']}
assert structured_tool.run(tool_input={'bar': '10', 'baz': 'baz'},
    run_manager=[handler]) == '10baz'
|
def test_structured_tool_from_function_with_run_manager() ->None:
"""Test args and schema of structured tool when using callbacks."""
def foo(bar: int, baz: str, callbacks: Optional[
CallbackManagerForToolRun]=None) ->str:
"""Docstring
Args:
bar: int
baz: str
"""
assert callbacks is not None
return str(bar) + baz
handler = FakeCallbackHandler()
structured_tool = StructuredTool.from_function(foo)
assert structured_tool.args == {'bar': {'title': 'Bar', 'type':
'integer'}, 'baz': {'title': 'Baz', 'type': 'string'}}
assert structured_tool.args_schema.schema() == {'properties': {'bar': {
'title': 'Bar', 'type': 'integer'}, 'baz': {'title': 'Baz', 'type':
'string'}}, 'title': 'fooSchemaSchema', 'type': 'object',
'required': ['bar', 'baz']}
    assert structured_tool.run(tool_input={'bar': '10', 'baz': 'baz'},
        run_manager=[handler]) == '10baz'
|
Test args and schema of structured tool when using callbacks.
|
messages
|
"""Return the messages that correspond to this observation."""
return _convert_agent_observation_to_messages(self.action, self.observation)
|
@property
def messages(self) ->Sequence[BaseMessage]:
"""Return the messages that correspond to this observation."""
return _convert_agent_observation_to_messages(self.action, self.observation
)
|
Return the messages that correspond to this observation.
|
test_model
|
"""Test model kwarg works."""
chat = ChatTongyi(model='qwen-plus')
response = chat(messages=[HumanMessage(content='Hello')])
assert isinstance(response, BaseMessage)
assert isinstance(response.content, str)
|
def test_model() ->None:
"""Test model kwarg works."""
chat = ChatTongyi(model='qwen-plus')
response = chat(messages=[HumanMessage(content='Hello')])
assert isinstance(response, BaseMessage)
assert isinstance(response.content, str)
|
Test model kwarg works.
|
get_usable_table_names
|
"""Get names of tables available."""
if self._include_tables:
return self._include_tables
return sorted(self._all_tables - self._ignore_tables)
|
def get_usable_table_names(self) ->Iterable[str]:
"""Get names of tables available."""
if self._include_tables:
return self._include_tables
return sorted(self._all_tables - self._ignore_tables)
|
Get names of tables available.
|
messages
|
"""Retrieve the messages from the local file"""
items = json.loads(self.file_path.read_text())
messages = messages_from_dict(items)
return messages
|
@property
def messages(self) ->List[BaseMessage]:
"""Retrieve the messages from the local file"""
items = json.loads(self.file_path.read_text())
messages = messages_from_dict(items)
return messages
|
Retrieve the messages from the local file
|
test_embed_documents_normalized
|
output = _get_embeddings().embed_documents(['foo walked to the market'])
assert np.isclose(np.linalg.norm(output[0]), 1.0)
|
@pytest.mark.scheduled
def test_embed_documents_normalized() ->None:
output = _get_embeddings().embed_documents(['foo walked to the market'])
assert np.isclose(np.linalg.norm(output[0]), 1.0)
| null |
_generate_rest_batches
|
from qdrant_client import models as rest
texts_iterator = iter(texts)
metadatas_iterator = iter(metadatas or [])
ids_iterator = iter(ids or [uuid.uuid4().hex for _ in iter(texts)])
while (batch_texts := list(islice(texts_iterator, batch_size))):
batch_metadatas = list(islice(metadatas_iterator, batch_size)) or None
batch_ids = list(islice(ids_iterator, batch_size))
    batch_embeddings: List[Tuple[List[int], List[float]]] = [
        self.sparse_encoder(text) for text in batch_texts]
    points = [rest.PointStruct(
        id=point_id,
        vector={self.sparse_vector_name: rest.SparseVector(
            indices=sparse_vector[0], values=sparse_vector[1])},
        payload=payload,
    ) for point_id, sparse_vector, payload in zip(
        batch_ids, batch_embeddings,
        Qdrant._build_payloads(batch_texts, batch_metadatas,
            self.content_payload_key, self.metadata_payload_key))]
yield batch_ids, points
|
def _generate_rest_batches(self, texts: Iterable[str], metadatas: Optional[
List[dict]]=None, ids: Optional[Sequence[str]]=None, batch_size: int=64
) ->Generator[Tuple[List[str], List[Any]], None, None]:
from qdrant_client import models as rest
texts_iterator = iter(texts)
metadatas_iterator = iter(metadatas or [])
ids_iterator = iter(ids or [uuid.uuid4().hex for _ in iter(texts)])
while (batch_texts := list(islice(texts_iterator, batch_size))):
batch_metadatas = list(islice(metadatas_iterator, batch_size)) or None
batch_ids = list(islice(ids_iterator, batch_size))
        batch_embeddings: List[Tuple[List[int], List[float]]] = [
            self.sparse_encoder(text) for text in batch_texts]
        points = [rest.PointStruct(
            id=point_id,
            vector={self.sparse_vector_name: rest.SparseVector(
                indices=sparse_vector[0], values=sparse_vector[1])},
            payload=payload,
        ) for point_id, sparse_vector, payload in zip(
            batch_ids, batch_embeddings,
            Qdrant._build_payloads(batch_texts, batch_metadatas,
                self.content_payload_key, self.metadata_payload_key))]
yield batch_ids, points
| null |
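`_generate_rest_batches` drains its iterators with the walrus operator plus `itertools.islice`, yielding fixed-size batches until the input runs out. The same idiom in a self-contained sketch:

```python
from itertools import islice
from typing import Iterable, Iterator, List, TypeVar

T = TypeVar('T')


def batched(items: Iterable[T], batch_size: int) -> Iterator[List[T]]:
    """Yield lists of up to `batch_size` items until `items` is exhausted."""
    it = iter(items)
    while batch := list(islice(it, batch_size)):
        yield batch


print(list(batched(range(7), 3)))  # [[0, 1, 2], [3, 4, 5], [6]]
```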
_filter_cluster_embeddings
|
"""Filter documents based on proximity of their embeddings to clusters."""
try:
from sklearn.cluster import KMeans
except ImportError:
raise ImportError(
'sklearn package not found, please install it with `pip install scikit-learn`'
)
kmeans = KMeans(n_clusters=num_clusters, random_state=random_state).fit(
embedded_documents)
closest_indices = []
for i in range(num_clusters):
    distances = np.linalg.norm(
        embedded_documents - kmeans.cluster_centers_[i], axis=1)
    if remove_duplicates:
        closest_indices_sorted = [x for x in np.argsort(distances)[:num_closest]
            if x not in closest_indices]
    else:
        closest_indices_sorted = [x for x in np.argsort(distances)
            if x not in closest_indices][:num_closest]
closest_indices.extend(closest_indices_sorted)
return closest_indices
|
def _filter_cluster_embeddings(embedded_documents: List[List[float]],
num_clusters: int, num_closest: int, random_state: int,
remove_duplicates: bool) ->List[int]:
"""Filter documents based on proximity of their embeddings to clusters."""
try:
from sklearn.cluster import KMeans
except ImportError:
raise ImportError(
'sklearn package not found, please install it with `pip install scikit-learn`'
)
kmeans = KMeans(n_clusters=num_clusters, random_state=random_state).fit(
embedded_documents)
closest_indices = []
for i in range(num_clusters):
        distances = np.linalg.norm(
            embedded_documents - kmeans.cluster_centers_[i], axis=1)
        if remove_duplicates:
            closest_indices_sorted = [x for x in np.argsort(distances)[:num_closest]
                if x not in closest_indices]
        else:
            closest_indices_sorted = [x for x in np.argsort(distances)
                if x not in closest_indices][:num_closest]
closest_indices.extend(closest_indices_sorted)
return closest_indices
|
Filter documents based on proximity of their embeddings to clusters.
|
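A quick illustration of what the filter returns, using made-up 2-D "embeddings" with two obvious clusters (toy data, not from the source; requires scikit-learn, and exact indices can vary with KMeans initialization):

```python
# Indices 0-2 sit near (0, 0); indices 3-5 sit near (10, 10).
embedded_documents = [
    [0.0, 0.1], [0.1, 0.0], [0.2, 0.2],
    [10.0, 9.9], [9.8, 10.1], [10.2, 10.0],
]
picked = _filter_cluster_embeddings(embedded_documents, num_clusters=2,
    num_closest=1, random_state=0, remove_duplicates=False)
print(sorted(picked))  # one representative index per cluster, e.g. [0, 3]
```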
test_chat_hunyuan
|
chat = ChatHunyuan()
message = HumanMessage(content='Hello')
response = chat([message])
assert isinstance(response, AIMessage)
assert isinstance(response.content, str)
|
def test_chat_hunyuan() ->None:
chat = ChatHunyuan()
message = HumanMessage(content='Hello')
response = chat([message])
assert isinstance(response, AIMessage)
assert isinstance(response.content, str)
| null |
embed_query
|
"""
Generate an embedding for a single query text.
Args:
text (str): The query text to generate an embedding for.
Returns:
List[float]: The embedding for the input query text.
"""
return self._embedding_func([text])[0]
|
def embed_query(self, text: str) ->List[float]:
"""
Generate an embedding for a single query text.
Args:
text (str): The query text to generate an embedding for.
Returns:
List[float]: The embedding for the input query text.
"""
return self._embedding_func([text])[0]
|
Generate an embedding for a single query text.
Args:
text (str): The query text to generate an embedding for.
Returns:
List[float]: The embedding for the input query text.
|
test_appx_search_with_faiss_efficient_filter
|
"""Test Approximate Search with Faiss Efficient Filter."""
efficient_filter_val = {'bool': {'must': [{'term': {'text': 'bar'}}]}}
docsearch = OpenSearchVectorSearch.from_texts(texts, FakeEmbeddings(),
opensearch_url=DEFAULT_OPENSEARCH_URL, engine='faiss')
output = docsearch.similarity_search('foo', k=3, efficient_filter=
efficient_filter_val)
assert output == [Document(page_content='bar')]
|
def test_appx_search_with_faiss_efficient_filter() ->None:
"""Test Approximate Search with Faiss Efficient Filter."""
efficient_filter_val = {'bool': {'must': [{'term': {'text': 'bar'}}]}}
docsearch = OpenSearchVectorSearch.from_texts(texts, FakeEmbeddings(),
opensearch_url=DEFAULT_OPENSEARCH_URL, engine='faiss')
output = docsearch.similarity_search('foo', k=3, efficient_filter=
efficient_filter_val)
assert output == [Document(page_content='bar')]
|
Test Approximate Search with Faiss Efficient Filter.
|
add_message
|
"""Write a message to the table"""
self.blob_history.store(self.session_id, json.dumps(message_to_dict(message
)), self.ttl_seconds)
|
def add_message(self, message: BaseMessage) ->None:
"""Write a message to the table"""
self.blob_history.store(self.session_id, json.dumps(message_to_dict(
message)), self.ttl_seconds)
|
Write a message to the table
|
create_alibabacloud_opensearch
|
metadatas = [{'string_field': 'value1', 'int_field': 1, 'float_field': 1.0,
'double_field': 2.0}, {'string_field': 'value2', 'int_field': 2,
'float_field': 3.0, 'double_field': 4.0}, {'string_field': 'value3',
'int_field': 3, 'float_field': 5.0, 'double_field': 6.0}]
return AlibabaCloudOpenSearch.from_texts(texts=texts, embedding=
FakeEmbeddingsWithOsDimension(), metadatas=metadatas, config=settings)
|
def create_alibabacloud_opensearch() ->AlibabaCloudOpenSearch:
metadatas = [{'string_field': 'value1', 'int_field': 1, 'float_field':
1.0, 'double_field': 2.0}, {'string_field': 'value2', 'int_field':
2, 'float_field': 3.0, 'double_field': 4.0}, {'string_field':
'value3', 'int_field': 3, 'float_field': 5.0, 'double_field': 6.0}]
return AlibabaCloudOpenSearch.from_texts(texts=texts, embedding=
FakeEmbeddingsWithOsDimension(), metadatas=metadatas, config=settings)
| null |
_import_eleven_labs_text2speech
|
from langchain_community.tools.eleven_labs.text2speech import ElevenLabsText2SpeechTool
return ElevenLabsText2SpeechTool
|
def _import_eleven_labs_text2speech() ->Any:
from langchain_community.tools.eleven_labs.text2speech import ElevenLabsText2SpeechTool
return ElevenLabsText2SpeechTool
| null |
test_all_subclasses_accept_run_manager
|
"""Test that tools defined in this repo accept a run manager argument."""
if cls._run is not BaseTool._run:
run_func = cls._run
params = inspect.signature(run_func).parameters
assert 'run_manager' in params
pattern = re.compile('(?!Async)CallbackManagerForToolRun')
assert bool(re.search(pattern, str(params['run_manager'].annotation)))
assert params['run_manager'].default is None
if cls._arun is not BaseTool._arun:
run_func = cls._arun
params = inspect.signature(run_func).parameters
assert 'run_manager' in params
    assert 'AsyncCallbackManagerForToolRun' in str(
        params['run_manager'].annotation)
assert params['run_manager'].default is None
|
@pytest.mark.parametrize('cls', get_non_abstract_subclasses(BaseTool))
def test_all_subclasses_accept_run_manager(cls: Type[BaseTool]) ->None:
"""Test that tools defined in this repo accept a run manager argument."""
    if cls._run is not BaseTool._run:
run_func = cls._run
params = inspect.signature(run_func).parameters
assert 'run_manager' in params
pattern = re.compile('(?!Async)CallbackManagerForToolRun')
assert bool(re.search(pattern, str(params['run_manager'].annotation)))
assert params['run_manager'].default is None
if cls._arun is not BaseTool._arun:
run_func = cls._arun
params = inspect.signature(run_func).parameters
assert 'run_manager' in params
        assert 'AsyncCallbackManagerForToolRun' in str(
            params['run_manager'].annotation)
assert params['run_manager'].default is None
|
Test that tools defined in this repo accept a run manager argument.
|
_get_tools_requests_get
|
return RequestsGetTool(requests_wrapper=TextRequestsWrapper())
|
def _get_tools_requests_get() ->BaseTool:
return RequestsGetTool(requests_wrapper=TextRequestsWrapper())
| null |
_convert_input
|
if isinstance(input, PromptValue):
return input
elif isinstance(input, str):
return StringPromptValue(text=input)
elif isinstance(input, Sequence):
return ChatPromptValue(messages=input)
else:
raise ValueError(
f'Invalid input type {type(input)}. Must be a PromptValue, str, or list of BaseMessages.'
)
|
def _convert_input(self, input: LanguageModelInput) ->PromptValue:
if isinstance(input, PromptValue):
return input
elif isinstance(input, str):
return StringPromptValue(text=input)
elif isinstance(input, Sequence):
return ChatPromptValue(messages=input)
else:
raise ValueError(
f'Invalid input type {type(input)}. Must be a PromptValue, str, or list of BaseMessages.'
)
| null |
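`_convert_input` is the normalization step behind `invoke`: a `PromptValue` passes through unchanged, a `str` becomes a `StringPromptValue`, and a message sequence becomes a `ChatPromptValue`. A small sketch of the wrappers involved (assuming `langchain_core` is installed; class paths per its current layout):

```python
from langchain_core.messages import HumanMessage
from langchain_core.prompt_values import ChatPromptValue, StringPromptValue

# str -> StringPromptValue: what a plain string is wrapped into.
print(StringPromptValue(text='hi').to_messages())  # [HumanMessage(content='hi')]
# Sequence[BaseMessage] -> ChatPromptValue.
print(ChatPromptValue(messages=[HumanMessage(content='hi')]).to_string())
# A PromptValue instance is returned as-is.
```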
_test_convo_output
|
result = ConvoOutputParser().parse(input.strip())
assert isinstance(result, AgentAction)
assert result.tool == expected_tool
assert result.tool_input == expected_tool_input
|
def _test_convo_output(input: str, expected_tool: str, expected_tool_input: str
) ->None:
    result = ConvoOutputParser().parse(input.strip())
assert isinstance(result, AgentAction)
assert result.tool == expected_tool
assert result.tool_input == expected_tool_input
| null |
_get_searchapi_results_json
|
return SearchAPIResults(api_wrapper=SearchApiAPIWrapper(**kwargs))
|
def _get_searchapi_results_json(**kwargs: Any) ->BaseTool:
return SearchAPIResults(api_wrapper=SearchApiAPIWrapper(**kwargs))
| null |
_identifying_params
|
"""Get the identifying parameters."""
return {**{'model_id': self.model_id}, **{'model_kwargs': self.model_kwargs}}
|
@property
def _identifying_params(self) ->Mapping[str, Any]:
"""Get the identifying parameters."""
    return {**{'model_id': self.model_id},
        **{'model_kwargs': self.model_kwargs}}
|
Get the identifying parameters.
|
test_warn_deprecated
|
"""Test warn deprecated."""
with warnings.catch_warnings(record=True) as warning_list:
warnings.simplefilter('always')
warn_deprecated(**kwargs)
assert len(warning_list) == 1
warning = warning_list[0].message
assert str(warning) == expected_message
|
@pytest.mark.parametrize('kwargs, expected_message', [({'since': '1.0.0',
'name': 'OldClass', 'alternative': 'NewClass', 'pending': True,
'obj_type': 'class'},
'The class `OldClass` will be deprecated in a future version. Use NewClass instead.'
), ({'since': '2.0.0', 'message': 'This is a custom message', 'name':
'FunctionA', 'alternative': '', 'pending': True, 'obj_type': '',
'addendum': '', 'removal': ''}, 'This is a custom message'), ({'since':
'1.5.0', 'message': '', 'name': 'SomeFunction', 'alternative': '',
'pending': False, 'obj_type': '', 'addendum':
'Please migrate your code.', 'removal': '2.5.0'},
'`SomeFunction` was deprecated in LangChain 1.5.0 and will be removed in 2.5.0 Please migrate your code.'
)])
def test_warn_deprecated(kwargs: Dict[str, Any], expected_message: str) ->None:
"""Test warn deprecated."""
with warnings.catch_warnings(record=True) as warning_list:
warnings.simplefilter('always')
warn_deprecated(**kwargs)
assert len(warning_list) == 1
warning = warning_list[0].message
assert str(warning) == expected_message
|
Test warn deprecated.
|
_import_predibase
|
from langchain_community.llms.predibase import Predibase
return Predibase
|
def _import_predibase() ->Any:
from langchain_community.llms.predibase import Predibase
return Predibase
| null |
_llm_type
|
"""Return type of llm."""
return 'IBM watsonx.ai'
|
@property
def _llm_type(self) ->str:
"""Return type of llm."""
return 'IBM watsonx.ai'
|
Return type of llm.
|
input_keys
|
"""Expect input key.
:meta private:
"""
return [self.question_key]
|
@property
def input_keys(self) ->List[str]:
"""Expect input key.
:meta private:
"""
return [self.question_key]
|
Expect input key.
:meta private:
|
_llm_type
|
return 'huggingface_pipeline'
|
@property
def _llm_type(self) ->str:
return 'huggingface_pipeline'
| null |
html
|
"""To log the input html string as html file artifact."""
with self.mlflow.start_run(run_id=self.run.info.run_id,
        experiment_id=self.mlf_expid):
self.mlflow.log_text(html, f'{filename}.html')
|
def html(self, html: str, filename: str) ->None:
"""To log the input html string as html file artifact."""
    with self.mlflow.start_run(run_id=self.run.info.run_id,
            experiment_id=self.mlf_expid):
self.mlflow.log_text(html, f'{filename}.html')
|
To log the input html string as html file artifact.
|
test_create_documents
|
"""Test create documents method."""
texts = ['foo bar', 'baz']
splitter = CharacterTextSplitter(separator=' ', chunk_size=3, chunk_overlap=0)
docs = splitter.create_documents(texts)
expected_docs = [Document(page_content='foo'), Document(page_content='bar'),
Document(page_content='baz')]
assert docs == expected_docs
|
def test_create_documents() ->None:
"""Test create documents method."""
texts = ['foo bar', 'baz']
splitter = CharacterTextSplitter(separator=' ', chunk_size=3,
chunk_overlap=0)
docs = splitter.create_documents(texts)
    expected_docs = [Document(page_content='foo'),
        Document(page_content='bar'), Document(page_content='baz')]
assert docs == expected_docs
|
Test create documents method.
|
validate_model
|
"""Validate and update model arguments, including API key and formatting"""
values['nvidia_api_key'] = get_from_dict_or_env(values, 'nvidia_api_key',
'NVIDIA_API_KEY')
if 'nvapi-' not in values.get('nvidia_api_key', ''):
raise ValueError('Invalid NVAPI key detected. Should start with `nvapi-`')
is_staging = 'nvapi-stg-' in values['nvidia_api_key']
values['is_staging'] = is_staging
if 'headers_tmpl' not in values:
values['headers_tmpl'] = {'call': {'Authorization':
'Bearer {nvidia_api_key}', 'Accept': 'application/json'}, 'stream':
{'Authorization': 'Bearer {nvidia_api_key}', 'Accept':
'text/event-stream', 'content-type': 'application/json'}}
values['fetch_url_format'] = cls._stagify(is_staging, values.get(
'fetch_url_format', 'https://api.nvcf.nvidia.com/v2/nvcf/pexec/status/'))
values['call_invoke_base'] = cls._stagify(is_staging, values.get(
'call_invoke_base', 'https://api.nvcf.nvidia.com/v2/nvcf/pexec/functions'))
return values
|
@root_validator(pre=True)
def validate_model(cls, values: Dict[str, Any]) ->Dict[str, Any]:
"""Validate and update model arguments, including API key and formatting"""
values['nvidia_api_key'] = get_from_dict_or_env(values,
'nvidia_api_key', 'NVIDIA_API_KEY')
if 'nvapi-' not in values.get('nvidia_api_key', ''):
raise ValueError(
'Invalid NVAPI key detected. Should start with `nvapi-`')
is_staging = 'nvapi-stg-' in values['nvidia_api_key']
values['is_staging'] = is_staging
if 'headers_tmpl' not in values:
values['headers_tmpl'] = {'call': {'Authorization':
'Bearer {nvidia_api_key}', 'Accept': 'application/json'},
'stream': {'Authorization': 'Bearer {nvidia_api_key}', 'Accept':
'text/event-stream', 'content-type': 'application/json'}}
values['fetch_url_format'] = cls._stagify(is_staging, values.get(
'fetch_url_format',
'https://api.nvcf.nvidia.com/v2/nvcf/pexec/status/'))
values['call_invoke_base'] = cls._stagify(is_staging, values.get(
'call_invoke_base',
'https://api.nvcf.nvidia.com/v2/nvcf/pexec/functions'))
return values
|
Validate and update model arguments, including API key and formatting
|
api_client
|
return MerriamWebsterAPIWrapper()
|
@pytest.fixture
def api_client() ->MerriamWebsterAPIWrapper:
return MerriamWebsterAPIWrapper()
| null |
_run
|
"""Use the GitHub API to run an operation."""
if not instructions or instructions == '{}':
instructions = ''
return self.api_wrapper.run(self.mode, instructions)
|
def _run(self, instructions: Optional[str]='', run_manager: Optional[
CallbackManagerForToolRun]=None) ->str:
"""Use the GitHub API to run an operation."""
if not instructions or instructions == '{}':
instructions = ''
return self.api_wrapper.run(self.mode, instructions)
|
Use the GitHub API to run an operation.
|
test_batch
|
llm = FakeListLLM(responses=['foo'] * 3)
output = llm.batch(['foo', 'bar', 'foo'])
assert output == ['foo'] * 3
output = llm.batch(['foo', 'bar', 'foo'], config={'max_concurrency': 2})
assert output == ['foo'] * 3
|
def test_batch() ->None:
llm = FakeListLLM(responses=['foo'] * 3)
output = llm.batch(['foo', 'bar', 'foo'])
assert output == ['foo'] * 3
output = llm.batch(['foo', 'bar', 'foo'], config={'max_concurrency': 2})
assert output == ['foo'] * 3
| null |
nanoseconds_from_2001_to_datetime
|
timestamp_in_seconds = nanoseconds / 1000000000.0
reference_date_seconds = datetime(2001, 1, 1).timestamp()
actual_timestamp = reference_date_seconds + timestamp_in_seconds
return datetime.fromtimestamp(actual_timestamp)
|
def nanoseconds_from_2001_to_datetime(nanoseconds: int) ->datetime:
timestamp_in_seconds = nanoseconds / 1000000000.0
reference_date_seconds = datetime(2001, 1, 1).timestamp()
actual_timestamp = reference_date_seconds + timestamp_in_seconds
return datetime.fromtimestamp(actual_timestamp)
| null |
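This helper converts Apple-style timestamps, which count from the 2001-01-01 reference date (the format used by e.g. the iMessage `chat.db`), into local `datetime` objects. A quick sanity check, assuming the function is in scope:

```python
print(nanoseconds_from_2001_to_datetime(0))
# 2001-01-01 00:00:00  (the reference date, in local time)
print(nanoseconds_from_2001_to_datetime(86_400 * 10 ** 9))
# 2001-01-02 00:00:00  (one day, i.e. 86,400 s, after the reference date)
```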
add_texts
|
"""
Embed texts and add them to the database.
Args:
texts (Iterable[str]): The texts to embed.
metadatas (Optional[List[dict]]): Metadata dicts
attached to each of the texts. Defaults to None.
collection_name (Optional[str]): Which collection to use.
Defaults to "langchain_collection".
If provided, default collection name will be set as well.
drop_old (Optional[bool]): Whether to drop the previous collection
and create a new one. Defaults to False.
Returns:
List of ids of the added texts.
"""
if not collection_name:
collection_name = self._collection_name
else:
self._collection_name = collection_name
if drop_old:
self._client.drop_db(db_name=collection_name)
texts = list(texts)
try:
embeddings = self._embeddings.embed_documents(texts)
except NotImplementedError:
embeddings = [self._embeddings.embed_query(x) for x in texts]
if len(embeddings) == 0:
logger.debug('Nothing to insert, skipping.')
return []
self._create_collection(table_name=collection_name, embeddings=embeddings,
metadatas=metadatas)
ids = [hash(uuid.uuid4()) for _ in texts]
records = []
for index, id in enumerate(ids):
record = {'id': id, 'text': texts[index], 'embeddings': embeddings[index]}
if metadatas is not None:
metadata = metadatas[index].items()
for key, value in metadata:
record[key] = value
records.append(record)
status_code, response = self._client.insert(table_name=collection_name,
records=records)
if status_code != 200:
logger.error(
f"Failed to add records to {collection_name}: {response['message']}")
raise Exception('Error: {}.'.format(response['message']))
return [str(id) for id in ids]
|
def add_texts(self, texts: Iterable[str], metadatas: Optional[List[dict]]=
None, collection_name: Optional[str]='', drop_old: Optional[bool]=False,
**kwargs: Any) ->List[str]:
"""
Embed texts and add them to the database.
Args:
texts (Iterable[str]): The texts to embed.
metadatas (Optional[List[dict]]): Metadata dicts
attached to each of the texts. Defaults to None.
collection_name (Optional[str]): Which collection to use.
Defaults to "langchain_collection".
If provided, default collection name will be set as well.
drop_old (Optional[bool]): Whether to drop the previous collection
and create a new one. Defaults to False.
Returns:
List of ids of the added texts.
"""
if not collection_name:
collection_name = self._collection_name
else:
self._collection_name = collection_name
if drop_old:
self._client.drop_db(db_name=collection_name)
texts = list(texts)
try:
embeddings = self._embeddings.embed_documents(texts)
except NotImplementedError:
embeddings = [self._embeddings.embed_query(x) for x in texts]
if len(embeddings) == 0:
logger.debug('Nothing to insert, skipping.')
return []
    self._create_collection(table_name=collection_name,
        embeddings=embeddings, metadatas=metadatas)
ids = [hash(uuid.uuid4()) for _ in texts]
records = []
for index, id in enumerate(ids):
        record = {'id': id, 'text': texts[index],
            'embeddings': embeddings[index]}
if metadatas is not None:
metadata = metadatas[index].items()
for key, value in metadata:
record[key] = value
records.append(record)
status_code, response = self._client.insert(table_name=collection_name,
records=records)
if status_code != 200:
logger.error(
f"Failed to add records to {collection_name}: {response['message']}"
)
raise Exception('Error: {}.'.format(response['message']))
return [str(id) for id in ids]
|
Embed texts and add them to the database.
Args:
texts (Iterable[str]): The texts to embed.
metadatas (Optional[List[dict]]): Metadata dicts
attached to each of the texts. Defaults to None.
collection_name (Optional[str]): Which collection to use.
Defaults to "langchain_collection".
If provided, default collection name will be set as well.
drop_old (Optional[bool]): Whether to drop the previous collection
and create a new one. Defaults to False.
Returns:
List of ids of the added texts.
|
test_chain_on_kv_singleio_dataset
|
llm = ChatOpenAI(temperature=0)
chain = LLMChain.from_string(llm, 'The answer to the {question} is: ')
eval_config = RunEvalConfig(
    evaluators=[EvaluatorType.QA, EvaluatorType.CRITERIA])
run_on_dataset(dataset_name=kv_singleio_dataset_name,
    llm_or_chain_factory=lambda: chain, client=client,
    evaluation=eval_config, project_name=eval_project_name,
    tags=['shouldpass'])
_check_all_feedback_passed(eval_project_name, client)
|
def test_chain_on_kv_singleio_dataset(kv_singleio_dataset_name: str,
eval_project_name: str, client: Client) ->None:
llm = ChatOpenAI(temperature=0)
chain = LLMChain.from_string(llm, 'The answer to the {question} is: ')
    eval_config = RunEvalConfig(
        evaluators=[EvaluatorType.QA, EvaluatorType.CRITERIA])
    run_on_dataset(dataset_name=kv_singleio_dataset_name,
        llm_or_chain_factory=lambda: chain, client=client,
        evaluation=eval_config, project_name=eval_project_name,
        tags=['shouldpass'])
_check_all_feedback_passed(eval_project_name, client)
| null |
lazy_load
|
"""Load documents."""
try:
from qcloud_cos import CosS3Client
except ImportError:
raise ImportError(
'Could not import cos-python-sdk-v5 python package. Please install it with `pip install cos-python-sdk-v5`.'
)
client = CosS3Client(self.conf)
contents = []
marker = ''
while True:
response = client.list_objects(Bucket=self.bucket, Prefix=self.prefix,
Marker=marker, MaxKeys=1000)
if 'Contents' in response:
contents.extend(response['Contents'])
if response['IsTruncated'] == 'false':
break
marker = response['NextMarker']
for content in contents:
if content['Key'].endswith('/'):
continue
loader = TencentCOSFileLoader(self.conf, self.bucket, content['Key'])
yield loader.load()[0]
|
def lazy_load(self) ->Iterator[Document]:
"""Load documents."""
try:
from qcloud_cos import CosS3Client
except ImportError:
raise ImportError(
'Could not import cos-python-sdk-v5 python package. Please install it with `pip install cos-python-sdk-v5`.'
)
client = CosS3Client(self.conf)
contents = []
marker = ''
while True:
        response = client.list_objects(Bucket=self.bucket,
            Prefix=self.prefix, Marker=marker, MaxKeys=1000)
if 'Contents' in response:
contents.extend(response['Contents'])
if response['IsTruncated'] == 'false':
break
marker = response['NextMarker']
for content in contents:
if content['Key'].endswith('/'):
continue
loader = TencentCOSFileLoader(self.conf, self.bucket, content['Key'])
yield loader.load()[0]
|
Load documents.
|
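The `while True` loop above is marker-based pagination: fetch a page, collect `Contents`, stop once `IsTruncated` equals the string `'false'` (the COS SDK reports it as a string, not a bool), otherwise continue from `NextMarker`. The pattern in a generic, client-agnostic sketch (the `client` argument is a stand-in for any SDK with this response shape):

```python
from typing import Any, Dict, Iterator


def iter_objects(client: Any, bucket: str, prefix: str) -> Iterator[Dict]:
    """Yield every object across pages, mirroring the COS loop above."""
    marker = ''
    while True:
        page = client.list_objects(Bucket=bucket, Prefix=prefix,
            Marker=marker, MaxKeys=1000)
        yield from page.get('Contents', [])
        if page['IsTruncated'] == 'false':  # string sentinel, per the SDK
            break
        marker = page['NextMarker']
```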
table
|
"""To log the input pandas dataframe as a html table"""
self.html(dataframe.to_html(), f'table_{name}')
|
def table(self, name: str, dataframe) ->None:
"""To log the input pandas dataframe as a html table"""
self.html(dataframe.to_html(), f'table_{name}')
|
To log the input pandas dataframe as a html table
|
__init__
|
self.value = value
|
def __init__(self, value: Any):
self.value = value
| null |
create
|
models = importlib.import_module('langchain.chat_models')
model_cls = getattr(models, provider)
model_config = model_cls(**kwargs)
converted_messages = convert_openai_messages(messages)
if not stream:
result = model_config.invoke(converted_messages)
return {'choices': [{'message': convert_message_to_dict(result)}]}
else:
return (_convert_message_chunk_to_delta(c, i) for i, c in enumerate(
model_config.stream(converted_messages)))
|
@staticmethod
def create(messages: Sequence[Dict[str, Any]], *, provider: str=
'ChatOpenAI', stream: bool=False, **kwargs: Any) ->Union[dict, Iterable]:
models = importlib.import_module('langchain.chat_models')
model_cls = getattr(models, provider)
model_config = model_cls(**kwargs)
converted_messages = convert_openai_messages(messages)
if not stream:
result = model_config.invoke(converted_messages)
return {'choices': [{'message': convert_message_to_dict(result)}]}
else:
        return (_convert_message_chunk_to_delta(c, i)
            for i, c in enumerate(model_config.stream(converted_messages)))
| null |
test_extract_more_nested_tags
|
bs_transformer = BeautifulSoupTransformer()
nested_html = (
"<html><div class='some_style'><p><span>First</span> paragraph.</p><p>Second paragraph.</p><p>Third paragraph with a list:<ul><li>First list item.</li><li>Second list item.</li></ul></p><p>Fourth paragraph.</p></div></html>"
)
documents = [Document(page_content=nested_html)]
docs_transformed = bs_transformer.transform_documents(documents)
assert docs_transformed[0].page_content == (
    'First paragraph. Second paragraph. Third paragraph with a list: '
    'First list item. Second list item. Fourth paragraph.')
|
@pytest.mark.requires('bs4')
def test_extract_more_nested_tags() ->None:
bs_transformer = BeautifulSoupTransformer()
nested_html = (
"<html><div class='some_style'><p><span>First</span> paragraph.</p><p>Second paragraph.</p><p>Third paragraph with a list:<ul><li>First list item.</li><li>Second list item.</li></ul></p><p>Fourth paragraph.</p></div></html>"
)
documents = [Document(page_content=nested_html)]
docs_transformed = bs_transformer.transform_documents(documents)
    assert docs_transformed[0].page_content == (
        'First paragraph. Second paragraph. Third paragraph with a list: '
        'First list item. Second list item. Fourth paragraph.')
| null |
_import_requests_tool_RequestsPostTool
|
from langchain_community.tools.requests.tool import RequestsPostTool
return RequestsPostTool
|
def _import_requests_tool_RequestsPostTool() ->Any:
from langchain_community.tools.requests.tool import RequestsPostTool
return RequestsPostTool
| null |
_search_points
|
"""Search points."""
if self.distance_strategy == DistanceStrategy.COSINE:
vec = np.array(embedding)
vec = vec / np.linalg.norm(vec)
embedding = vec.tolist()
payload = {'vector': embedding, 'limit': k}
response = requests.post(SemaDB.BASE_URL +
f'/collections/{self.collection_name}/points/search', json=payload,
headers=self.headers)
if response.status_code != 200:
raise ValueError(f'Error searching: {response.text}')
return response.json()['points']
|
def _search_points(self, embedding: List[float], k: int=4) ->List[dict]:
"""Search points."""
if self.distance_strategy == DistanceStrategy.COSINE:
vec = np.array(embedding)
vec = vec / np.linalg.norm(vec)
embedding = vec.tolist()
payload = {'vector': embedding, 'limit': k}
response = requests.post(SemaDB.BASE_URL +
f'/collections/{self.collection_name}/points/search', json=payload,
headers=self.headers)
if response.status_code != 200:
raise ValueError(f'Error searching: {response.text}')
return response.json()['points']
|
Search points.
|
validate_prompt
|
prompt = values['llm_chain'].prompt
if prompt.output_parser is None:
raise ValueError(
"LLMRouterChain requires base llm_chain prompt to have an output parser that converts LLM text output to a dictionary with keys 'destination' and 'next_inputs'. Received a prompt with no output parser."
)
return values
|
@root_validator()
def validate_prompt(cls, values: dict) ->dict:
prompt = values['llm_chain'].prompt
if prompt.output_parser is None:
raise ValueError(
"LLMRouterChain requires base llm_chain prompt to have an output parser that converts LLM text output to a dictionary with keys 'destination' and 'next_inputs'. Received a prompt with no output parser."
)
return values
| null |
format_dialog
|
"""Format messages and convert to a single string."""
chat_history = format_messages(input)
formatted_dialog = get_buffer_string(chat_history) + f"\nhuman: {input['text']}"
return {'dialog': formatted_dialog}
|
def format_dialog(input: dict) ->dict:
"""Format messages and convert to a single string."""
chat_history = format_messages(input)
    formatted_dialog = get_buffer_string(chat_history) + f"\nhuman: {input['text']}"
return {'dialog': formatted_dialog}
|
Format messages and convert to a single string.
|
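`format_dialog` relies on `get_buffer_string` to flatten a message list into a transcript before appending the new human turn. How the flattening behaves, assuming `langchain_core` is installed (`format_messages` itself is project-specific and not shown here):

```python
from langchain_core.messages import AIMessage, HumanMessage, get_buffer_string

history = [HumanMessage(content='hi'), AIMessage(content='hello!')]
print(get_buffer_string(history))
# Human: hi
# AI: hello!
```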
__init__
|
self.ops = list(ops)
|
def __init__(self, *ops: Dict[str, Any]) ->None:
self.ops = list(ops)
| null |
load_page
|
"""Read a page.
Args:
page_summary: Page summary from Notion API.
"""
page_id = page_summary['id']
metadata: Dict[str, Any] = {}
for prop_name, prop_data in page_summary['properties'].items():
prop_type = prop_data['type']
    if prop_type == 'rich_text':
        value = (prop_data['rich_text'][0]['plain_text']
            if prop_data['rich_text'] else None)
    elif prop_type == 'title':
        value = (prop_data['title'][0]['plain_text']
            if prop_data['title'] else None)
    elif prop_type == 'multi_select':
        value = ([item['name'] for item in prop_data['multi_select']]
            if prop_data['multi_select'] else [])
    elif prop_type == 'url':
        value = prop_data['url']
    elif prop_type == 'unique_id':
        value = (
            f"{prop_data['unique_id']['prefix']}-{prop_data['unique_id']['number']}"
            if prop_data['unique_id'] else None)
    elif prop_type == 'status':
        value = prop_data['status']['name'] if prop_data['status'] else None
    elif prop_type == 'people':
        value = ([item['name'] for item in prop_data['people']]
            if prop_data['people'] else [])
    elif prop_type == 'date':
        value = prop_data['date'] if prop_data['date'] else None
    elif prop_type == 'last_edited_time':
        value = (prop_data['last_edited_time']
            if prop_data['last_edited_time'] else None)
    elif prop_type == 'created_time':
        value = prop_data['created_time'] if prop_data['created_time'] else None
    elif prop_type == 'checkbox':
        value = prop_data['checkbox']
    elif prop_type == 'email':
        value = prop_data['email']
    elif prop_type == 'number':
        value = prop_data['number']
    elif prop_type == 'select':
        value = prop_data['select']['name'] if prop_data['select'] else None
    else:
        value = None
metadata[prop_name.lower()] = value
metadata['id'] = page_id
return Document(page_content=self._load_blocks(page_id), metadata=metadata)
|
def load_page(self, page_summary: Dict[str, Any]) ->Document:
"""Read a page.
Args:
page_summary: Page summary from Notion API.
"""
page_id = page_summary['id']
metadata: Dict[str, Any] = {}
for prop_name, prop_data in page_summary['properties'].items():
prop_type = prop_data['type']
        if prop_type == 'rich_text':
            value = (prop_data['rich_text'][0]['plain_text']
                if prop_data['rich_text'] else None)
        elif prop_type == 'title':
            value = (prop_data['title'][0]['plain_text']
                if prop_data['title'] else None)
        elif prop_type == 'multi_select':
            value = ([item['name'] for item in prop_data['multi_select']]
                if prop_data['multi_select'] else [])
        elif prop_type == 'url':
            value = prop_data['url']
        elif prop_type == 'unique_id':
            value = (
                f"{prop_data['unique_id']['prefix']}-{prop_data['unique_id']['number']}"
                if prop_data['unique_id'] else None)
        elif prop_type == 'status':
            value = prop_data['status']['name'] if prop_data['status'] else None
        elif prop_type == 'people':
            value = ([item['name'] for item in prop_data['people']]
                if prop_data['people'] else [])
        elif prop_type == 'date':
            value = prop_data['date'] if prop_data['date'] else None
        elif prop_type == 'last_edited_time':
            value = (prop_data['last_edited_time']
                if prop_data['last_edited_time'] else None)
        elif prop_type == 'created_time':
            value = (prop_data['created_time']
                if prop_data['created_time'] else None)
        elif prop_type == 'checkbox':
            value = prop_data['checkbox']
        elif prop_type == 'email':
            value = prop_data['email']
        elif prop_type == 'number':
            value = prop_data['number']
        elif prop_type == 'select':
            value = prop_data['select']['name'] if prop_data['select'] else None
        else:
            value = None
metadata[prop_name.lower()] = value
metadata['id'] = page_id
return Document(page_content=self._load_blocks(page_id), metadata=metadata)
|
Read a page.
Args:
page_summary: Page summary from Notion API.
|
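The property ladder in `load_page` maps each Notion property `type` to a plain Python value. The same mapping can be expressed table-driven, which keeps each rule on one line; a sketch covering three of the types (handler names are illustrative, not from the source):

```python
from typing import Any, Callable, Dict

_PROP_HANDLERS: Dict[str, Callable[[dict], Any]] = {
    'url': lambda p: p['url'],
    'checkbox': lambda p: p['checkbox'],
    'title': lambda p: p['title'][0]['plain_text'] if p['title'] else None,
}


def prop_value(prop_data: dict) -> Any:
    """Dispatch on prop_data['type']; None for unhandled types."""
    handler = _PROP_HANDLERS.get(prop_data['type'])
    return handler(prop_data) if handler else None


print(prop_value({'type': 'checkbox', 'checkbox': True}))  # True
```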
test_deeplake
|
"""Test end to end construction and search."""
texts = ['foo', 'bar', 'baz']
docsearch = DeepLake.from_texts(dataset_path='mem://test_path', texts=texts,
embedding=FakeEmbeddings())
output = docsearch.similarity_search('foo', k=1)
assert output == [Document(page_content='foo')]
|
def test_deeplake() ->None:
"""Test end to end construction and search."""
texts = ['foo', 'bar', 'baz']
    docsearch = DeepLake.from_texts(dataset_path='mem://test_path',
        texts=texts, embedding=FakeEmbeddings())
output = docsearch.similarity_search('foo', k=1)
assert output == [Document(page_content='foo')]
|
Test end to end construction and search.
|
similarity_search_with_score_by_vector
|
try:
from vald.v1.payload import payload_pb2
from vald.v1.vald import search_pb2_grpc
except ImportError:
raise ValueError(
'Could not import vald-client-python python package. Please install it with `pip install vald-client-python`.'
)
channel = self._get_channel()
stub = search_pb2_grpc.SearchStub(channel)
cfg = payload_pb2.Search.Config(num=k, radius=radius, epsilon=epsilon,
timeout=timeout)
res = stub.Search(payload_pb2.Search.Request(vector=embedding, config=cfg),
metadata=grpc_metadata)
docs_and_scores = []
for result in res.results:
docs_and_scores.append((Document(page_content=result.id), result.distance))
channel.close()
return docs_and_scores
|
def similarity_search_with_score_by_vector(self, embedding: List[float], k:
int=4, radius: float=-1.0, epsilon: float=0.01, timeout: int=3000000000,
grpc_metadata: Optional[Any]=None, **kwargs: Any) ->List[Tuple[Document,
float]]:
try:
from vald.v1.payload import payload_pb2
from vald.v1.vald import search_pb2_grpc
except ImportError:
raise ValueError(
'Could not import vald-client-python python package. Please install it with `pip install vald-client-python`.'
)
channel = self._get_channel()
stub = search_pb2_grpc.SearchStub(channel)
cfg = payload_pb2.Search.Config(num=k, radius=radius, epsilon=epsilon,
timeout=timeout)
res = stub.Search(payload_pb2.Search.Request(vector=embedding, config=
cfg), metadata=grpc_metadata)
docs_and_scores = []
for result in res.results:
        docs_and_scores.append(
            (Document(page_content=result.id), result.distance))
channel.close()
return docs_and_scores
| null |
_consume_next_step
|
if isinstance(values[-1], AgentFinish):
assert len(values) == 1
return values[-1]
else:
return [(a.action, a.observation) for a in values if isinstance(a,
AgentStep)]
|
def _consume_next_step(self, values: NextStepOutput) ->Union[AgentFinish,
List[Tuple[AgentAction, str]]]:
if isinstance(values[-1], AgentFinish):
assert len(values) == 1
return values[-1]
else:
return [(a.action, a.observation) for a in values if isinstance(a,
AgentStep)]
| null |
test_proto_file_splitter
|
splitter = RecursiveCharacterTextSplitter.from_language(Language.PROTO,
chunk_size=CHUNK_SIZE, chunk_overlap=0)
code = """
syntax = "proto3";
package example;
message Person {
string name = 1;
int32 age = 2;
repeated string hobbies = 3;
}
"""
chunks = splitter.split_text(code)
assert chunks == ['syntax =', '"proto3";', 'package', 'example;',
'message Person', '{', 'string name', '= 1;', 'int32 age =', '2;',
'repeated', 'string hobbies', '= 3;', '}']
|
def test_proto_file_splitter() ->None:
splitter = RecursiveCharacterTextSplitter.from_language(Language.PROTO,
chunk_size=CHUNK_SIZE, chunk_overlap=0)
code = """
syntax = "proto3";
package example;
message Person {
string name = 1;
int32 age = 2;
repeated string hobbies = 3;
}
"""
chunks = splitter.split_text(code)
assert chunks == ['syntax =', '"proto3";', 'package', 'example;',
'message Person', '{', 'string name', '= 1;', 'int32 age =', '2;',
'repeated', 'string hobbies', '= 3;', '}']
| null |
_import_dingo
|
from langchain_community.vectorstores.dingo import Dingo
return Dingo
|
def _import_dingo() ->Any:
from langchain_community.vectorstores.dingo import Dingo
return Dingo
| null |
template_is_valid
|
"""Check that template and input variables are consistent."""
if values['validate_template']:
all_inputs = values['input_variables'] + list(values['partial_variables'])
check_valid_template(values['template'], values['template_format'],
all_inputs)
elif values.get('template_format'):
    values['input_variables'] = [var for var in get_template_variables(
        values['template'], values['template_format'])
        if var not in values['partial_variables']]
return values
|
@root_validator()
def template_is_valid(cls, values: Dict) ->Dict:
"""Check that template and input variables are consistent."""
if values['validate_template']:
        all_inputs = values['input_variables'] + list(
            values['partial_variables'])
check_valid_template(values['template'], values['template_format'],
all_inputs)
elif values.get('template_format'):
        values['input_variables'] = [var for var in get_template_variables(
            values['template'], values['template_format'])
            if var not in values['partial_variables']]
return values
|
Check that template and input variables are consistent.
|
test_loading_from_YAML
|
"""Test loading from yaml file."""
prompt = load_prompt(EXAMPLE_DIR / 'simple_prompt.yaml')
expected_prompt = PromptTemplate(input_variables=['adjective'],
partial_variables={'content': 'dogs'}, template=
'Tell me a {adjective} joke about {content}.')
assert prompt == expected_prompt
|
def test_loading_from_YAML() ->None:
"""Test loading from yaml file."""
prompt = load_prompt(EXAMPLE_DIR / 'simple_prompt.yaml')
expected_prompt = PromptTemplate(input_variables=['adjective'],
partial_variables={'content': 'dogs'}, template=
'Tell me a {adjective} joke about {content}.')
assert prompt == expected_prompt
|
Test loading from yaml file.
|
mdelete
|
"""Delete the given keys."""
_keys = [self._get_prefixed_key(key) for key in keys]
self.client.delete(*_keys)
|
def mdelete(self, keys: Sequence[str]) ->None:
"""Delete the given keys."""
_keys = [self._get_prefixed_key(key) for key in keys]
self.client.delete(*_keys)
|
Delete the given keys.
|
mset
|
"""Set the given key-value pairs."""
self.underlying_store.mset([((k, v.decode('utf-8')) if v is not None else
None) for k, v in key_value_pairs])
|
def mset(self, key_value_pairs: Sequence[Tuple[str, bytes]]) ->None:
"""Set the given key-value pairs."""
self.underlying_store.mset([((k, v.decode('utf-8')) if v is not None else
None) for k, v in key_value_pairs])
|
Set the given key-value pairs.
|
setUp
|
self.fake_llm = self.make_fake_llm()
|
def setUp(self) ->None:
self.fake_llm = self.make_fake_llm()
| null |
__init__
|
"""Implement the BaseStore interface for the local file system.
Args:
root_path (Union[str, Path]): The root path of the file store. All keys are
interpreted as paths relative to this root.
"""
self.root_path = Path(root_path).absolute()
|
def __init__(self, root_path: Union[str, Path]) ->None:
"""Implement the BaseStore interface for the local file system.
Args:
root_path (Union[str, Path]): The root path of the file store. All keys are
interpreted as paths relative to this root.
"""
self.root_path = Path(root_path).absolute()
|
Implement the BaseStore interface for the local file system.
Args:
root_path (Union[str, Path]): The root path of the file store. All keys are
interpreted as paths relative to this root.
|
_identifying_params
|
return {**super()._identifying_params, 'pl_tags': self.pl_tags,
'return_pl_id': self.return_pl_id}
|
@property
def _identifying_params(self) ->Dict[str, Any]:
return {**super()._identifying_params, 'pl_tags': self.pl_tags,
'return_pl_id': self.return_pl_id}
| null |
_validate_input_vars
|
expected_input_vars = {'query', 'context', 'result'}
if expected_input_vars != set(prompt.input_variables):
raise ValueError(
f'Input variables should be {expected_input_vars}, but got {prompt.input_variables}'
)
|
@classmethod
def _validate_input_vars(cls, prompt: PromptTemplate) ->None:
expected_input_vars = {'query', 'context', 'result'}
if expected_input_vars != set(prompt.input_variables):
raise ValueError(
f'Input variables should be {expected_input_vars}, but got {prompt.input_variables}'
)
| null |
_get_examples
|
"""Get the examples to use for formatting the prompt.
Args:
**kwargs: Keyword arguments to be passed to the example selector.
Returns:
List of examples.
"""
if self.examples is not None:
return self.examples
elif self.example_selector is not None:
return self.example_selector.select_examples(kwargs)
else:
raise ValueError(
"One of 'examples' and 'example_selector' should be provided")
|
def _get_examples(self, **kwargs: Any) ->List[dict]:
"""Get the examples to use for formatting the prompt.
Args:
**kwargs: Keyword arguments to be passed to the example selector.
Returns:
List of examples.
"""
if self.examples is not None:
return self.examples
elif self.example_selector is not None:
return self.example_selector.select_examples(kwargs)
else:
raise ValueError(
"One of 'examples' and 'example_selector' should be provided")
|
Get the examples to use for formatting the prompt.
Args:
**kwargs: Keyword arguments to be passed to the example selector.
Returns:
List of examples.
|
__init__
|
"""Initialize with the documents to return."""
self.documents = documents
|
def __init__(self, documents: Sequence[Document]) ->None:
"""Initialize with the documents to return."""
self.documents = documents
|
Initialize with the documents to return.
|
_build_metadata
|
"""Build metadata based on the contents of the webpage"""
from selenium.common.exceptions import NoSuchElementException
from selenium.webdriver.common.by import By
metadata = {'source': url, 'title': 'No title found.', 'description':
    'No description found.', 'language': 'No language found.'}
if (title := driver.title):
    metadata['title'] = title
try:
    if (description := driver.find_element(By.XPATH,
            '//meta[@name="description"]')):
        metadata['description'] = (description.get_attribute('content')
            or 'No description found.')
except NoSuchElementException:
    pass
try:
    if (html_tag := driver.find_element(By.TAG_NAME, 'html')):
        metadata['language'] = (html_tag.get_attribute('lang')
            or 'No language found.')
except NoSuchElementException:
    pass
return metadata
|
def _build_metadata(self, url: str, driver: Union['Chrome', 'Firefox']) ->dict:
    """Build metadata based on the contents of the webpage"""
    from selenium.common.exceptions import NoSuchElementException
    from selenium.webdriver.common.by import By
    metadata = {'source': url, 'title': 'No title found.', 'description':
        'No description found.', 'language': 'No language found.'}
    if (title := driver.title):
        metadata['title'] = title
    try:
        if (description := driver.find_element(By.XPATH,
                '//meta[@name="description"]')):
            metadata['description'] = (description.get_attribute('content')
                or 'No description found.')
    except NoSuchElementException:
        pass
    try:
        if (html_tag := driver.find_element(By.TAG_NAME, 'html')):
            metadata['language'] = (html_tag.get_attribute('lang')
                or 'No language found.')
    except NoSuchElementException:
        pass
    return metadata
| null |
validate_environment
|
"""Validate that api key and endpoint exists in environment."""
azure_cogs_key = get_from_dict_or_env(values, 'azure_cogs_key',
'AZURE_COGS_KEY')
azure_cogs_endpoint = get_from_dict_or_env(values, 'azure_cogs_endpoint',
'AZURE_COGS_ENDPOINT')
try:
from azure.ai.formrecognizer import DocumentAnalysisClient
from azure.core.credentials import AzureKeyCredential
values['doc_analysis_client'] = DocumentAnalysisClient(endpoint=
azure_cogs_endpoint, credential=AzureKeyCredential(azure_cogs_key))
except ImportError:
raise ImportError(
'azure-ai-formrecognizer is not installed. Run `pip install azure-ai-formrecognizer` to install.'
)
return values
|
@root_validator(pre=True)
def validate_environment(cls, values: Dict) ->Dict:
"""Validate that api key and endpoint exists in environment."""
azure_cogs_key = get_from_dict_or_env(values, 'azure_cogs_key',
'AZURE_COGS_KEY')
azure_cogs_endpoint = get_from_dict_or_env(values,
'azure_cogs_endpoint', 'AZURE_COGS_ENDPOINT')
try:
from azure.ai.formrecognizer import DocumentAnalysisClient
from azure.core.credentials import AzureKeyCredential
values['doc_analysis_client'] = DocumentAnalysisClient(endpoint=
azure_cogs_endpoint, credential=AzureKeyCredential(azure_cogs_key))
except ImportError:
raise ImportError(
'azure-ai-formrecognizer is not installed. Run `pip install azure-ai-formrecognizer` to install.'
)
return values
|
Validate that api key and endpoint exists in environment.
|
test__convert_dict_to_message_other_role
|
message_dict = {'role': 'system', 'content': 'foo'}
result = _convert_dict_to_message(message_dict)
expected_output = ChatMessage(role='system', content='foo')
assert result == expected_output
|
def test__convert_dict_to_message_other_role() ->None:
message_dict = {'role': 'system', 'content': 'foo'}
result = _convert_dict_to_message(message_dict)
expected_output = ChatMessage(role='system', content='foo')
assert result == expected_output
| null |
from_uri
|
"""Creating a remote Spark Session via Spark connect.
For example: SparkSQL.from_uri("sc://localhost:15002")
"""
try:
from pyspark.sql import SparkSession
except ImportError:
raise ValueError(
'pyspark is not installed. Please install it with `pip install pyspark`'
)
spark = SparkSession.builder.remote(database_uri).getOrCreate()
return cls(spark, **kwargs)
|
@classmethod
def from_uri(cls, database_uri: str, engine_args: Optional[dict]=None,
        **kwargs: Any) ->SparkSQL:
"""Creating a remote Spark Session via Spark connect.
For example: SparkSQL.from_uri("sc://localhost:15002")
"""
try:
from pyspark.sql import SparkSession
except ImportError:
raise ValueError(
'pyspark is not installed. Please install it with `pip install pyspark`'
)
spark = SparkSession.builder.remote(database_uri).getOrCreate()
return cls(spark, **kwargs)
|
Creating a remote Spark Session via Spark connect.
For example: SparkSQL.from_uri("sc://localhost:15002")
|
delete
|
"""Delete the documents which have the specified ids.
Args:
ids: The ids of the embedding vectors.
**kwargs: Other keyword arguments that subclasses might use.
Returns:
Optional[bool]: True if deletion is successful.
False otherwise, None if not implemented.
"""
ret: Optional[bool] = None
tmp_res = []
if ids is None or len(ids) == 0:
return ret
for _id in ids:
if self.flag:
        ret = self.vearch.delete(self.using_db_name, self.using_table_name, _id)
else:
ret = self.vearch.del_doc(_id)
tmp_res.append(ret)
ret = all(i == 0 for i in tmp_res)
return ret
|
def delete(self, ids: Optional[List[str]]=None, **kwargs: Any) ->Optional[bool]:
"""Delete the documents which have the specified ids.
Args:
ids: The ids of the embedding vectors.
**kwargs: Other keyword arguments that subclasses might use.
Returns:
Optional[bool]: True if deletion is successful.
False otherwise, None if not implemented.
"""
ret: Optional[bool] = None
tmp_res = []
    if ids is None or len(ids) == 0:
return ret
for _id in ids:
if self.flag:
            ret = self.vearch.delete(self.using_db_name,
                self.using_table_name, _id)
else:
ret = self.vearch.del_doc(_id)
tmp_res.append(ret)
ret = all(i == 0 for i in tmp_res)
return ret
|
Delete the documents which have the specified ids.
Args:
ids: The ids of the embedding vectors.
**kwargs: Other keyword arguments that subclasses might use.
Returns:
Optional[bool]: True if deletion is successful.
False otherwise, None if not implemented.
|
list_tables
|
"""List all the tables created by the client."""
if self.awadb_client is None:
return []
return self.awadb_client.ListAllTables()
|
def list_tables(self, **kwargs: Any) ->List[str]:
"""List all the tables created by the client."""
if self.awadb_client is None:
return []
return self.awadb_client.ListAllTables()
|
List all the tables created by the client.
|
test_write_file
|
"""Test the WriteFile tool."""
with TemporaryDirectory() as temp_dir:
file_path = str(Path(temp_dir) / 'file.txt')
tool = WriteFileTool()
tool.run({'file_path': file_path, 'text': 'Hello, world!'})
assert (Path(temp_dir) / 'file.txt').exists()
assert (Path(temp_dir) / 'file.txt').read_text() == 'Hello, world!'
|
def test_write_file() ->None:
"""Test the WriteFile tool."""
with TemporaryDirectory() as temp_dir:
file_path = str(Path(temp_dir) / 'file.txt')
tool = WriteFileTool()
tool.run({'file_path': file_path, 'text': 'Hello, world!'})
assert (Path(temp_dir) / 'file.txt').exists()
assert (Path(temp_dir) / 'file.txt').read_text() == 'Hello, world!'
|
Test the WriteFile tool.
|
scrape_text
|
try:
response = requests.get(url)
if response.status_code == 200:
soup = BeautifulSoup(response.text, 'html.parser')
page_text = soup.get_text(separator=' ', strip=True)
return page_text
else:
return (
f'Failed to retrieve the webpage: Status code {response.status_code}'
)
except Exception as e:
print(e)
return f'Failed to retrieve the webpage: {e}'
|
def scrape_text(url: str) ->str:
try:
response = requests.get(url)
if response.status_code == 200:
soup = BeautifulSoup(response.text, 'html.parser')
page_text = soup.get_text(separator=' ', strip=True)
return page_text
else:
return (
f'Failed to retrieve the webpage: Status code {response.status_code}'
)
except Exception as e:
print(e)
return f'Failed to retrieve the webpage: {e}'
| null |
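A hedged usage sketch for scrape_text, assuming the function and its requests/BeautifulSoup dependencies are importable in the current module; the URL is illustrative:

# scrape_text returns either the page text or a failure message string.
text = scrape_text("https://example.com")
print(text[:200])  # first 200 characters of the extracted page text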
_import_gpt4all
|
from langchain_community.llms.gpt4all import GPT4All
return GPT4All
|
def _import_gpt4all() ->Any:
from langchain_community.llms.gpt4all import GPT4All
return GPT4All
| null |
test_create_confluence_page
|
"""Test for getting projects on JIRA"""
jira = JiraAPIWrapper()
create_page_dict = (
'{"space": "ROC", "title":"This is the title","body":"This is the body. You can use <strong>HTML tags</strong>!"}'
)
output = jira.run('create_page', create_page_dict)
assert 'type' in output
assert 'page' in output
|
def test_create_confluence_page() ->None:
"""Test for getting projects on JIRA"""
jira = JiraAPIWrapper()
create_page_dict = (
'{"space": "ROC", "title":"This is the title","body":"This is the body. You can use <strong>HTML tags</strong>!"}'
)
output = jira.run('create_page', create_page_dict)
assert 'type' in output
assert 'page' in output
|
Test creating a page on Confluence.
|
test_self_hosted_huggingface_embedding_documents
|
"""Test self-hosted huggingface embeddings."""
documents = ['foo bar']
gpu = get_remote_instance()
embedding = SelfHostedHuggingFaceEmbeddings(hardware=gpu)
output = embedding.embed_documents(documents)
assert len(output) == 1
assert len(output[0]) == 768
|
def test_self_hosted_huggingface_embedding_documents() ->None:
"""Test self-hosted huggingface embeddings."""
documents = ['foo bar']
gpu = get_remote_instance()
embedding = SelfHostedHuggingFaceEmbeddings(hardware=gpu)
output = embedding.embed_documents(documents)
assert len(output) == 1
assert len(output[0]) == 768
|
Test self-hosted huggingface embeddings.
|
_convert_delta_to_message_chunk
|
role = _dict.get('role')
content = _dict.get('content') or ''
if role == 'user' or default_class == HumanMessageChunk:
return HumanMessageChunk(content=content)
elif role == 'assistant' or default_class == AIMessageChunk:
return AIMessageChunk(content=content)
elif role or default_class == ChatMessageChunk:
return ChatMessageChunk(content=content, role=role)
else:
return default_class(content=content)
|
def _convert_delta_to_message_chunk(_dict: Mapping[str, Any], default_class:
Type[BaseMessageChunk]) ->BaseMessageChunk:
role = _dict.get('role')
content = _dict.get('content') or ''
if role == 'user' or default_class == HumanMessageChunk:
return HumanMessageChunk(content=content)
elif role == 'assistant' or default_class == AIMessageChunk:
return AIMessageChunk(content=content)
elif role or default_class == ChatMessageChunk:
return ChatMessageChunk(content=content, role=role)
else:
return default_class(content=content)
| null |
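Worked examples of the role dispatch above, assuming langchain_core's message chunk classes are available; the delta dicts are illustrative:

from langchain_core.messages import AIMessageChunk, HumanMessageChunk

chunk = _convert_delta_to_message_chunk({'content': 'Hi'}, AIMessageChunk)
assert isinstance(chunk, AIMessageChunk)     # no role, so the default class wins
chunk = _convert_delta_to_message_chunk({'role': 'user', 'content': 'Hi'}, AIMessageChunk)
assert isinstance(chunk, HumanMessageChunk)  # explicit 'user' role overrides the default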
test_ai_message_chunks
|
assert AIMessageChunk(example=True, content='I am') + AIMessageChunk(example
=True, content=' indeed.') == AIMessageChunk(example=True, content=
'I am indeed.'
), 'AIMessageChunk + AIMessageChunk should be an AIMessageChunk'
with pytest.raises(ValueError):
AIMessageChunk(example=True, content='I am') + AIMessageChunk(example=
False, content=' indeed.')
|
def test_ai_message_chunks() ->None:
assert AIMessageChunk(example=True, content='I am') + AIMessageChunk(
example=True, content=' indeed.') == AIMessageChunk(example=True,
content='I am indeed.'
), 'AIMessageChunk + AIMessageChunk should be an AIMessageChunk'
with pytest.raises(ValueError):
AIMessageChunk(example=True, content='I am') + AIMessageChunk(example
=False, content=' indeed.')
| null |
similarity_search
|
"""Return docs most similar to query.
Args:
query: Text query.
k: The maximum number of documents to return.
text_in_page_content: Filter by the text in page_content of Document.
meta_filter (Optional[dict]): Filter by metadata. Defaults to None.
E.g. `{"color": "red", "price": 4.20}`.
E.g. `{"max_price": 15.66, "min_price": 4.20}`, where `price` is the
metadata field, meaning an exclusive range filter (4.20 < price < 15.66).
E.g. `{"maxe_price": 15.66, "mine_price": 4.20}`, where `price` is the
metadata field, meaning an inclusive range filter (4.20 <= price <= 15.66).
kwargs: Any additional parameters that may be supported in the future.
Returns:
The k most similar documents to the specified text query.
"""
if self.awadb_client is None:
raise ValueError('AwaDB client is None!!!')
embedding = None
if self.using_table_name in self.table2embeddings:
embedding = self.table2embeddings[self.using_table_name].embed_query(query)
else:
from awadb import AwaEmbedding
embedding = AwaEmbedding().Embedding(query)
not_include_fields: Set[str] = {'text_embedding', '_id', 'score'}
return self.similarity_search_by_vector(embedding, k, text_in_page_content=
text_in_page_content, meta_filter=meta_filter,
not_include_fields_in_metadata=not_include_fields)
|
def similarity_search(self, query: str, k: int=DEFAULT_TOPN,
text_in_page_content: Optional[str]=None, meta_filter: Optional[dict]=
None, **kwargs: Any) ->List[Document]:
"""Return docs most similar to query.
Args:
query: Text query.
k: The maximum number of documents to return.
text_in_page_content: Filter by the text in page_content of Document.
meta_filter (Optional[dict]): Filter by metadata. Defaults to None.
E.g. `{"color": "red", "price": 4.20}`.
E.g. `{"max_price": 15.66, "min_price": 4.20}`, where `price` is the
metadata field, meaning an exclusive range filter (4.20 < price < 15.66).
E.g. `{"maxe_price": 15.66, "mine_price": 4.20}`, where `price` is the
metadata field, meaning an inclusive range filter (4.20 <= price <= 15.66).
kwargs: Any additional parameters that may be supported in the future.
Returns:
The k most similar documents to the specified text query.
"""
if self.awadb_client is None:
raise ValueError('AwaDB client is None!!!')
embedding = None
if self.using_table_name in self.table2embeddings:
embedding = self.table2embeddings[self.using_table_name].embed_query(
query)
else:
from awadb import AwaEmbedding
embedding = AwaEmbedding().Embedding(query)
not_include_fields: Set[str] = {'text_embedding', '_id', 'score'}
return self.similarity_search_by_vector(embedding, k,
text_in_page_content=text_in_page_content, meta_filter=meta_filter,
not_include_fields_in_metadata=not_include_fields)
|
Return docs most similar to query.
Args:
query: Text query.
k: The maximum number of documents to return.
text_in_page_content: Filter by the text in page_content of Document.
meta_filter (Optional[dict]): Filter by metadata. Defaults to None.
E.g. `{"color": "red", "price": 4.20}`.
E.g. `{"max_price": 15.66, "min_price": 4.20}`, where `price` is the
metadata field, meaning an exclusive range filter (4.20 < price < 15.66).
E.g. `{"maxe_price": 15.66, "mine_price": 4.20}`, where `price` is the
metadata field, meaning an inclusive range filter (4.20 <= price <= 15.66).
kwargs: Any additional parameters that may be supported in the future.
Returns:
The k most similar documents to the specified text query.
|
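A hedged usage sketch for the AwaDB similarity_search above; `store`, the query text, and the `price` metadata field are illustrative assumptions:

docs = store.similarity_search(
    "cheap summer trip",
    k=3,
    meta_filter={"max_price": 15.66, "min_price": 4.20},  # exclusive range on price
)
for doc in docs:
    print(doc.metadata.get("price"), doc.page_content[:60])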
test_retrieve_article_returns_book_abstract
|
"""Test that returns the excerpt of a book."""
output_nolabel = api_client.retrieve_article('25905357', '')
output_withlabel = api_client.retrieve_article('29262144', '')
test_string_nolabel = (
'Osteoporosis is a multifactorial disorder associated with low bone mass and enhanced skeletal fragility. Although'
)
assert test_string_nolabel in output_nolabel['Summary']
assert 'Wallenberg syndrome was first described in 1808 by Gaspard Vieusseux. However,' in output_withlabel[
'Summary']
|
def test_retrieve_article_returns_book_abstract(api_client: PubMedAPIWrapper
) ->None:
"""Test that returns the excerpt of a book."""
output_nolabel = api_client.retrieve_article('25905357', '')
output_withlabel = api_client.retrieve_article('29262144', '')
test_string_nolabel = (
'Osteoporosis is a multifactorial disorder associated with low bone mass and enhanced skeletal fragility. Although'
)
assert test_string_nolabel in output_nolabel['Summary']
assert 'Wallenberg syndrome was first described in 1808 by Gaspard Vieusseux. However,' in output_withlabel[
'Summary']
|
Test that the excerpt of a book is returned.
|
observation_prefix
|
"""Prefix to append the observation with."""
|
@property
@abstractmethod
def observation_prefix(self) ->str:
"""Prefix to append the observation with."""
|
Prefix to append the observation with.
|
_type
|
return 'vector_sql_parser'
|
@property
def _type(self) ->str:
return 'vector_sql_parser'
| null |
test_public_api
|
"""Test for regressions or changes in the public API."""
assert set(public_api) == set(_EXPECTED)
|
def test_public_api() ->None:
"""Test for regressions or changes in the public API."""
assert set(public_api) == set(_EXPECTED)
|
Test for regressions or changes in the public API.
|
similarity_search_by_text
|
"""Return docs most similar to query.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
Returns:
List of Documents most similar to the query.
"""
content: Dict[str, Any] = {'concepts': [query]}
if kwargs.get('search_distance'):
content['certainty'] = kwargs.get('search_distance')
query_obj = self._client.query.get(self._index_name, self._query_attrs)
if kwargs.get('where_filter'):
query_obj = query_obj.with_where(kwargs.get('where_filter'))
if kwargs.get('tenant'):
query_obj = query_obj.with_tenant(kwargs.get('tenant'))
if kwargs.get('additional'):
query_obj = query_obj.with_additional(kwargs.get('additional'))
result = query_obj.with_near_text(content).with_limit(k).do()
if 'errors' in result:
raise ValueError(f"Error during query: {result['errors']}")
docs = []
for res in result['data']['Get'][self._index_name]:
text = res.pop(self._text_key)
docs.append(Document(page_content=text, metadata=res))
return docs
|
def similarity_search_by_text(self, query: str, k: int=4, **kwargs: Any
) ->List[Document]:
"""Return docs most similar to query.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
Returns:
List of Documents most similar to the query.
"""
content: Dict[str, Any] = {'concepts': [query]}
if kwargs.get('search_distance'):
content['certainty'] = kwargs.get('search_distance')
query_obj = self._client.query.get(self._index_name, self._query_attrs)
if kwargs.get('where_filter'):
query_obj = query_obj.with_where(kwargs.get('where_filter'))
if kwargs.get('tenant'):
query_obj = query_obj.with_tenant(kwargs.get('tenant'))
if kwargs.get('additional'):
query_obj = query_obj.with_additional(kwargs.get('additional'))
result = query_obj.with_near_text(content).with_limit(k).do()
if 'errors' in result:
raise ValueError(f"Error during query: {result['errors']}")
docs = []
for res in result['data']['Get'][self._index_name]:
text = res.pop(self._text_key)
docs.append(Document(page_content=text, metadata=res))
return docs
|
Return docs most similar to query.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
Returns:
List of Documents most similar to the query.
|
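A hedged usage sketch for the Weaviate text search above; `weaviate_store`, the `source` metadata path, and the certainty value are illustrative, and the filter follows Weaviate's GraphQL `where` shape:

docs = weaviate_store.similarity_search_by_text(
    "vector databases",
    k=2,
    search_distance=0.7,  # forwarded to Weaviate as `certainty`
    where_filter={"path": ["source"], "operator": "Equal", "valueString": "docs"},
)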
acompletion_with_retry
|
"""Use tenacity to retry the completion call."""
retry_decorator = _create_retry_decorator(llm)
@retry_decorator
async def _completion_with_retry(**kwargs: Any) ->Any:
return await llm.async_client.generate(**kwargs)
return await _completion_with_retry(**kwargs)
|
async def acompletion_with_retry(llm: Cohere, **kwargs: Any) ->Any:
"""Use tenacity to retry the completion call."""
retry_decorator = _create_retry_decorator(llm)
@retry_decorator
async def _completion_with_retry(**kwargs: Any) ->Any:
return await llm.async_client.generate(**kwargs)
return await _completion_with_retry(**kwargs)
|
Use tenacity to retry the completion call.
|
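A hedged usage sketch: the call must be awaited, `llm` is assumed to be a configured Cohere instance, and the model and prompt values are illustrative kwargs forwarded to the underlying generate call:

import asyncio

async def main() -> None:
    response = await acompletion_with_retry(llm, model="command", prompt="Say hello")
    print(response)

asyncio.run(main())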
load
|
"""Load documents."""
loader = UnstructuredPDFLoader(str(self.file_path))
return loader.load()
|
def load(self) ->List[Document]:
"""Load documents."""
loader = UnstructuredPDFLoader(str(self.file_path))
return loader.load()
|
Load documents.
|
_generate
|
"""Generate next turn in the conversation.
Args:
messages: The history of the conversation as a list of messages. Code chat
does not support context.
stop: The list of stop words (optional).
run_manager: The CallbackManager for the LLM run; it is not used at the moment.
stream: Whether to use the streaming endpoint.
Returns:
The ChatResult that contains outputs generated by the model.
Raises:
ValueError: if the last message in the list is not from a human.
"""
should_stream = stream if stream is not None else self.streaming
if should_stream:
stream_iter = self._stream(messages, stop=stop, run_manager=run_manager,
**kwargs)
return generate_from_stream(stream_iter)
question = _get_question(messages)
params = self._prepare_params(stop=stop, stream=False, **kwargs)
msg_params = {}
if 'candidate_count' in params:
msg_params['candidate_count'] = params.pop('candidate_count')
if self._is_gemini_model:
history_gemini = _parse_chat_history_gemini(messages, project=self.project)
message = history_gemini.pop()
chat = self.client.start_chat(history=history_gemini)
response = chat.send_message(message, generation_config=params)
else:
history = _parse_chat_history(messages[:-1])
examples = kwargs.get('examples') or self.examples
if examples:
params['examples'] = _parse_examples(examples)
chat = self._start_chat(history, **params)
response = chat.send_message(question.content, **msg_params)
generations = [ChatGeneration(message=AIMessage(content=r.text)) for r in
response.candidates]
return ChatResult(generations=generations)
|
def _generate(self, messages: List[BaseMessage], stop: Optional[List[str]]=
None, run_manager: Optional[CallbackManagerForLLMRun]=None, stream:
Optional[bool]=None, **kwargs: Any) ->ChatResult:
"""Generate next turn in the conversation.
Args:
messages: The history of the conversation as a list of messages. Code chat
does not support context.
stop: The list of stop words (optional).
run_manager: The CallbackManager for the LLM run; it is not used at the moment.
stream: Whether to use the streaming endpoint.
Returns:
The ChatResult that contains outputs generated by the model.
Raises:
ValueError: if the last message in the list is not from a human.
"""
should_stream = stream if stream is not None else self.streaming
if should_stream:
stream_iter = self._stream(messages, stop=stop, run_manager=
run_manager, **kwargs)
return generate_from_stream(stream_iter)
question = _get_question(messages)
params = self._prepare_params(stop=stop, stream=False, **kwargs)
msg_params = {}
if 'candidate_count' in params:
msg_params['candidate_count'] = params.pop('candidate_count')
if self._is_gemini_model:
history_gemini = _parse_chat_history_gemini(messages, project=self.
project)
message = history_gemini.pop()
chat = self.client.start_chat(history=history_gemini)
response = chat.send_message(message, generation_config=params)
else:
history = _parse_chat_history(messages[:-1])
examples = kwargs.get('examples') or self.examples
if examples:
params['examples'] = _parse_examples(examples)
chat = self._start_chat(history, **params)
response = chat.send_message(question.content, **msg_params)
generations = [ChatGeneration(message=AIMessage(content=r.text)) for r in
response.candidates]
return ChatResult(generations=generations)
|
Generate next turn in the conversation.
Args:
messages: The history of the conversation as a list of messages. Code chat
does not support context.
stop: The list of stop words (optional).
run_manager: The CallbackManager for the LLM run; it is not used at the moment.
stream: Whether to use the streaming endpoint.
Returns:
The ChatResult that contains outputs generated by the model.
Raises:
ValueError: if the last message in the list is not from a human.
|
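A minimal standalone sketch of the stream-resolution rule at the top of _generate above: an explicit `stream` argument wins, otherwise the instance-level `self.streaming` default applies.

def resolve_stream(stream, default_streaming):
    return stream if stream is not None else default_streaming

assert resolve_stream(None, True) is True    # fall back to self.streaming
assert resolve_stream(False, True) is False  # explicit argument overrides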
test_singlestoredb_filter_metadata_4
|
"""Test no matches"""
table_name = 'test_singlestoredb_filter_metadata_4'
drop(table_name)
docs = [Document(page_content=t, metadata={'index': i, 'category': 'budget'
}) for i, t in enumerate(texts)]
docsearch = SingleStoreDB.from_documents(docs, FakeEmbeddings(),
distance_strategy=DistanceStrategy.EUCLIDEAN_DISTANCE, table_name=
table_name, host=TEST_SINGLESTOREDB_URL)
output = docsearch.similarity_search('foo', k=1, filter={'category':
'vacation'})
assert output == []
drop(table_name)
|
@pytest.mark.skipif(not singlestoredb_installed, reason=
'singlestoredb not installed')
def test_singlestoredb_filter_metadata_4(texts: List[str]) ->None:
"""Test no matches"""
table_name = 'test_singlestoredb_filter_metadata_4'
drop(table_name)
docs = [Document(page_content=t, metadata={'index': i, 'category':
'budget'}) for i, t in enumerate(texts)]
docsearch = SingleStoreDB.from_documents(docs, FakeEmbeddings(),
distance_strategy=DistanceStrategy.EUCLIDEAN_DISTANCE, table_name=
table_name, host=TEST_SINGLESTOREDB_URL)
output = docsearch.similarity_search('foo', k=1, filter={'category':
'vacation'})
assert output == []
drop(table_name)
|
Test no matches
|
input_keys
|
"""Expect input key.
:meta private:
"""
return [self.input_key]
|
@property
def input_keys(self) ->List[str]:
"""Expect input key.
:meta private:
"""
return [self.input_key]
|
Expect input key.
:meta private:
|
__init__
|
"""Initialize with necessary components."""
self.embedding = embedding
self.index = index
self.docstore = docstore
self.index_to_docstore_id = index_to_docstore_id
self.distance_strategy = distance_strategy
self.override_relevance_score_fn = relevance_score_fn
self._normalize_L2 = normalize_L2
self._scann_config = scann_config
|
def __init__(self, embedding: Embeddings, index: Any, docstore: Docstore,
index_to_docstore_id: Dict[int, str], relevance_score_fn: Optional[
Callable[[float], float]]=None, normalize_L2: bool=False,
distance_strategy: DistanceStrategy=DistanceStrategy.EUCLIDEAN_DISTANCE,
scann_config: Optional[str]=None):
"""Initialize with necessary components."""
self.embedding = embedding
self.index = index
self.docstore = docstore
self.index_to_docstore_id = index_to_docstore_id
self.distance_strategy = distance_strategy
self.override_relevance_score_fn = relevance_score_fn
self._normalize_L2 = normalize_L2
self._scann_config = scann_config
|
Initialize with necessary components.
|
__init__
|
"""Initialize the HuggingFaceDatasetLoader.
Args:
path: Path or name of the dataset.
page_content_column: Page content column name. Default is "text".
name: Name of the dataset configuration.
data_dir: Data directory of the dataset configuration.
data_files: Path(s) to source data file(s).
cache_dir: Directory to read/write data.
keep_in_memory: Whether to copy the dataset in-memory.
save_infos: Save the dataset information (checksums/size/splits/...).
Default is False.
use_auth_token: Bearer token for remote files on the Dataset Hub.
num_proc: Number of processes.
"""
self.path = path
self.page_content_column = page_content_column
self.name = name
self.data_dir = data_dir
self.data_files = data_files
self.cache_dir = cache_dir
self.keep_in_memory = keep_in_memory
self.save_infos = save_infos
self.use_auth_token = use_auth_token
self.num_proc = num_proc
|
def __init__(self, path: str, page_content_column: str='text', name:
Optional[str]=None, data_dir: Optional[str]=None, data_files: Optional[
Union[str, Sequence[str], Mapping[str, Union[str, Sequence[str]]]]]=
None, cache_dir: Optional[str]=None, keep_in_memory: Optional[bool]=
None, save_infos: bool=False, use_auth_token: Optional[Union[bool, str]
]=None, num_proc: Optional[int]=None):
"""Initialize the HuggingFaceDatasetLoader.
Args:
path: Path or name of the dataset.
page_content_column: Page content column name. Default is "text".
name: Name of the dataset configuration.
data_dir: Data directory of the dataset configuration.
data_files: Path(s) to source data file(s).
cache_dir: Directory to read/write data.
keep_in_memory: Whether to copy the dataset in-memory.
save_infos: Save the dataset information (checksums/size/splits/...).
Default is False.
use_auth_token: Bearer token for remote files on the Dataset Hub.
num_proc: Number of processes.
"""
self.path = path
self.page_content_column = page_content_column
self.name = name
self.data_dir = data_dir
self.data_files = data_files
self.cache_dir = cache_dir
self.keep_in_memory = keep_in_memory
self.save_infos = save_infos
self.use_auth_token = use_auth_token
self.num_proc = num_proc
|
Initialize the HuggingFaceDatasetLoader.
Args:
path: Path or name of the dataset.
page_content_column: Page content column name. Default is "text".
name: Name of the dataset configuration.
data_dir: Data directory of the dataset configuration.
data_files: Path(s) to source data file(s).
cache_dir: Directory to read/write data.
keep_in_memory: Whether to copy the dataset in-memory.
save_infos: Save the dataset information (checksums/size/splits/...).
Default is False.
use_auth_token: Bearer token for remote files on the Dataset Hub.
num_proc: Number of processes.
|
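A hedged usage sketch for the loader above; the dataset name "imdb" and the "text" column are illustrative assumptions:

from langchain_community.document_loaders import HuggingFaceDatasetLoader

loader = HuggingFaceDatasetLoader("imdb", page_content_column="text")
docs = loader.load()
print(len(docs), docs[0].page_content[:80])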
add_texts
|
"""Run more texts through the embeddings and add to the vectorstore.
Args:
texts: Iterable of strings to add to the vectorstore.
metadatas: Optional list of metadatas associated with the texts.
Returns:
List of ids from adding the texts into the vectorstore.
"""
batch_size = kwargs.get('batch_size', DEFAULT_INSERT_BATCH_SIZE)
_metadatas: Union[List, Generator] = metadatas or ({} for _ in texts)
texts_batch = []
metadatas_batch = []
result_ids = []
for i, (text, metadata) in enumerate(zip(texts, _metadatas)):
texts_batch.append(text)
metadatas_batch.append(metadata)
if (i + 1) % batch_size == 0:
result_ids.extend(self._insert_texts(texts_batch, metadatas_batch))
texts_batch = []
metadatas_batch = []
if texts_batch:
result_ids.extend(self._insert_texts(texts_batch, metadatas_batch))
return result_ids
|
def add_texts(self, texts: Iterable[str], metadatas: Optional[List[Dict[str,
Any]]]=None, **kwargs: Any) ->List:
"""Run more texts through the embeddings and add to the vectorstore.
Args:
texts: Iterable of strings to add to the vectorstore.
metadatas: Optional list of metadatas associated with the texts.
Returns:
List of ids from adding the texts into the vectorstore.
"""
batch_size = kwargs.get('batch_size', DEFAULT_INSERT_BATCH_SIZE)
_metadatas: Union[List, Generator] = metadatas or ({} for _ in texts)
texts_batch = []
metadatas_batch = []
result_ids = []
for i, (text, metadata) in enumerate(zip(texts, _metadatas)):
texts_batch.append(text)
metadatas_batch.append(metadata)
if (i + 1) % batch_size == 0:
result_ids.extend(self._insert_texts(texts_batch, metadatas_batch))
texts_batch = []
metadatas_batch = []
if texts_batch:
result_ids.extend(self._insert_texts(texts_batch, metadatas_batch))
return result_ids
|
Run more texts through the embeddings and add to the vectorstore.
Args:
texts: Iterable of strings to add to the vectorstore.
metadatas: Optional list of metadatas associated with the texts.
Returns:
List of ids from adding the texts into the vectorstore.
|
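A minimal standalone sketch of the batching pattern in add_texts above: flush a full batch every `batch_size` items, then flush any final partial batch. The batch size of 2 and the stub insert are illustrative.

def insert_batched(texts, batch_size=2):
    inserted, batch = [], []
    for i, text in enumerate(texts):
        batch.append(text)
        if (i + 1) % batch_size == 0:  # batch is full, flush it
            inserted.extend(batch)     # stands in for _insert_texts
            batch = []
    if batch:                          # leftover partial batch
        inserted.extend(batch)
    return inserted

assert insert_batched(["a", "b", "c"]) == ["a", "b", "c"]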
_import_stackexchange_tool
|
from langchain_community.tools.stackexchange.tool import StackExchangeTool
return StackExchangeTool
|
def _import_stackexchange_tool() ->Any:
from langchain_community.tools.stackexchange.tool import StackExchangeTool
return StackExchangeTool
| null |
_load_from_object_ids
|
"""Lazily load files specified by their object_ids from a drive.
Load files into the system as binary large objects (Blobs) and return an Iterable.
Args:
drive: The Drive instance from which the files are to be loaded. This Drive
instance should represent a cloud storage service or similar storage
system where the files are stored.
object_ids: A list of object_id strings. Each object_id represents a unique
identifier for a file in the drive.
Yields:
An iterator that yields Blob instances, which are binary representations of
the files loaded from the drive using the specified object_ids.
"""
file_mime_types = self._fetch_mime_types
with tempfile.TemporaryDirectory() as temp_dir:
for object_id in object_ids:
file = drive.get_item(object_id)
if not file:
logging.warning(
f"There isn't a file withobject_id {object_id} in drive {drive}."
)
continue
if file.is_file:
if file.mime_type in list(file_mime_types.values()):
file.download(to_path=temp_dir, chunk_size=self.chunk_size)
loader = FileSystemBlobLoader(path=temp_dir)
yield from loader.yield_blobs()
|
def _load_from_object_ids(self, drive: Drive, object_ids: List[str]
) ->Iterable[Blob]:
"""Lazily load files specified by their object_ids from a drive.
Load files into the system as binary large objects (Blobs) and return an Iterable.
Args:
drive: The Drive instance from which the files are to be loaded. This Drive
instance should represent a cloud storage service or similar storage
system where the files are stored.
object_ids: A list of object_id strings. Each object_id represents a unique
identifier for a file in the drive.
Yields:
An iterator that yields Blob instances, which are binary representations of
the files loaded from the drive using the specified object_ids.
"""
file_mime_types = self._fetch_mime_types
with tempfile.TemporaryDirectory() as temp_dir:
for object_id in object_ids:
file = drive.get_item(object_id)
if not file:
logging.warning(
f"There isn't a file withobject_id {object_id} in drive {drive}."
)
continue
if file.is_file:
if file.mime_type in list(file_mime_types.values()):
file.download(to_path=temp_dir, chunk_size=self.chunk_size)
loader = FileSystemBlobLoader(path=temp_dir)
yield from loader.yield_blobs()
|
Lazily load files specified by their object_ids from a drive.
Load files into the system as binary large objects (Blobs) and return an Iterable.
Args:
drive: The Drive instance from which the files are to be loaded. This Drive
instance should represent a cloud storage service or similar storage
system where the files are stored.
object_ids: A list of object_id strings. Each object_id represents a unique
identifier for a file in the drive.
Yields:
An iterator that yields Blob instances, which are binary representations of
the files loaded from the drive using the specified object_ids.
|
_load_map_rerank_chain
|
llm_chain = LLMChain(llm=llm, prompt=prompt, verbose=verbose)
return MapRerankDocumentsChain(llm_chain=llm_chain, rank_key=rank_key,
answer_key=answer_key, document_variable_name=document_variable_name,
**kwargs)
|
def _load_map_rerank_chain(llm: BaseLanguageModel, prompt:
BasePromptTemplate=MAP_RERANK_PROMPT, verbose: bool=False,
document_variable_name: str='context', rank_key: str='score',
answer_key: str='answer', **kwargs: Any) ->MapRerankDocumentsChain:
llm_chain = LLMChain(llm=llm, prompt=prompt, verbose=verbose)
return MapRerankDocumentsChain(llm_chain=llm_chain, rank_key=rank_key,
answer_key=answer_key, document_variable_name=
document_variable_name, **kwargs)
| null |
get_fallacies
|
if names is None:
return list(FALLACIES.values())
else:
return [FALLACIES[name] for name in names]
|
@classmethod
def get_fallacies(cls, names: Optional[List[str]]=None) ->List[LogicalFallacy]:
if names is None:
return list(FALLACIES.values())
else:
return [FALLACIES[name] for name in names]
| null |
format_expression
|
_left, _right = str(left), str(right)
if _left == _right == '*':
return _left
if _left == '*' != _right:
return _right
if _right == '*' != _left:
return _left
return f'({_left}{operator_str}{_right})'
|
@staticmethod
def format_expression(left: 'RedisFilterExpression', right:
'RedisFilterExpression', operator_str: str) ->str:
_left, _right = str(left), str(right)
if _left == _right == '*':
return _left
if _left == '*' != _right:
return _right
if _right == '*' != _left:
return _left
return f'({_left}{operator_str}{_right})'
| null |
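A standalone sketch of the wildcard-collapsing rules in format_expression above; plain strings stand in for RedisFilterExpression operands, since the method only uses their str() form:

def fmt(left: str, right: str, op: str) -> str:
    if left == right == '*':
        return left
    if left == '*' != right:
        return right
    if right == '*' != left:
        return left
    return f'({left}{op}{right})'

assert fmt('*', '*', ' ') == '*'                        # both wildcards collapse
assert fmt('@color:{red}', '*', ' ') == '@color:{red}'  # wildcard side drops out
assert fmt('@color:{red}', '@price:[0 10]', ' ') == '(@color:{red} @price:[0 10])'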
_validate_client
|
if 'client' not in values:
values['client'] = nvai_common.NVEModel()
return values
|
@root_validator(pre=True)
def _validate_client(cls, values: Any) ->Any:
if 'client' not in values:
values['client'] = nvai_common.NVEModel()
return values
| null |
_openai_v1_installed
|
try:
return is_openai_v1()
except Exception:
return False
|
def _openai_v1_installed() ->bool:
try:
return is_openai_v1()
except Exception:
return False
| null |
test_openai_embedding_query
|
"""Test openai embeddings."""
document = 'foo bar'
embedding = OpenAIEmbeddings()
output = embedding.embed_query(document)
assert len(output) == 1536
|
@pytest.mark.scheduled
def test_openai_embedding_query() ->None:
"""Test openai embeddings."""
document = 'foo bar'
embedding = OpenAIEmbeddings()
output = embedding.embed_query(document)
assert len(output) == 1536
|
Test openai embeddings.
|