method_name
stringlengths 1
78
| method_body
stringlengths 3
9.66k
| full_code
stringlengths 31
10.7k
| docstring
stringlengths 4
4.74k
⌀ |
---|---|---|---|
test_confluence_loader_load_data_by_page_ids | mock_confluence.get_page_by_id.side_effect = [self._get_mock_page('123'),
self._get_mock_page('456')]
mock_confluence.get_all_restrictions_for_content.side_effect = [self.
_get_mock_page_restrictions('123'), self._get_mock_page_restrictions('456')
]
confluence_loader = self._get_mock_confluence_loader(mock_confluence)
mock_page_ids = ['123', '456']
documents = confluence_loader.load(page_ids=mock_page_ids)
assert mock_confluence.get_page_by_id.call_count == 2
assert mock_confluence.get_all_restrictions_for_content.call_count == 2
assert len(documents) == 2
assert all(isinstance(doc, Document) for doc in documents)
assert documents[0].page_content == 'Content 123'
assert documents[1].page_content == 'Content 456'
assert mock_confluence.get_all_pages_from_space.call_count == 0
assert mock_confluence.get_all_pages_by_label.call_count == 0
assert mock_confluence.cql.call_count == 0
assert mock_confluence.get_page_child_by_type.call_count == 0 | def test_confluence_loader_load_data_by_page_ids(self, mock_confluence:
MagicMock) ->None:
mock_confluence.get_page_by_id.side_effect = [self._get_mock_page('123'
), self._get_mock_page('456')]
mock_confluence.get_all_restrictions_for_content.side_effect = [self.
_get_mock_page_restrictions('123'), self.
_get_mock_page_restrictions('456')]
confluence_loader = self._get_mock_confluence_loader(mock_confluence)
mock_page_ids = ['123', '456']
documents = confluence_loader.load(page_ids=mock_page_ids)
assert mock_confluence.get_page_by_id.call_count == 2
assert mock_confluence.get_all_restrictions_for_content.call_count == 2
assert len(documents) == 2
assert all(isinstance(doc, Document) for doc in documents)
assert documents[0].page_content == 'Content 123'
assert documents[1].page_content == 'Content 456'
assert mock_confluence.get_all_pages_from_space.call_count == 0
assert mock_confluence.get_all_pages_by_label.call_count == 0
assert mock_confluence.cql.call_count == 0
assert mock_confluence.get_page_child_by_type.call_count == 0 | null |
lookup_with_id_through_llm | llm_string = get_prompts({**llm.dict(), **{'stop': stop}}, [])[1]
return self.lookup_with_id(prompt, llm_string=llm_string) | def lookup_with_id_through_llm(self, prompt: str, llm: LLM, stop: Optional[
List[str]]=None) ->Optional[Tuple[str, RETURN_VAL_TYPE]]:
llm_string = get_prompts({**llm.dict(), **{'stop': stop}}, [])[1]
return self.lookup_with_id(prompt, llm_string=llm_string) | null |
_llm_type | """Return type of chat model."""
return 'mlflow-ai-gateway-chat' | @property
def _llm_type(self) ->str:
"""Return type of chat model."""
return 'mlflow-ai-gateway-chat' | Return type of chat model. |
plan | """Given input, decided what to do.
Args:
intermediate_steps: Steps the LLM has taken to date,
along with the observations.
callbacks: Callbacks to run.
**kwargs: User inputs.
Returns:
Action specifying what tool to use.
"""
output = self.llm_chain.run(intermediate_steps=intermediate_steps, stop=
self.stop, callbacks=callbacks, **kwargs)
return self.output_parser.parse(output) | def plan(self, intermediate_steps: List[Tuple[AgentAction, str]], callbacks:
Callbacks=None, **kwargs: Any) ->Union[AgentAction, AgentFinish]:
"""Given input, decided what to do.
Args:
intermediate_steps: Steps the LLM has taken to date,
along with the observations.
callbacks: Callbacks to run.
**kwargs: User inputs.
Returns:
Action specifying what tool to use.
"""
output = self.llm_chain.run(intermediate_steps=intermediate_steps, stop
=self.stop, callbacks=callbacks, **kwargs)
return self.output_parser.parse(output) | Given input, decided what to do.
Args:
intermediate_steps: Steps the LLM has taken to date,
along with the observations.
callbacks: Callbacks to run.
**kwargs: User inputs.
Returns:
Action specifying what tool to use. |
test_with_types_with_type_generics | """Verify that with_types works if we use things like List[int]"""
def foo(x: int) ->None:
"""Add one to the input."""
raise NotImplementedError()
RunnableLambda(foo).with_types(output_type=List[int], input_type=List[int])
RunnableLambda(foo).with_types(output_type=Sequence[int], input_type=
Sequence[int]) | def test_with_types_with_type_generics() ->None:
"""Verify that with_types works if we use things like List[int]"""
def foo(x: int) ->None:
"""Add one to the input."""
raise NotImplementedError()
RunnableLambda(foo).with_types(output_type=List[int], input_type=List[int])
RunnableLambda(foo).with_types(output_type=Sequence[int], input_type=
Sequence[int]) | Verify that with_types works if we use things like List[int] |
visit_operation | args = [arg.accept(self) for arg in operation.arguments]
return {self._format_func(operation.operator): args} | def visit_operation(self, operation: Operation) ->Dict:
args = [arg.accept(self) for arg in operation.arguments]
return {self._format_func(operation.operator): args} | null |
lazy_load | """Lazy load text from the url(s) in web_path."""
for path in self.web_paths:
soup = self._scrape(path, bs_kwargs=self.bs_kwargs)
text = soup.get_text(**self.bs_get_text_kwargs)
metadata = _build_metadata(soup, path)
yield Document(page_content=text, metadata=metadata) | def lazy_load(self) ->Iterator[Document]:
"""Lazy load text from the url(s) in web_path."""
for path in self.web_paths:
soup = self._scrape(path, bs_kwargs=self.bs_kwargs)
text = soup.get_text(**self.bs_get_text_kwargs)
metadata = _build_metadata(soup, path)
yield Document(page_content=text, metadata=metadata) | Lazy load text from the url(s) in web_path. |
__init__ | """Initializes the loader.
Args:
config: The config to pass to the source connector.
stream_name: The name of the stream to load.
record_handler: A function that takes in a record and an optional id and
returns a Document. If None, the record will be used as the document.
Defaults to None.
state: The state to pass to the source connector. Defaults to None.
"""
source_class = guard_import('source_hubspot', pip_name='airbyte-source-hubspot'
).SourceHubspot
super().__init__(config=config, source_class=source_class, stream_name=
stream_name, record_handler=record_handler, state=state) | def __init__(self, config: Mapping[str, Any], stream_name: str,
record_handler: Optional[RecordHandler]=None, state: Optional[Any]=None
) ->None:
"""Initializes the loader.
Args:
config: The config to pass to the source connector.
stream_name: The name of the stream to load.
record_handler: A function that takes in a record and an optional id and
returns a Document. If None, the record will be used as the document.
Defaults to None.
state: The state to pass to the source connector. Defaults to None.
"""
source_class = guard_import('source_hubspot', pip_name=
'airbyte-source-hubspot').SourceHubspot
super().__init__(config=config, source_class=source_class, stream_name=
stream_name, record_handler=record_handler, state=state) | Initializes the loader.
Args:
config: The config to pass to the source connector.
stream_name: The name of the stream to load.
record_handler: A function that takes in a record and an optional id and
returns a Document. If None, the record will be used as the document.
Defaults to None.
state: The state to pass to the source connector. Defaults to None. |
test_sql_database_run | """Test that commands can be run successfully and returned in correct format."""
engine = create_engine('sqlite:///:memory:')
metadata_obj.create_all(engine)
stmt = insert(user).values(user_id=13, user_name='Harrison', user_bio=
'That is my Bio ' * 24)
with engine.begin() as conn:
conn.execute(stmt)
db = SQLDatabase(engine)
command = 'select user_id, user_name, user_bio from user where user_id = 13'
partial_output = db.run(command)
user_bio = 'That is my Bio ' * 19 + 'That is my...'
expected_partial_output = f"[(13, 'Harrison', '{user_bio}')]"
assert partial_output == expected_partial_output
full_output = db.run(command, include_columns=True)
expected_full_output = (
"[{'user_id': 13, 'user_name': 'Harrison', 'user_bio': '%s'}]" % user_bio)
assert full_output == expected_full_output | def test_sql_database_run() ->None:
"""Test that commands can be run successfully and returned in correct format."""
engine = create_engine('sqlite:///:memory:')
metadata_obj.create_all(engine)
stmt = insert(user).values(user_id=13, user_name='Harrison', user_bio=
'That is my Bio ' * 24)
with engine.begin() as conn:
conn.execute(stmt)
db = SQLDatabase(engine)
command = (
'select user_id, user_name, user_bio from user where user_id = 13')
partial_output = db.run(command)
user_bio = 'That is my Bio ' * 19 + 'That is my...'
expected_partial_output = f"[(13, 'Harrison', '{user_bio}')]"
assert partial_output == expected_partial_output
full_output = db.run(command, include_columns=True)
expected_full_output = (
"[{'user_id': 13, 'user_name': 'Harrison', 'user_bio': '%s'}]" %
user_bio)
assert full_output == expected_full_output | Test that commands can be run successfully and returned in correct format. |
refresh_schema | """Refreshes the schema of the FalkorDB database"""
node_properties: List[Any] = self.query(node_properties_query)
rel_properties: List[Any] = self.query(rel_properties_query)
relationships: List[Any] = self.query(rel_query)
self.structured_schema = {'node_props': {el[0]['label']: el[0]['keys'] for
el in node_properties}, 'rel_props': {el[0]['types']: el[0]['keys'] for
el in rel_properties}, 'relationships': [el[0] for el in relationships]}
self.schema = f"""Node properties: {node_properties}
Relationships properties: {rel_properties}
Relationships: {relationships}
""" | def refresh_schema(self) ->None:
"""Refreshes the schema of the FalkorDB database"""
node_properties: List[Any] = self.query(node_properties_query)
rel_properties: List[Any] = self.query(rel_properties_query)
relationships: List[Any] = self.query(rel_query)
self.structured_schema = {'node_props': {el[0]['label']: el[0]['keys'] for
el in node_properties}, 'rel_props': {el[0]['types']: el[0]['keys'] for
el in rel_properties}, 'relationships': [el[0] for el in relationships]
}
self.schema = f"""Node properties: {node_properties}
Relationships properties: {rel_properties}
Relationships: {relationships}
""" | Refreshes the schema of the FalkorDB database |
external_import_error | if name == 'streamlit.external.langchain':
raise ImportError
return self.builtins_import(name, globals, locals, fromlist, level) | def external_import_error(name: str, globals: Any, locals: Any, fromlist:
Any, level: int) ->Any:
if name == 'streamlit.external.langchain':
raise ImportError
return self.builtins_import(name, globals, locals, fromlist, level) | null |
equals | if not isinstance(other, type(self)):
return False
return self._field == other._field and self._value == other._value | def equals(self, other: 'RedisFilterField') ->bool:
if not isinstance(other, type(self)):
return False
return self._field == other._field and self._value == other._value | null |
__init__ | self.mlflow = import_mlflow()
if 'DATABRICKS_RUNTIME_VERSION' in os.environ:
self.mlflow.set_tracking_uri('databricks')
self.mlf_expid = self.mlflow.tracking.fluent._get_experiment_id()
self.mlf_exp = self.mlflow.get_experiment(self.mlf_expid)
else:
tracking_uri = get_from_dict_or_env(kwargs, 'tracking_uri',
'MLFLOW_TRACKING_URI', '')
self.mlflow.set_tracking_uri(tracking_uri)
experiment_name = get_from_dict_or_env(kwargs, 'experiment_name',
'MLFLOW_EXPERIMENT_NAME')
self.mlf_exp = self.mlflow.get_experiment_by_name(experiment_name)
if self.mlf_exp is not None:
self.mlf_expid = self.mlf_exp.experiment_id
else:
self.mlf_expid = self.mlflow.create_experiment(experiment_name)
self.start_run(kwargs['run_name'], kwargs['run_tags']) | def __init__(self, **kwargs: Any):
self.mlflow = import_mlflow()
if 'DATABRICKS_RUNTIME_VERSION' in os.environ:
self.mlflow.set_tracking_uri('databricks')
self.mlf_expid = self.mlflow.tracking.fluent._get_experiment_id()
self.mlf_exp = self.mlflow.get_experiment(self.mlf_expid)
else:
tracking_uri = get_from_dict_or_env(kwargs, 'tracking_uri',
'MLFLOW_TRACKING_URI', '')
self.mlflow.set_tracking_uri(tracking_uri)
experiment_name = get_from_dict_or_env(kwargs, 'experiment_name',
'MLFLOW_EXPERIMENT_NAME')
self.mlf_exp = self.mlflow.get_experiment_by_name(experiment_name)
if self.mlf_exp is not None:
self.mlf_expid = self.mlf_exp.experiment_id
else:
self.mlf_expid = self.mlflow.create_experiment(experiment_name)
self.start_run(kwargs['run_name'], kwargs['run_tags']) | null |
test_default_w_embeddings_on | llm, PROMPT = setup()
feature_embedder = pick_best_chain.PickBestFeatureEmbedder(auto_embed=True,
model=MockEncoderReturnsList())
chain = pick_best_chain.PickBest.from_llm(llm=llm, prompt=PROMPT,
feature_embedder=feature_embedder, auto_embed=True)
str1 = '0'
str2 = '1'
ctx_str_1 = 'context1'
dot_prod = 'dotprod 0:5.0'
expected = f"""shared |User {ctx_str_1} |@ User={ctx_str_1}
|action {str1} |# action={str1} |{dot_prod}
|action {str2} |# action={str2} |{dot_prod}"""
actions = [str1, str2]
response = chain.run(User=rl_chain.BasedOn(ctx_str_1), action=rl_chain.
ToSelectFrom(actions))
selection_metadata = response['selection_metadata']
vw_str = feature_embedder.format(selection_metadata)
assert vw_str == expected | @pytest.mark.requires('vowpal_wabbit_next', 'sentence_transformers')
def test_default_w_embeddings_on() ->None:
llm, PROMPT = setup()
feature_embedder = pick_best_chain.PickBestFeatureEmbedder(auto_embed=
True, model=MockEncoderReturnsList())
chain = pick_best_chain.PickBest.from_llm(llm=llm, prompt=PROMPT,
feature_embedder=feature_embedder, auto_embed=True)
str1 = '0'
str2 = '1'
ctx_str_1 = 'context1'
dot_prod = 'dotprod 0:5.0'
expected = f"""shared |User {ctx_str_1} |@ User={ctx_str_1}
|action {str1} |# action={str1} |{dot_prod}
|action {str2} |# action={str2} |{dot_prod}"""
actions = [str1, str2]
response = chain.run(User=rl_chain.BasedOn(ctx_str_1), action=rl_chain.
ToSelectFrom(actions))
selection_metadata = response['selection_metadata']
vw_str = feature_embedder.format(selection_metadata)
assert vw_str == expected | null |
embed_documents | """Embed a list of documents using EdenAI.
Args:
texts: The list of texts to embed.
Returns:
List of embeddings, one for each text.
"""
return self._generate_embeddings(texts) | def embed_documents(self, texts: List[str]) ->List[List[float]]:
"""Embed a list of documents using EdenAI.
Args:
texts: The list of texts to embed.
Returns:
List of embeddings, one for each text.
"""
return self._generate_embeddings(texts) | Embed a list of documents using EdenAI.
Args:
texts: The list of texts to embed.
Returns:
List of embeddings, one for each text. |
embed_documents | """
Make a list of texts into a list of embedding vectors.
"""
return [self.embed_query(text) for text in texts] | def embed_documents(self, texts: List[str]) ->List[List[float]]:
"""
Make a list of texts into a list of embedding vectors.
"""
return [self.embed_query(text) for text in texts] | Make a list of texts into a list of embedding vectors. |
_generate | if self.streaming:
raise ValueError('`streaming` option currently unsupported.')
if not self.access_token:
self._refresh_access_token_with_lock()
payload = {'messages': [_convert_message_to_dict(m) for m in messages],
'top_p': self.top_p, 'temperature': self.temperature, 'penalty_score':
self.penalty_score, 'system': self.system, **kwargs}
logger.debug(f'Payload for ernie api is {payload}')
resp = self._chat(payload)
if resp.get('error_code'):
if resp.get('error_code') == 111:
logger.debug('access_token expired, refresh it')
self._refresh_access_token_with_lock()
resp = self._chat(payload)
else:
raise ValueError(f'Error from ErnieChat api response: {resp}')
return self._create_chat_result(resp) | def _generate(self, messages: List[BaseMessage], stop: Optional[List[str]]=
None, run_manager: Optional[CallbackManagerForLLMRun]=None, **kwargs: Any
) ->ChatResult:
if self.streaming:
raise ValueError('`streaming` option currently unsupported.')
if not self.access_token:
self._refresh_access_token_with_lock()
payload = {'messages': [_convert_message_to_dict(m) for m in messages],
'top_p': self.top_p, 'temperature': self.temperature,
'penalty_score': self.penalty_score, 'system': self.system, **kwargs}
logger.debug(f'Payload for ernie api is {payload}')
resp = self._chat(payload)
if resp.get('error_code'):
if resp.get('error_code') == 111:
logger.debug('access_token expired, refresh it')
self._refresh_access_token_with_lock()
resp = self._chat(payload)
else:
raise ValueError(f'Error from ErnieChat api response: {resp}')
return self._create_chat_result(resp) | null |
embed_query | """Return simple embeddings."""
return [float(1.0)] * (OS_TOKEN_COUNT - 1) + [float(texts.index(text))] | def embed_query(self, text: str) ->List[float]:
"""Return simple embeddings."""
return [float(1.0)] * (OS_TOKEN_COUNT - 1) + [float(texts.index(text))] | Return simple embeddings. |
test_similarity_search_approx_with_hybrid_search | """Test end to end construction and search with metadata."""
texts = ['foo', 'bar', 'baz']
docsearch = ElasticsearchStore.from_texts(texts, FakeEmbeddings(), **
elasticsearch_connection, index_name=index_name, strategy=
ElasticsearchStore.ApproxRetrievalStrategy(hybrid=True))
def assert_query(query_body: dict, query: str) ->dict:
assert query_body == {'knn': {'field': 'vector', 'filter': [], 'k': 1,
'num_candidates': 50, 'query_vector': [1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 0.0]}, 'query': {'bool': {'filter': [], 'must': [{
'match': {'text': {'query': 'foo'}}}]}}, 'rank': {'rrf': {}}}
return query_body
output = docsearch.similarity_search('foo', k=1, custom_query=assert_query)
assert output == [Document(page_content='foo')] | def test_similarity_search_approx_with_hybrid_search(self,
elasticsearch_connection: dict, index_name: str) ->None:
"""Test end to end construction and search with metadata."""
texts = ['foo', 'bar', 'baz']
docsearch = ElasticsearchStore.from_texts(texts, FakeEmbeddings(), **
elasticsearch_connection, index_name=index_name, strategy=
ElasticsearchStore.ApproxRetrievalStrategy(hybrid=True))
def assert_query(query_body: dict, query: str) ->dict:
assert query_body == {'knn': {'field': 'vector', 'filter': [], 'k':
1, 'num_candidates': 50, 'query_vector': [1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 1.0, 1.0, 0.0]}, 'query': {'bool': {'filter': [],
'must': [{'match': {'text': {'query': 'foo'}}}]}}, 'rank': {
'rrf': {}}}
return query_body
output = docsearch.similarity_search('foo', k=1, custom_query=assert_query)
assert output == [Document(page_content='foo')] | Test end to end construction and search with metadata. |
test_max_marginal_relevance_search | """Test MRR search."""
metadatas = [{'page': i} for i in range(len(texts))]
docsearch = DocArrayHnswSearch.from_texts(texts, FakeEmbeddings(),
metadatas=metadatas, dist_metric=metric, work_dir=str(tmp_path), n_dim=10)
output = docsearch.max_marginal_relevance_search('foo', k=2, fetch_k=3)
assert output == [Document(page_content='foo', metadata={'page': 0}),
Document(page_content='bar', metadata={'page': 1})] | @pytest.mark.parametrize('metric', ['cosine', 'l2'])
def test_max_marginal_relevance_search(metric: str, texts: List[str],
tmp_path: Path) ->None:
"""Test MRR search."""
metadatas = [{'page': i} for i in range(len(texts))]
docsearch = DocArrayHnswSearch.from_texts(texts, FakeEmbeddings(),
metadatas=metadatas, dist_metric=metric, work_dir=str(tmp_path),
n_dim=10)
output = docsearch.max_marginal_relevance_search('foo', k=2, fetch_k=3)
assert output == [Document(page_content='foo', metadata={'page': 0}),
Document(page_content='bar', metadata={'page': 1})] | Test MRR search. |
clear | """Clear session memory from DB"""
self.collection.delete_many(filter={'session_id': self.session_id}) | def clear(self) ->None:
"""Clear session memory from DB"""
self.collection.delete_many(filter={'session_id': self.session_id}) | Clear session memory from DB |
test_debug_is_settable_directly | from langchain_core.callbacks.manager import _get_debug
import langchain
previous_value = langchain.debug
previous_fn_reading = _get_debug()
assert previous_value == previous_fn_reading
langchain.debug = not previous_value
new_value = langchain.debug
new_fn_reading = _get_debug()
try:
assert new_value != previous_value
assert new_value == new_fn_reading
assert new_value == get_debug()
finally:
set_debug(previous_value) | def test_debug_is_settable_directly() ->None:
from langchain_core.callbacks.manager import _get_debug
import langchain
previous_value = langchain.debug
previous_fn_reading = _get_debug()
assert previous_value == previous_fn_reading
langchain.debug = not previous_value
new_value = langchain.debug
new_fn_reading = _get_debug()
try:
assert new_value != previous_value
assert new_value == new_fn_reading
assert new_value == get_debug()
finally:
set_debug(previous_value) | null |
test_invoke | """Test invoke tokens from ChatAnthropicMessages."""
llm = ChatAnthropicMessages(model_name='claude-instant-1.2')
result = llm.invoke("I'm Pickle Rick", config=dict(tags=['foo']))
assert isinstance(result.content, str) | def test_invoke() ->None:
"""Test invoke tokens from ChatAnthropicMessages."""
llm = ChatAnthropicMessages(model_name='claude-instant-1.2')
result = llm.invoke("I'm Pickle Rick", config=dict(tags=['foo']))
assert isinstance(result.content, str) | Test invoke tokens from ChatAnthropicMessages. |
load | """Load given path as pages."""
return list(self.lazy_load()) | def load(self) ->List[Document]:
"""Load given path as pages."""
return list(self.lazy_load()) | Load given path as pages. |
_create_retry_decorator | """Returns a tenacity retry decorator, preconfigured to handle PaLM exceptions"""
import google.api_core.exceptions
multiplier = 2
min_seconds = 1
max_seconds = 60
max_retries = 10
return retry(reraise=True, stop=stop_after_attempt(max_retries), wait=
wait_exponential(multiplier=multiplier, min=min_seconds, max=
max_seconds), retry=retry_if_exception_type(google.api_core.exceptions.
ResourceExhausted) | retry_if_exception_type(google.api_core.exceptions
.ServiceUnavailable) | retry_if_exception_type(google.api_core.
exceptions.GoogleAPIError), before_sleep=before_sleep_log(logger,
logging.WARNING)) | def _create_retry_decorator() ->Callable[[Any], Any]:
"""Returns a tenacity retry decorator, preconfigured to handle PaLM exceptions"""
import google.api_core.exceptions
multiplier = 2
min_seconds = 1
max_seconds = 60
max_retries = 10
return retry(reraise=True, stop=stop_after_attempt(max_retries), wait=
wait_exponential(multiplier=multiplier, min=min_seconds, max=
max_seconds), retry=retry_if_exception_type(google.api_core.
exceptions.ResourceExhausted) | retry_if_exception_type(google.
api_core.exceptions.ServiceUnavailable) | retry_if_exception_type(
google.api_core.exceptions.GoogleAPIError), before_sleep=
before_sleep_log(logger, logging.WARNING)) | Returns a tenacity retry decorator, preconfigured to handle PaLM exceptions |
_similarity_search_with_relevance_scores | """Return docs and relevance scores, normalized on a scale from 0 to 1.
0 is dissimilar, 1 is most similar.
"""
raise NotImplementedError() | def _similarity_search_with_relevance_scores(self, query: str, k: int=4, **
kwargs: Any) ->List[Tuple[Document, float]]:
"""Return docs and relevance scores, normalized on a scale from 0 to 1.
0 is dissimilar, 1 is most similar.
"""
raise NotImplementedError() | Return docs and relevance scores, normalized on a scale from 0 to 1.
0 is dissimilar, 1 is most similar. |
__init__ | """Initialize with domain, access_token (tenant / user), and document_id.
Args:
domain: The domain to load the LarkSuite.
access_token: The access_token to use.
document_id: The document_id to load.
"""
self.domain = domain
self.access_token = access_token
self.document_id = document_id | def __init__(self, domain: str, access_token: str, document_id: str):
"""Initialize with domain, access_token (tenant / user), and document_id.
Args:
domain: The domain to load the LarkSuite.
access_token: The access_token to use.
document_id: The document_id to load.
"""
self.domain = domain
self.access_token = access_token
self.document_id = document_id | Initialize with domain, access_token (tenant / user), and document_id.
Args:
domain: The domain to load the LarkSuite.
access_token: The access_token to use.
document_id: The document_id to load. |
flush_tracker | """Flush the tracker and reset the session.
Args:
repo (:obj:`str`, optional): Aim repository path or Repo object to which
Run object is bound. If skipped, default Repo is used.
experiment_name (:obj:`str`, optional): Sets Run's `experiment` property.
'default' if not specified. Can be used later to query runs/sequences.
system_tracking_interval (:obj:`int`, optional): Sets the tracking interval
in seconds for system usage metrics (CPU, Memory, etc.). Set to `None`
to disable system metrics tracking.
log_system_params (:obj:`bool`, optional): Enable/Disable logging of system
params such as installed packages, git info, environment variables, etc.
langchain_asset: The langchain asset to save.
reset: Whether to reset the session.
finish: Whether to finish the run.
Returns:
None
"""
if langchain_asset:
try:
for key, value in langchain_asset.dict().items():
self._run.set(key, value, strict=False)
except Exception:
pass
if finish or reset:
self._run.close()
self.reset_callback_meta()
if reset:
self.__init__(repo=repo if repo else self.repo, experiment_name=
experiment_name if experiment_name else self.experiment_name,
system_tracking_interval=system_tracking_interval if
system_tracking_interval else self.system_tracking_interval,
log_system_params=log_system_params if log_system_params else self.
log_system_params) | def flush_tracker(self, repo: Optional[str]=None, experiment_name: Optional
[str]=None, system_tracking_interval: Optional[int]=10,
log_system_params: bool=True, langchain_asset: Any=None, reset: bool=
True, finish: bool=False) ->None:
"""Flush the tracker and reset the session.
Args:
repo (:obj:`str`, optional): Aim repository path or Repo object to which
Run object is bound. If skipped, default Repo is used.
experiment_name (:obj:`str`, optional): Sets Run's `experiment` property.
'default' if not specified. Can be used later to query runs/sequences.
system_tracking_interval (:obj:`int`, optional): Sets the tracking interval
in seconds for system usage metrics (CPU, Memory, etc.). Set to `None`
to disable system metrics tracking.
log_system_params (:obj:`bool`, optional): Enable/Disable logging of system
params such as installed packages, git info, environment variables, etc.
langchain_asset: The langchain asset to save.
reset: Whether to reset the session.
finish: Whether to finish the run.
Returns:
None
"""
if langchain_asset:
try:
for key, value in langchain_asset.dict().items():
self._run.set(key, value, strict=False)
except Exception:
pass
if finish or reset:
self._run.close()
self.reset_callback_meta()
if reset:
self.__init__(repo=repo if repo else self.repo, experiment_name=
experiment_name if experiment_name else self.experiment_name,
system_tracking_interval=system_tracking_interval if
system_tracking_interval else self.system_tracking_interval,
log_system_params=log_system_params if log_system_params else
self.log_system_params) | Flush the tracker and reset the session.
Args:
repo (:obj:`str`, optional): Aim repository path or Repo object to which
Run object is bound. If skipped, default Repo is used.
experiment_name (:obj:`str`, optional): Sets Run's `experiment` property.
'default' if not specified. Can be used later to query runs/sequences.
system_tracking_interval (:obj:`int`, optional): Sets the tracking interval
in seconds for system usage metrics (CPU, Memory, etc.). Set to `None`
to disable system metrics tracking.
log_system_params (:obj:`bool`, optional): Enable/Disable logging of system
params such as installed packages, git info, environment variables, etc.
langchain_asset: The langchain asset to save.
reset: Whether to reset the session.
finish: Whether to finish the run.
Returns:
None |
_llm_type | """Return type of llm."""
return 'deepsparse' | @property
def _llm_type(self) ->str:
"""Return type of llm."""
return 'deepsparse' | Return type of llm. |
_result_to_document | main_meta = {'title': outline_res['document']['title'], 'source': self.
outline_instance_url + outline_res['document']['url']}
add_meta = {'id': outline_res['document']['id'], 'ranking': outline_res[
'ranking'], 'collection_id': outline_res['document']['collectionId'],
'parent_document_id': outline_res['document']['parentDocumentId'],
'revision': outline_res['document']['revision'], 'created_by':
outline_res['document']['createdBy']['name']
} if self.load_all_available_meta else {}
doc = Document(page_content=outline_res['document']['text'][:self.
doc_content_chars_max], metadata={**main_meta, **add_meta})
return doc | def _result_to_document(self, outline_res: Any) ->Document:
main_meta = {'title': outline_res['document']['title'], 'source': self.
outline_instance_url + outline_res['document']['url']}
add_meta = {'id': outline_res['document']['id'], 'ranking': outline_res
['ranking'], 'collection_id': outline_res['document'][
'collectionId'], 'parent_document_id': outline_res['document'][
'parentDocumentId'], 'revision': outline_res['document']['revision'
], 'created_by': outline_res['document']['createdBy']['name']
} if self.load_all_available_meta else {}
doc = Document(page_content=outline_res['document']['text'][:self.
doc_content_chars_max], metadata={**main_meta, **add_meta})
return doc | null |
_build_llm_df | base_df_fields = [field for field in base_df_fields if field in base_df]
rename_map = {map_entry_k: map_entry_v for map_entry_k, map_entry_v in
rename_map.items() if map_entry_k in base_df_fields}
llm_df = base_df[base_df_fields].dropna(axis=1)
if rename_map:
llm_df = llm_df.rename(rename_map, axis=1)
return llm_df | @staticmethod
def _build_llm_df(base_df: pd.DataFrame, base_df_fields: Sequence,
rename_map: Mapping) ->pd.DataFrame:
base_df_fields = [field for field in base_df_fields if field in base_df]
rename_map = {map_entry_k: map_entry_v for map_entry_k, map_entry_v in
rename_map.items() if map_entry_k in base_df_fields}
llm_df = base_df[base_df_fields].dropna(axis=1)
if rename_map:
llm_df = llm_df.rename(rename_map, axis=1)
return llm_df | null |
load_suggestions | """Load suggestions.
Args:
query: A query string
doc_type: The type of document to search for. Can be one of "all",
"device", "guide", "teardown", "answer", "wiki".
Returns:
"""
res = requests.get(IFIXIT_BASE_URL + '/suggest/' + query + '?doctypes=' +
doc_type)
if res.status_code != 200:
raise ValueError('Could not load suggestions for "' + query + '"\n' +
res.json())
data = res.json()
results = data['results']
output = []
for result in results:
try:
loader = IFixitLoader(result['url'])
if loader.page_type == 'Device':
output += loader.load_device(include_guides=False)
else:
output += loader.load()
except ValueError:
continue
return output | @staticmethod
def load_suggestions(query: str='', doc_type: str='all') ->List[Document]:
"""Load suggestions.
Args:
query: A query string
doc_type: The type of document to search for. Can be one of "all",
"device", "guide", "teardown", "answer", "wiki".
Returns:
"""
res = requests.get(IFIXIT_BASE_URL + '/suggest/' + query + '?doctypes=' +
doc_type)
if res.status_code != 200:
raise ValueError('Could not load suggestions for "' + query + '"\n' +
res.json())
data = res.json()
results = data['results']
output = []
for result in results:
try:
loader = IFixitLoader(result['url'])
if loader.page_type == 'Device':
output += loader.load_device(include_guides=False)
else:
output += loader.load()
except ValueError:
continue
return output | Load suggestions.
Args:
query: A query string
doc_type: The type of document to search for. Can be one of "all",
"device", "guide", "teardown", "answer", "wiki".
Returns: |
setup_class | assert os.getenv('XATA_API_KEY'
), 'XATA_API_KEY environment variable is not set'
assert os.getenv('XATA_DB_URL'), 'XATA_DB_URL environment variable is not set' | @classmethod
def setup_class(cls) ->None:
assert os.getenv('XATA_API_KEY'
), 'XATA_API_KEY environment variable is not set'
assert os.getenv('XATA_DB_URL'
), 'XATA_DB_URL environment variable is not set' | null |
test_load_full_confluence_space | loader = ConfluenceLoader(url='https://templates.atlassian.net/wiki/')
docs = loader.load(space_key='RD')
assert len(docs) == 14
assert docs[0].page_content is not None | @pytest.mark.skipif(not confluence_installed, reason=
'Atlassian package not installed')
def test_load_full_confluence_space() ->None:
loader = ConfluenceLoader(url='https://templates.atlassian.net/wiki/')
docs = loader.load(space_key='RD')
assert len(docs) == 14
assert docs[0].page_content is not None | null |
similarity_search_with_relevance_scores | """Perform a similarity search with ClickHouse
Args:
query (str): query string
k (int, optional): Top K neighbors to retrieve. Defaults to 4.
where_str (Optional[str], optional): where condition string.
Defaults to None.
NOTE: Please do not let end-user to fill this and always be aware
of SQL injection. When dealing with metadatas, remember to
use `{self.metadata_column}.attribute` instead of `attribute`
alone. The default name for it is `metadata`.
Returns:
List[Document]: List of (Document, similarity)
"""
q_str = self._build_query_sql(self.embedding_function.embed_query(query), k,
where_str)
try:
return [(Document(page_content=r[self.config.column_map['document']],
metadata=r[self.config.column_map['metadata']]), r['dist']) for r in
self.client.query(q_str).named_results()]
except Exception as e:
logger.error(f'\x1b[91m\x1b[1m{type(e)}\x1b[0m \x1b[95m{str(e)}\x1b[0m')
return [] | def similarity_search_with_relevance_scores(self, query: str, k: int=4,
where_str: Optional[str]=None, **kwargs: Any) ->List[Tuple[Document, float]
]:
"""Perform a similarity search with ClickHouse
Args:
query (str): query string
k (int, optional): Top K neighbors to retrieve. Defaults to 4.
where_str (Optional[str], optional): where condition string.
Defaults to None.
NOTE: Please do not let end-user to fill this and always be aware
of SQL injection. When dealing with metadatas, remember to
use `{self.metadata_column}.attribute` instead of `attribute`
alone. The default name for it is `metadata`.
Returns:
List[Document]: List of (Document, similarity)
"""
q_str = self._build_query_sql(self.embedding_function.embed_query(query
), k, where_str)
try:
return [(Document(page_content=r[self.config.column_map['document']
], metadata=r[self.config.column_map['metadata']]), r['dist']) for
r in self.client.query(q_str).named_results()]
except Exception as e:
logger.error(f'\x1b[91m\x1b[1m{type(e)}\x1b[0m \x1b[95m{str(e)}\x1b[0m'
)
return [] | Perform a similarity search with ClickHouse
Args:
query (str): query string
k (int, optional): Top K neighbors to retrieve. Defaults to 4.
where_str (Optional[str], optional): where condition string.
Defaults to None.
NOTE: Please do not let end-user to fill this and always be aware
of SQL injection. When dealing with metadatas, remember to
use `{self.metadata_column}.attribute` instead of `attribute`
alone. The default name for it is `metadata`.
Returns:
List[Document]: List of (Document, similarity) |
_call | _run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager()
_table_names = self.sql_chain.database.get_usable_table_names()
table_names = ', '.join(_table_names)
llm_inputs = {'query': inputs[self.input_key], 'table_names': table_names}
_lowercased_table_names = [name.lower() for name in _table_names]
table_names_from_chain = self.decider_chain.predict_and_parse(**llm_inputs)
table_names_to_use = [name for name in table_names_from_chain if name.lower
() in _lowercased_table_names]
_run_manager.on_text('Table names to use:', end='\n', verbose=self.verbose)
_run_manager.on_text(str(table_names_to_use), color='yellow', verbose=self.
verbose)
new_inputs = {self.sql_chain.input_key: inputs[self.input_key],
'table_names_to_use': table_names_to_use}
return self.sql_chain(new_inputs, callbacks=_run_manager.get_child(),
return_only_outputs=True) | def _call(self, inputs: Dict[str, Any], run_manager: Optional[
CallbackManagerForChainRun]=None) ->Dict[str, Any]:
_run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager()
_table_names = self.sql_chain.database.get_usable_table_names()
table_names = ', '.join(_table_names)
llm_inputs = {'query': inputs[self.input_key], 'table_names': table_names}
_lowercased_table_names = [name.lower() for name in _table_names]
table_names_from_chain = self.decider_chain.predict_and_parse(**llm_inputs)
table_names_to_use = [name for name in table_names_from_chain if name.
lower() in _lowercased_table_names]
_run_manager.on_text('Table names to use:', end='\n', verbose=self.verbose)
_run_manager.on_text(str(table_names_to_use), color='yellow', verbose=
self.verbose)
new_inputs = {self.sql_chain.input_key: inputs[self.input_key],
'table_names_to_use': table_names_to_use}
return self.sql_chain(new_inputs, callbacks=_run_manager.get_child(),
return_only_outputs=True) | null |
input_keys | """Expect url and browser content.
:meta private:
"""
return [self.input_url_key, self.input_browser_content_key] | @property
def input_keys(self) ->List[str]:
"""Expect url and browser content.
:meta private:
"""
return [self.input_url_key, self.input_browser_content_key] | Expect url and browser content.
:meta private: |
stream | yield from self.transform(iter([input]), config) | def stream(self, input: Input, config: Optional[RunnableConfig]=None, **
kwargs: Optional[Any]) ->Iterator[Dict[str, Any]]:
yield from self.transform(iter([input]), config) | null |
ignore_agent | """Whether to ignore agent callbacks."""
return self.ignore_agent_ | @property
def ignore_agent(self) ->bool:
"""Whether to ignore agent callbacks."""
return self.ignore_agent_ | Whether to ignore agent callbacks. |
_call | """Call out to Aleph Alpha's completion endpoint.
Args:
prompt: The prompt to pass into the model.
stop: Optional list of stop words to use when generating.
Returns:
The string generated by the model.
Example:
.. code-block:: python
response = aleph_alpha("Tell me a joke.")
"""
from aleph_alpha_client import CompletionRequest, Prompt
params = self._default_params
if self.stop_sequences is not None and stop is not None:
raise ValueError(
'stop sequences found in both the input and default params.')
elif self.stop_sequences is not None:
params['stop_sequences'] = self.stop_sequences
else:
params['stop_sequences'] = stop
params = {**params, **kwargs}
request = CompletionRequest(prompt=Prompt.from_text(prompt), **params)
response = self.client.complete(model=self.model, request=request)
text = response.completions[0].completion
if stop is not None or self.stop_sequences is not None:
text = enforce_stop_tokens(text, params['stop_sequences'])
return text | def _call(self, prompt: str, stop: Optional[List[str]]=None, run_manager:
Optional[CallbackManagerForLLMRun]=None, **kwargs: Any) ->str:
"""Call out to Aleph Alpha's completion endpoint.
Args:
prompt: The prompt to pass into the model.
stop: Optional list of stop words to use when generating.
Returns:
The string generated by the model.
Example:
.. code-block:: python
response = aleph_alpha("Tell me a joke.")
"""
from aleph_alpha_client import CompletionRequest, Prompt
params = self._default_params
if self.stop_sequences is not None and stop is not None:
raise ValueError(
'stop sequences found in both the input and default params.')
elif self.stop_sequences is not None:
params['stop_sequences'] = self.stop_sequences
else:
params['stop_sequences'] = stop
params = {**params, **kwargs}
request = CompletionRequest(prompt=Prompt.from_text(prompt), **params)
response = self.client.complete(model=self.model, request=request)
text = response.completions[0].completion
if stop is not None or self.stop_sequences is not None:
text = enforce_stop_tokens(text, params['stop_sequences'])
return text | Call out to Aleph Alpha's completion endpoint.
Args:
prompt: The prompt to pass into the model.
stop: Optional list of stop words to use when generating.
Returns:
The string generated by the model.
Example:
.. code-block:: python
response = aleph_alpha("Tell me a joke.") |
test_python_repl_tool_single_input | """Test that the python REPL tool works with a single input."""
tool = PythonREPLTool()
assert tool.is_single_input
assert int(tool.run('print(1 + 1)').strip()) == 2 | def test_python_repl_tool_single_input() ->None:
"""Test that the python REPL tool works with a single input."""
tool = PythonREPLTool()
assert tool.is_single_input
assert int(tool.run('print(1 + 1)').strip()) == 2 | Test that the python REPL tool works with a single input. |
_format_tweets | """Format tweets into a string."""
for tweet in tweets:
metadata = {'created_at': tweet['created_at'], 'user_info': user_info}
yield Document(page_content=tweet['text'], metadata=metadata) | def _format_tweets(self, tweets: List[Dict[str, Any]], user_info: dict
) ->Iterable[Document]:
"""Format tweets into a string."""
for tweet in tweets:
metadata = {'created_at': tweet['created_at'], 'user_info': user_info}
yield Document(page_content=tweet['text'], metadata=metadata) | Format tweets into a string. |
__init__ | self.value = value
self.keep = keep | def __init__(self, value: Any, keep: bool=False):
self.value = value
self.keep = keep | null |
test_tracer_chain_run | """Test tracer on a Chain run."""
uuid = uuid4()
compare_run = Run(id=str(uuid), start_time=datetime.now(timezone.utc),
end_time=datetime.now(timezone.utc), events=[{'name': 'start', 'time':
datetime.now(timezone.utc)}, {'name': 'end', 'time': datetime.now(
timezone.utc)}], extra={}, execution_order=1, child_execution_order=1,
serialized={'name': 'chain'}, inputs={}, outputs={}, error=None,
run_type='chain', trace_id=uuid, dotted_order=
f'20230101T000000000000Z{uuid}')
tracer = FakeTracer()
tracer.on_chain_start(serialized={'name': 'chain'}, inputs={}, run_id=uuid)
tracer.on_chain_end(outputs={}, run_id=uuid)
assert tracer.runs == [compare_run] | @freeze_time('2023-01-01')
def test_tracer_chain_run() ->None:
"""Test tracer on a Chain run."""
uuid = uuid4()
compare_run = Run(id=str(uuid), start_time=datetime.now(timezone.utc),
end_time=datetime.now(timezone.utc), events=[{'name': 'start',
'time': datetime.now(timezone.utc)}, {'name': 'end', 'time':
datetime.now(timezone.utc)}], extra={}, execution_order=1,
child_execution_order=1, serialized={'name': 'chain'}, inputs={},
outputs={}, error=None, run_type='chain', trace_id=uuid,
dotted_order=f'20230101T000000000000Z{uuid}')
tracer = FakeTracer()
tracer.on_chain_start(serialized={'name': 'chain'}, inputs={}, run_id=uuid)
tracer.on_chain_end(outputs={}, run_id=uuid)
assert tracer.runs == [compare_run] | Test tracer on a Chain run. |
test__collapse_docs_one_doc | """Test collapse documents functionality when only one document present."""
docs = [Document(page_content='foo')]
output = collapse_docs(docs, _fake_combine_docs_func)
assert output == docs[0]
docs = [Document(page_content='foo', metadata={'source': 'a'})]
output = collapse_docs(docs, _fake_combine_docs_func)
assert output == docs[0] | def test__collapse_docs_one_doc() ->None:
"""Test collapse documents functionality when only one document present."""
docs = [Document(page_content='foo')]
output = collapse_docs(docs, _fake_combine_docs_func)
assert output == docs[0]
docs = [Document(page_content='foo', metadata={'source': 'a'})]
output = collapse_docs(docs, _fake_combine_docs_func)
assert output == docs[0] | Test collapse documents functionality when only one document present. |
create | models = importlib.import_module('langchain.chat_models')
model_cls = getattr(models, provider)
model_config = model_cls(**kwargs)
converted_messages = convert_openai_messages(messages)
if not stream:
result = model_config.invoke(converted_messages)
return ChatCompletions(choices=[Choice(message=convert_message_to_dict(
result))])
else:
return (ChatCompletionChunk(choices=[ChoiceChunk(delta=
_convert_message_chunk(c, i))]) for i, c in enumerate(model_config.
stream(converted_messages))) | @staticmethod
def create(messages: Sequence[Dict[str, Any]], *, provider: str=
'ChatOpenAI', stream: bool=False, **kwargs: Any) ->Union[
ChatCompletions, Iterable]:
models = importlib.import_module('langchain.chat_models')
model_cls = getattr(models, provider)
model_config = model_cls(**kwargs)
converted_messages = convert_openai_messages(messages)
if not stream:
result = model_config.invoke(converted_messages)
return ChatCompletions(choices=[Choice(message=
convert_message_to_dict(result))])
else:
return (ChatCompletionChunk(choices=[ChoiceChunk(delta=
_convert_message_chunk(c, i))]) for i, c in enumerate(
model_config.stream(converted_messages))) | null |
_create_chat_result | generations = []
for res in response.choices:
message = convert_dict_to_message({'role': 'assistant', 'content': res.
text})
gen = ChatGeneration(message=message, generation_info=dict(
finish_reason=res.finish_reason))
generations.append(gen)
llm_output = {'token_usage': response.meta, 'model': response.model}
return ChatResult(generations=generations, llm_output=llm_output) | def _create_chat_result(self, response: GenerationResponse) ->ChatResult:
generations = []
for res in response.choices:
message = convert_dict_to_message({'role': 'assistant', 'content':
res.text})
gen = ChatGeneration(message=message, generation_info=dict(
finish_reason=res.finish_reason))
generations.append(gen)
llm_output = {'token_usage': response.meta, 'model': response.model}
return ChatResult(generations=generations, llm_output=llm_output) | null |
test_chat_bedrock_generate | """Test BedrockChat wrapper with generate."""
message = HumanMessage(content='Hello')
response = chat.generate([[message], [message]])
assert isinstance(response, LLMResult)
assert len(response.generations) == 2
for generations in response.generations:
for generation in generations:
assert isinstance(generation, ChatGeneration)
assert isinstance(generation.text, str)
assert generation.text == generation.message.content | @pytest.mark.scheduled
def test_chat_bedrock_generate(chat: BedrockChat) ->None:
"""Test BedrockChat wrapper with generate."""
message = HumanMessage(content='Hello')
response = chat.generate([[message], [message]])
assert isinstance(response, LLMResult)
assert len(response.generations) == 2
for generations in response.generations:
for generation in generations:
assert isinstance(generation, ChatGeneration)
assert isinstance(generation.text, str)
assert generation.text == generation.message.content | Test BedrockChat wrapper with generate. |
test_serialization | """Test serialization."""
from langchain.chains.loading import load_chain
with TemporaryDirectory() as temp_dir:
file = temp_dir + '/llm.json'
fake_llm_chain.save(file)
loaded_chain = load_chain(file)
assert loaded_chain == fake_llm_chain | @patch('langchain_community.llms.loading.get_type_to_cls_dict', lambda : {
'fake': lambda : FakeLLM})
def test_serialization(fake_llm_chain: LLMChain) ->None:
"""Test serialization."""
from langchain.chains.loading import load_chain
with TemporaryDirectory() as temp_dir:
file = temp_dir + '/llm.json'
fake_llm_chain.save(file)
loaded_chain = load_chain(file)
assert loaded_chain == fake_llm_chain | Test serialization. |
test_openlm_call | """Test valid call to openlm."""
llm = OpenLM(model_name='dolly-v2-7b', max_tokens=10)
output = llm(prompt='Say foo:')
assert isinstance(output, str) | def test_openlm_call() ->None:
"""Test valid call to openlm."""
llm = OpenLM(model_name='dolly-v2-7b', max_tokens=10)
output = llm(prompt='Say foo:')
assert isinstance(output, str) | Test valid call to openlm. |
test_character_text_splitter_keep_separator_regex | """Test splitting by characters while keeping the separator
that is a regex special character.
"""
text = 'foo.bar.baz.123'
splitter = CharacterTextSplitter(separator=separator, chunk_size=1,
chunk_overlap=0, keep_separator=True, is_separator_regex=is_separator_regex
)
output = splitter.split_text(text)
expected_output = ['foo', '.bar', '.baz', '.123']
assert output == expected_output | @pytest.mark.parametrize('separator, is_separator_regex', [(re.escape('.'),
True), ('.', False)])
def test_character_text_splitter_keep_separator_regex(separator: str,
is_separator_regex: bool) ->None:
"""Test splitting by characters while keeping the separator
that is a regex special character.
"""
text = 'foo.bar.baz.123'
splitter = CharacterTextSplitter(separator=separator, chunk_size=1,
chunk_overlap=0, keep_separator=True, is_separator_regex=
is_separator_regex)
output = splitter.split_text(text)
expected_output = ['foo', '.bar', '.baz', '.123']
assert output == expected_output | Test splitting by characters while keeping the separator
that is a regex special character. |
test_escaping_lucene | """Test escaping lucene characters"""
assert remove_lucene_chars('Hello+World') == 'Hello World'
assert remove_lucene_chars('Hello World\\') == 'Hello World'
assert remove_lucene_chars('It is the end of the world. Take shelter!'
) == 'It is the end of the world. Take shelter'
assert remove_lucene_chars('It is the end of the world. Take shelter&&'
) == 'It is the end of the world. Take shelter'
assert remove_lucene_chars('Bill&&Melinda Gates Foundation'
) == 'Bill Melinda Gates Foundation'
assert remove_lucene_chars('It is the end of the world. Take shelter(&&)'
) == 'It is the end of the world. Take shelter'
assert remove_lucene_chars('It is the end of the world. Take shelter??'
) == 'It is the end of the world. Take shelter'
assert remove_lucene_chars('It is the end of the world. Take shelter^'
) == 'It is the end of the world. Take shelter'
assert remove_lucene_chars('It is the end of the world. Take shelter+'
) == 'It is the end of the world. Take shelter'
assert remove_lucene_chars('It is the end of the world. Take shelter-'
) == 'It is the end of the world. Take shelter'
assert remove_lucene_chars('It is the end of the world. Take shelter~'
) == 'It is the end of the world. Take shelter' | def test_escaping_lucene() ->None:
"""Test escaping lucene characters"""
assert remove_lucene_chars('Hello+World') == 'Hello World'
assert remove_lucene_chars('Hello World\\') == 'Hello World'
assert remove_lucene_chars('It is the end of the world. Take shelter!'
) == 'It is the end of the world. Take shelter'
assert remove_lucene_chars('It is the end of the world. Take shelter&&'
) == 'It is the end of the world. Take shelter'
assert remove_lucene_chars('Bill&&Melinda Gates Foundation'
) == 'Bill Melinda Gates Foundation'
assert remove_lucene_chars('It is the end of the world. Take shelter(&&)'
) == 'It is the end of the world. Take shelter'
assert remove_lucene_chars('It is the end of the world. Take shelter??'
) == 'It is the end of the world. Take shelter'
assert remove_lucene_chars('It is the end of the world. Take shelter^'
) == 'It is the end of the world. Take shelter'
assert remove_lucene_chars('It is the end of the world. Take shelter+'
) == 'It is the end of the world. Take shelter'
assert remove_lucene_chars('It is the end of the world. Take shelter-'
) == 'It is the end of the world. Take shelter'
assert remove_lucene_chars('It is the end of the world. Take shelter~'
) == 'It is the end of the world. Take shelter' | Test escaping lucene characters |
stop_cb | """callback that stop continuous recognition"""
speech_recognizer.stop_continuous_recognition_async()
nonlocal done
done = True | def stop_cb(evt: Any) ->None:
"""callback that stop continuous recognition"""
speech_recognizer.stop_continuous_recognition_async()
nonlocal done
done = True | callback that stop continuous recognition |
test_graph_cypher_qa_chain_prompt_selection_5 | qa_prompt_template = 'QA Prompt'
cypher_prompt_template = 'Cypher Prompt'
memory = ConversationBufferMemory(memory_key='chat_history')
readonlymemory = ReadOnlySharedMemory(memory=memory)
qa_prompt = PromptTemplate(template=qa_prompt_template, input_variables=[])
cypher_prompt = PromptTemplate(template=cypher_prompt_template,
input_variables=[])
try:
GraphCypherQAChain.from_llm(llm=FakeLLM(), graph=FakeGraphStore(),
verbose=True, return_intermediate_steps=False, qa_prompt=qa_prompt,
cypher_prompt=cypher_prompt, cypher_llm_kwargs={'memory':
readonlymemory}, qa_llm_kwargs={'memory': readonlymemory})
assert False
except ValueError:
assert True | def test_graph_cypher_qa_chain_prompt_selection_5() ->None:
qa_prompt_template = 'QA Prompt'
cypher_prompt_template = 'Cypher Prompt'
memory = ConversationBufferMemory(memory_key='chat_history')
readonlymemory = ReadOnlySharedMemory(memory=memory)
qa_prompt = PromptTemplate(template=qa_prompt_template, input_variables=[])
cypher_prompt = PromptTemplate(template=cypher_prompt_template,
input_variables=[])
try:
GraphCypherQAChain.from_llm(llm=FakeLLM(), graph=FakeGraphStore(),
verbose=True, return_intermediate_steps=False, qa_prompt=
qa_prompt, cypher_prompt=cypher_prompt, cypher_llm_kwargs={
'memory': readonlymemory}, qa_llm_kwargs={'memory': readonlymemory}
)
assert False
except ValueError:
assert True | null |
_get_relevant_documents | """
Get the relevant documents for a given query.
Args:
query: The query to search for.
Returns:
A list of reranked documents.
"""
fused_documents = self.rank_fusion(query, run_manager)
return fused_documents | def _get_relevant_documents(self, query: str, *, run_manager:
CallbackManagerForRetrieverRun) ->List[Document]:
"""
Get the relevant documents for a given query.
Args:
query: The query to search for.
Returns:
A list of reranked documents.
"""
fused_documents = self.rank_fusion(query, run_manager)
return fused_documents | Get the relevant documents for a given query.
Args:
query: The query to search for.
Returns:
A list of reranked documents. |
_similarity_search_with_relevance_scores | docs_dists = self.similarity_search_with_score(query, k=k, **kwargs)
docs, dists = zip(*docs_dists)
scores = [(1 / math.exp(dist)) for dist in dists]
return list(zip(list(docs), scores)) | def _similarity_search_with_relevance_scores(self, query: str, k: int=
DEFAULT_K, **kwargs: Any) ->List[Tuple[Document, float]]:
docs_dists = self.similarity_search_with_score(query, k=k, **kwargs)
docs, dists = zip(*docs_dists)
scores = [(1 / math.exp(dist)) for dist in dists]
return list(zip(list(docs), scores)) | null |
_import_spark_sql_tool_BaseSparkSQLTool | from langchain_community.tools.spark_sql.tool import BaseSparkSQLTool
return BaseSparkSQLTool | def _import_spark_sql_tool_BaseSparkSQLTool() ->Any:
from langchain_community.tools.spark_sql.tool import BaseSparkSQLTool
return BaseSparkSQLTool | null |
_evaluate_agent_trajectory | """Evaluate a trajectory.
Args:
prediction (str): The final predicted response.
agent_trajectory (List[Tuple[AgentAction, str]]):
The intermediate steps forming the agent trajectory.
input (str): The input to the agent.
reference (Optional[str]): The reference answer.
Returns:
dict: The evaluation result.
""" | @abstractmethod
def _evaluate_agent_trajectory(self, *, prediction: str, agent_trajectory:
Sequence[Tuple[AgentAction, str]], input: str, reference: Optional[str]
=None, **kwargs: Any) ->dict:
"""Evaluate a trajectory.
Args:
prediction (str): The final predicted response.
agent_trajectory (List[Tuple[AgentAction, str]]):
The intermediate steps forming the agent trajectory.
input (str): The input to the agent.
reference (Optional[str]): The reference answer.
Returns:
dict: The evaluation result.
""" | Evaluate a trajectory.
Args:
prediction (str): The final predicted response.
agent_trajectory (List[Tuple[AgentAction, str]]):
The intermediate steps forming the agent trajectory.
input (str): The input to the agent.
reference (Optional[str]): The reference answer.
Returns:
dict: The evaluation result. |
_model_default_factory | try:
from transformers import AutoModelForSequenceClassification, AutoTokenizer, pipeline
except ImportError as e:
raise ImportError(
'Cannot import transformers, please install with `pip install transformers`.'
) from e
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
return pipeline('text-classification', model=model, tokenizer=tokenizer,
max_length=512, truncation=True) | def _model_default_factory(model_name: str=
'laiyer/deberta-v3-base-prompt-injection') ->Pipeline:
try:
from transformers import AutoModelForSequenceClassification, AutoTokenizer, pipeline
except ImportError as e:
raise ImportError(
'Cannot import transformers, please install with `pip install transformers`.'
) from e
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
return pipeline('text-classification', model=model, tokenizer=tokenizer,
max_length=512, truncation=True) | null |
from_documents | texts = [d.page_content for d in documents]
metadatas = [d.metadata for d in documents]
return cls.from_texts(texts, embedding, metadatas, index_name, content_key,
metadata_key, **kwargs) | @classmethod
def from_documents(cls, documents: List[Document], embedding: Embeddings,
metadatas: Optional[List[dict]]=None, index_name: str='langchain',
content_key: str='content', metadata_key: str='metadata', **kwargs: Any
) ->Tair:
texts = [d.page_content for d in documents]
metadatas = [d.metadata for d in documents]
return cls.from_texts(texts, embedding, metadatas, index_name,
content_key, metadata_key, **kwargs) | null |
__init__ | """Initialize a PALValidation instance.
Args:
solution_expression_name (str): Name of the expected solution expression.
If passed, solution_expression_type must be passed as well.
solution_expression_type (str): AST type of the expected solution
expression. If passed, solution_expression_name must be passed as well.
Must be one of PALValidation.SOLUTION_EXPRESSION_TYPE_FUNCTION,
PALValidation.SOLUTION_EXPRESSION_TYPE_VARIABLE.
allow_imports (bool): Allow import statements.
allow_command_exec (bool): Allow using known command execution functions.
"""
self.solution_expression_name = solution_expression_name
self.solution_expression_type = solution_expression_type
if solution_expression_name is not None:
if not isinstance(self.solution_expression_name, str):
raise ValueError(
f'Expected solution_expression_name to be str, instead found {type(self.solution_expression_name)}'
)
if solution_expression_type is not None:
if (self.solution_expression_type is not self.
SOLUTION_EXPRESSION_TYPE_FUNCTION and self.solution_expression_type
is not self.SOLUTION_EXPRESSION_TYPE_VARIABLE):
raise ValueError(
f'Expected solution_expression_type to be one of ({self.SOLUTION_EXPRESSION_TYPE_FUNCTION},{self.SOLUTION_EXPRESSION_TYPE_VARIABLE}),instead found {self.solution_expression_type}'
)
if solution_expression_name is not None and solution_expression_type is None:
raise TypeError(
'solution_expression_name requires solution_expression_type to be passed as well'
)
if solution_expression_name is None and solution_expression_type is not None:
raise TypeError(
'solution_expression_type requires solution_expression_name to be passed as well'
)
self.allow_imports = allow_imports
self.allow_command_exec = allow_command_exec | def __init__(self, solution_expression_name: Optional[str]=None,
solution_expression_type: Optional[type]=None, allow_imports: bool=
False, allow_command_exec: bool=False):
"""Initialize a PALValidation instance.
Args:
solution_expression_name (str): Name of the expected solution expression.
If passed, solution_expression_type must be passed as well.
solution_expression_type (str): AST type of the expected solution
expression. If passed, solution_expression_name must be passed as well.
Must be one of PALValidation.SOLUTION_EXPRESSION_TYPE_FUNCTION,
PALValidation.SOLUTION_EXPRESSION_TYPE_VARIABLE.
allow_imports (bool): Allow import statements.
allow_command_exec (bool): Allow using known command execution functions.
"""
self.solution_expression_name = solution_expression_name
self.solution_expression_type = solution_expression_type
if solution_expression_name is not None:
if not isinstance(self.solution_expression_name, str):
raise ValueError(
f'Expected solution_expression_name to be str, instead found {type(self.solution_expression_name)}'
)
if solution_expression_type is not None:
if (self.solution_expression_type is not self.
SOLUTION_EXPRESSION_TYPE_FUNCTION and self.
solution_expression_type is not self.
SOLUTION_EXPRESSION_TYPE_VARIABLE):
raise ValueError(
f'Expected solution_expression_type to be one of ({self.SOLUTION_EXPRESSION_TYPE_FUNCTION},{self.SOLUTION_EXPRESSION_TYPE_VARIABLE}),instead found {self.solution_expression_type}'
)
if (solution_expression_name is not None and solution_expression_type is
None):
raise TypeError(
'solution_expression_name requires solution_expression_type to be passed as well'
)
if (solution_expression_name is None and solution_expression_type is not
None):
raise TypeError(
'solution_expression_type requires solution_expression_name to be passed as well'
)
self.allow_imports = allow_imports
self.allow_command_exec = allow_command_exec | Initialize a PALValidation instance.
Args:
solution_expression_name (str): Name of the expected solution expression.
If passed, solution_expression_type must be passed as well.
solution_expression_type (str): AST type of the expected solution
expression. If passed, solution_expression_name must be passed as well.
Must be one of PALValidation.SOLUTION_EXPRESSION_TYPE_FUNCTION,
PALValidation.SOLUTION_EXPRESSION_TYPE_VARIABLE.
allow_imports (bool): Allow import statements.
allow_command_exec (bool): Allow using known command execution functions. |
_load_prompt | return create_prompt(num_plates=request.num_plates, num_rows=request.
num_rows, num_cols=request.num_cols) | def _load_prompt(request: FileProcessingRequest):
return create_prompt(num_plates=request.num_plates, num_rows=request.
num_rows, num_cols=request.num_cols) | null |
test_placeholder | """Used for compiling integration tests without running any real tests."""
pass | @pytest.mark.compile
def test_placeholder() ->None:
"""Used for compiling integration tests without running any real tests."""
pass | Used for compiling integration tests without running any real tests. |
validate_search_type | """Validate search type."""
if 'search_type' in values:
search_type = values['search_type']
if search_type not in ('similarity', 'hybrid', 'semantic_hybrid'):
raise ValueError(f'search_type of {search_type} not allowed.')
return values | @root_validator()
def validate_search_type(cls, values: Dict) ->Dict:
"""Validate search type."""
if 'search_type' in values:
search_type = values['search_type']
if search_type not in ('similarity', 'hybrid', 'semantic_hybrid'):
raise ValueError(f'search_type of {search_type} not allowed.')
return values | Validate search type. |
_collection_exists | """Checks whether a collection exists for this message history"""
try:
self.client.Collections.get(collection=self.collection)
except self.rockset.exceptions.NotFoundException:
return False
return True | def _collection_exists(self) ->bool:
"""Checks whether a collection exists for this message history"""
try:
self.client.Collections.get(collection=self.collection)
except self.rockset.exceptions.NotFoundException:
return False
return True | Checks whether a collection exists for this message history |
_import_json_tool_JsonGetValueTool | from langchain_community.tools.json.tool import JsonGetValueTool
return JsonGetValueTool | def _import_json_tool_JsonGetValueTool() ->Any:
from langchain_community.tools.json.tool import JsonGetValueTool
return JsonGetValueTool | null |
parse_filename | """Parse the filename from an url.
Args:
url: Url to parse the filename from.
Returns:
The filename.
"""
if (url_path := Path(url)) and url_path.suffix == '.pdf':
return url_path.name
else:
return self._parse_filename_from_url(url) | def parse_filename(self, url: str) ->str:
"""Parse the filename from an url.
Args:
url: Url to parse the filename from.
Returns:
The filename.
"""
if (url_path := Path(url)) and url_path.suffix == '.pdf':
return url_path.name
else:
return self._parse_filename_from_url(url) | Parse the filename from an url.
Args:
url: Url to parse the filename from.
Returns:
The filename. |
embeddings | return self.embedding_function | @property
def embeddings(self) ->Embeddings:
return self.embedding_function | null |
test_openai_invalid_model_kwargs | with pytest.raises(ValueError):
OpenAIEmbeddings(model_kwargs={'model': 'foo'}) | @pytest.mark.requires('openai')
def test_openai_invalid_model_kwargs() ->None:
with pytest.raises(ValueError):
OpenAIEmbeddings(model_kwargs={'model': 'foo'}) | null |
json | return {'status': 'ok', 'payload': base64.b64encode(bytes(
'{"some": "data"}}', 'utf-8'))} | def json(self) ->Any:
return {'status': 'ok', 'payload': base64.b64encode(bytes(
'{"some": "data"}}', 'utf-8'))} | null |
run | """Run query through Searx API and parse results.
You can pass any other params to the searx query API.
Args:
query: The query to search for.
query_suffix: Extra suffix appended to the query.
engines: List of engines to use for the query.
categories: List of categories to use for the query.
**kwargs: extra parameters to pass to the searx API.
Returns:
str: The result of the query.
Raises:
ValueError: If an error occurred with the query.
Example:
This will make a query to the qwant engine:
.. code-block:: python
from langchain_community.utilities import SearxSearchWrapper
searx = SearxSearchWrapper(searx_host="http://my.searx.host")
searx.run("what is the weather in France ?", engine="qwant")
# the same result can be achieved using the `!` syntax of searx
# to select the engine using `query_suffix`
searx.run("what is the weather in France ?", query_suffix="!qwant")
"""
_params = {'q': query}
params = {**self.params, **_params, **kwargs}
if self.query_suffix and len(self.query_suffix) > 0:
params['q'] += ' ' + self.query_suffix
if isinstance(query_suffix, str) and len(query_suffix) > 0:
params['q'] += ' ' + query_suffix
if isinstance(engines, list) and len(engines) > 0:
params['engines'] = ','.join(engines)
if isinstance(categories, list) and len(categories) > 0:
params['categories'] = ','.join(categories)
res = self._searx_api_query(params)
if len(res.answers) > 0:
toret = res.answers[0]
elif len(res.results) > 0:
toret = '\n\n'.join([r.get('content', '') for r in res.results[:self.k]])
else:
toret = 'No good search result found'
return toret | def run(self, query: str, engines: Optional[List[str]]=None, categories:
Optional[List[str]]=None, query_suffix: Optional[str]='', **kwargs: Any
) ->str:
"""Run query through Searx API and parse results.
You can pass any other params to the searx query API.
Args:
query: The query to search for.
query_suffix: Extra suffix appended to the query.
engines: List of engines to use for the query.
categories: List of categories to use for the query.
**kwargs: extra parameters to pass to the searx API.
Returns:
str: The result of the query.
Raises:
ValueError: If an error occurred with the query.
Example:
This will make a query to the qwant engine:
.. code-block:: python
from langchain_community.utilities import SearxSearchWrapper
searx = SearxSearchWrapper(searx_host="http://my.searx.host")
searx.run("what is the weather in France ?", engine="qwant")
# the same result can be achieved using the `!` syntax of searx
# to select the engine using `query_suffix`
searx.run("what is the weather in France ?", query_suffix="!qwant")
"""
_params = {'q': query}
params = {**self.params, **_params, **kwargs}
if self.query_suffix and len(self.query_suffix) > 0:
params['q'] += ' ' + self.query_suffix
if isinstance(query_suffix, str) and len(query_suffix) > 0:
params['q'] += ' ' + query_suffix
if isinstance(engines, list) and len(engines) > 0:
params['engines'] = ','.join(engines)
if isinstance(categories, list) and len(categories) > 0:
params['categories'] = ','.join(categories)
res = self._searx_api_query(params)
if len(res.answers) > 0:
toret = res.answers[0]
elif len(res.results) > 0:
toret = '\n\n'.join([r.get('content', '') for r in res.results[:
self.k]])
else:
toret = 'No good search result found'
return toret | Run query through Searx API and parse results.
You can pass any other params to the searx query API.
Args:
query: The query to search for.
query_suffix: Extra suffix appended to the query.
engines: List of engines to use for the query.
categories: List of categories to use for the query.
**kwargs: extra parameters to pass to the searx API.
Returns:
str: The result of the query.
Raises:
ValueError: If an error occurred with the query.
Example:
This will make a query to the qwant engine:
.. code-block:: python
from langchain_community.utilities import SearxSearchWrapper
searx = SearxSearchWrapper(searx_host="http://my.searx.host")
searx.run("what is the weather in France ?", engine="qwant")
# the same result can be achieved using the `!` syntax of searx
# to select the engine using `query_suffix`
searx.run("what is the weather in France ?", query_suffix="!qwant") |
llm_dataset_name | import pandas as pd
client = Client()
df = pd.DataFrame({'input': ["What's the capital of California?",
"What's the capital of Nevada?", "What's the capital of Oregon?",
"What's the capital of Washington?"], 'output': ['Sacramento',
'Carson City', 'Salem', 'Olympia']})
uid = str(uuid4())[-8:]
_dataset_name = f'lcp llm dataset integration tests - {uid}'
client.upload_dataframe(df, name=_dataset_name, input_keys=['input'],
output_keys=['output'], description='Integration test dataset',
data_type=DataType.llm)
yield _dataset_name | @pytest.fixture(scope='module')
def llm_dataset_name() ->Iterator[str]:
import pandas as pd
client = Client()
df = pd.DataFrame({'input': ["What's the capital of California?",
"What's the capital of Nevada?", "What's the capital of Oregon?",
"What's the capital of Washington?"], 'output': ['Sacramento',
'Carson City', 'Salem', 'Olympia']})
uid = str(uuid4())[-8:]
_dataset_name = f'lcp llm dataset integration tests - {uid}'
client.upload_dataframe(df, name=_dataset_name, input_keys=['input'],
output_keys=['output'], description='Integration test dataset',
data_type=DataType.llm)
yield _dataset_name | null |
on_agent_finish | """Do nothing""" | def on_agent_finish(self, finish: AgentFinish, **kwargs: Any) ->None:
"""Do nothing""" | Do nothing |
_call | jsonformer = import_jsonformer()
from transformers import Text2TextGenerationPipeline
pipeline = cast(Text2TextGenerationPipeline, self.pipeline)
model = jsonformer.Jsonformer(model=pipeline.model, tokenizer=pipeline.
tokenizer, json_schema=self.json_schema, prompt=prompt,
max_number_tokens=self.max_new_tokens, debug=self.debug)
text = model()
return json.dumps(text) | def _call(self, prompt: str, stop: Optional[List[str]]=None, run_manager:
Optional[CallbackManagerForLLMRun]=None, **kwargs: Any) ->str:
jsonformer = import_jsonformer()
from transformers import Text2TextGenerationPipeline
pipeline = cast(Text2TextGenerationPipeline, self.pipeline)
model = jsonformer.Jsonformer(model=pipeline.model, tokenizer=pipeline.
tokenizer, json_schema=self.json_schema, prompt=prompt,
max_number_tokens=self.max_new_tokens, debug=self.debug)
text = model()
return json.dumps(text) | null |
_import_faiss | from langchain_community.vectorstores.faiss import FAISS
return FAISS | def _import_faiss() ->Any:
from langchain_community.vectorstores.faiss import FAISS
return FAISS | null |
search_api | """Search the API for the query."""
assert isinstance(query, str)
return 'API result' | @tool(return_direct=True)
def search_api(query: str) ->str:
"""Search the API for the query."""
assert isinstance(query, str)
return 'API result' | Search the API for the query. |
key | """Construct the record key to use"""
return self.key_prefix + self.session_id | @property
def key(self) ->str:
"""Construct the record key to use"""
return self.key_prefix + self.session_id | Construct the record key to use |
visit_operation | args = [arg.accept(self) for arg in operation.arguments]
return {self._format_func(operation.operator): args} | def visit_operation(self, operation: Operation) ->Dict:
args = [arg.accept(self) for arg in operation.arguments]
return {self._format_func(operation.operator): args} | null |
get_excerpt | if self.AdditionalAttributes and self.AdditionalAttributes[0
].Key == 'AnswerText':
excerpt = self.get_attribute_value()
elif self.DocumentExcerpt:
excerpt = self.DocumentExcerpt.Text
else:
excerpt = ''
return excerpt | def get_excerpt(self) ->str:
if self.AdditionalAttributes and self.AdditionalAttributes[0
].Key == 'AnswerText':
excerpt = self.get_attribute_value()
elif self.DocumentExcerpt:
excerpt = self.DocumentExcerpt.Text
else:
excerpt = ''
return excerpt | null |
_parseMeta | filepath = ''
if filecol == '':
nvec = list(nvmap.keys())
vvec = list(nvmap.values())
else:
nvec = []
vvec = []
if filecol in nvmap:
nvec.append(filecol)
vvec.append(nvmap[filecol])
filepath = nvmap[filecol]
for k, v in nvmap.items():
if k != filecol:
nvec.append(k)
vvec.append(v)
return nvec, vvec, filepath | def _parseMeta(self, nvmap: dict, filecol: str) ->Tuple[List[str], List[str
], str]:
filepath = ''
if filecol == '':
nvec = list(nvmap.keys())
vvec = list(nvmap.values())
else:
nvec = []
vvec = []
if filecol in nvmap:
nvec.append(filecol)
vvec.append(nvmap[filecol])
filepath = nvmap[filecol]
for k, v in nvmap.items():
if k != filecol:
nvec.append(k)
vvec.append(v)
return nvec, vvec, filepath | null |
get_input_schema | return create_model('AnalyzeDocumentChain', **{self.input_key: (str, None)}) | def get_input_schema(self, config: Optional[RunnableConfig]=None) ->Type[
BaseModel]:
return create_model('AnalyzeDocumentChain', **{self.input_key: (str, None)}
) | null |
_get_relevant_documents | """Get relevated documents given a user question.
Args:
query: user question
Returns:
Relevant documents for re-phrased question
"""
response = self.llm_chain(query, callbacks=run_manager.get_child())
re_phrased_question = response['text']
logger.info(f'Re-phrased question: {re_phrased_question}')
docs = self.retriever.get_relevant_documents(re_phrased_question, callbacks
=run_manager.get_child())
return docs | def _get_relevant_documents(self, query: str, *, run_manager:
CallbackManagerForRetrieverRun) ->List[Document]:
"""Get relevated documents given a user question.
Args:
query: user question
Returns:
Relevant documents for re-phrased question
"""
response = self.llm_chain(query, callbacks=run_manager.get_child())
re_phrased_question = response['text']
logger.info(f'Re-phrased question: {re_phrased_question}')
docs = self.retriever.get_relevant_documents(re_phrased_question,
callbacks=run_manager.get_child())
return docs | Get relevated documents given a user question.
Args:
query: user question
Returns:
Relevant documents for re-phrased question |
ParseFromString | self.uuid = 'fake_uuid' | def ParseFromString(self: Any, data: str) ->None:
self.uuid = 'fake_uuid' | null |
_get_source_id_assigner | """Get the source id from the document."""
if source_id_key is None:
return lambda doc: None
elif isinstance(source_id_key, str):
return lambda doc: doc.metadata[source_id_key]
elif callable(source_id_key):
return source_id_key
else:
raise ValueError(
f'source_id_key should be either None, a string or a callable. Got {source_id_key} of type {type(source_id_key)}.'
) | def _get_source_id_assigner(source_id_key: Union[str, Callable[[Document],
str], None]) ->Callable[[Document], Union[str, None]]:
"""Get the source id from the document."""
if source_id_key is None:
return lambda doc: None
elif isinstance(source_id_key, str):
return lambda doc: doc.metadata[source_id_key]
elif callable(source_id_key):
return source_id_key
else:
raise ValueError(
f'source_id_key should be either None, a string or a callable. Got {source_id_key} of type {type(source_id_key)}.'
) | Get the source id from the document. |
test_functions_call_thoughts | chat = QianfanChatEndpoint(model='ERNIE-Bot')
prompt_tmpl = 'Use the given functions to answer following question: {input}'
prompt_msgs = [HumanMessagePromptTemplate.from_template(prompt_tmpl)]
prompt = ChatPromptTemplate(messages=prompt_msgs)
chain = prompt | chat.bind(functions=_FUNCTIONS)
message = HumanMessage(content="What's the temperature in Shanghai today?")
response = chain.batch([{'input': message}])
assert isinstance(response[0], AIMessage)
assert 'function_call' in response[0].additional_kwargs | def test_functions_call_thoughts() ->None:
chat = QianfanChatEndpoint(model='ERNIE-Bot')
prompt_tmpl = (
'Use the given functions to answer following question: {input}')
prompt_msgs = [HumanMessagePromptTemplate.from_template(prompt_tmpl)]
prompt = ChatPromptTemplate(messages=prompt_msgs)
chain = prompt | chat.bind(functions=_FUNCTIONS)
message = HumanMessage(content="What's the temperature in Shanghai today?")
response = chain.batch([{'input': message}])
assert isinstance(response[0], AIMessage)
assert 'function_call' in response[0].additional_kwargs | null |
print_task_list | print('\x1b[95m\x1b[1m' + """
*****TASK LIST*****
""" + '\x1b[0m\x1b[0m')
for t in self.task_list:
print(str(t['task_id']) + ': ' + t['task_name']) | def print_task_list(self) ->None:
print('\x1b[95m\x1b[1m' + '\n*****TASK LIST*****\n' + '\x1b[0m\x1b[0m')
for t in self.task_list:
print(str(t['task_id']) + ': ' + t['task_name']) | null |
__init__ | """Initialize the LangChain tracer."""
super().__init__(**kwargs)
self.example_id = UUID(example_id) if isinstance(example_id, str
) else example_id
self.project_name = project_name or ls_utils.get_tracer_project()
self.client = client or get_client()
self._futures: weakref.WeakSet[Future] = weakref.WeakSet()
self.tags = tags or []
self.executor = _get_executor() if use_threading else None
self.latest_run: Optional[Run] = None
global _TRACERS
_TRACERS.add(self) | def __init__(self, example_id: Optional[Union[UUID, str]]=None,
project_name: Optional[str]=None, client: Optional[Client]=None, tags:
Optional[List[str]]=None, use_threading: bool=True, **kwargs: Any) ->None:
"""Initialize the LangChain tracer."""
super().__init__(**kwargs)
self.example_id = UUID(example_id) if isinstance(example_id, str
) else example_id
self.project_name = project_name or ls_utils.get_tracer_project()
self.client = client or get_client()
self._futures: weakref.WeakSet[Future] = weakref.WeakSet()
self.tags = tags or []
self.executor = _get_executor() if use_threading else None
self.latest_run: Optional[Run] = None
global _TRACERS
_TRACERS.add(self) | Initialize the LangChain tracer. |
test_prompt_with_chat_model | prompt = SystemMessagePromptTemplate.from_template('You are a nice assistant.'
) + '{question}'
chat = FakeListChatModel(responses=['foo'])
chain: Runnable = prompt | chat
assert repr(chain) == snapshot
assert isinstance(chain, RunnableSequence)
assert chain.first == prompt
assert chain.middle == []
assert chain.last == chat
assert dumps(chain, pretty=True) == snapshot
prompt_spy = mocker.spy(prompt.__class__, 'invoke')
chat_spy = mocker.spy(chat.__class__, 'invoke')
tracer = FakeTracer()
assert chain.invoke({'question': 'What is your name?'}, dict(callbacks=[
tracer])) == AIMessage(content='foo')
assert prompt_spy.call_args.args[1] == {'question': 'What is your name?'}
assert chat_spy.call_args.args[1] == ChatPromptValue(messages=[
SystemMessage(content='You are a nice assistant.'), HumanMessage(
content='What is your name?')])
assert tracer.runs == snapshot
mocker.stop(prompt_spy)
mocker.stop(chat_spy)
prompt_spy = mocker.spy(prompt.__class__, 'batch')
chat_spy = mocker.spy(chat.__class__, 'batch')
tracer = FakeTracer()
assert chain.batch([{'question': 'What is your name?'}, {'question':
'What is your favorite color?'}], dict(callbacks=[tracer])) == [AIMessage
(content='foo'), AIMessage(content='foo')]
assert prompt_spy.call_args.args[1] == [{'question': 'What is your name?'},
{'question': 'What is your favorite color?'}]
assert chat_spy.call_args.args[1] == [ChatPromptValue(messages=[
SystemMessage(content='You are a nice assistant.'), HumanMessage(
content='What is your name?')]), ChatPromptValue(messages=[
SystemMessage(content='You are a nice assistant.'), HumanMessage(
content='What is your favorite color?')])]
assert len([r for r in tracer.runs if r.parent_run_id is None and len(r.
child_runs) == 2]
) == 2, 'Each of 2 outer runs contains exactly two inner runs (1 prompt, 1 chat)'
mocker.stop(prompt_spy)
mocker.stop(chat_spy)
prompt_spy = mocker.spy(prompt.__class__, 'invoke')
chat_spy = mocker.spy(chat.__class__, 'stream')
tracer = FakeTracer()
assert [*chain.stream({'question': 'What is your name?'}, dict(callbacks=[
tracer]))] == [AIMessageChunk(content='f'), AIMessageChunk(content='o'),
AIMessageChunk(content='o')]
assert prompt_spy.call_args.args[1] == {'question': 'What is your name?'}
assert chat_spy.call_args.args[1] == ChatPromptValue(messages=[
SystemMessage(content='You are a nice assistant.'), HumanMessage(
content='What is your name?')]) | @freeze_time('2023-01-01')
def test_prompt_with_chat_model(mocker: MockerFixture, snapshot:
SnapshotAssertion) ->None:
prompt = SystemMessagePromptTemplate.from_template(
'You are a nice assistant.') + '{question}'
chat = FakeListChatModel(responses=['foo'])
chain: Runnable = prompt | chat
assert repr(chain) == snapshot
assert isinstance(chain, RunnableSequence)
assert chain.first == prompt
assert chain.middle == []
assert chain.last == chat
assert dumps(chain, pretty=True) == snapshot
prompt_spy = mocker.spy(prompt.__class__, 'invoke')
chat_spy = mocker.spy(chat.__class__, 'invoke')
tracer = FakeTracer()
assert chain.invoke({'question': 'What is your name?'}, dict(callbacks=
[tracer])) == AIMessage(content='foo')
assert prompt_spy.call_args.args[1] == {'question': 'What is your name?'}
assert chat_spy.call_args.args[1] == ChatPromptValue(messages=[
SystemMessage(content='You are a nice assistant.'), HumanMessage(
content='What is your name?')])
assert tracer.runs == snapshot
mocker.stop(prompt_spy)
mocker.stop(chat_spy)
prompt_spy = mocker.spy(prompt.__class__, 'batch')
chat_spy = mocker.spy(chat.__class__, 'batch')
tracer = FakeTracer()
assert chain.batch([{'question': 'What is your name?'}, {'question':
'What is your favorite color?'}], dict(callbacks=[tracer])) == [
AIMessage(content='foo'), AIMessage(content='foo')]
assert prompt_spy.call_args.args[1] == [{'question':
'What is your name?'}, {'question': 'What is your favorite color?'}]
assert chat_spy.call_args.args[1] == [ChatPromptValue(messages=[
SystemMessage(content='You are a nice assistant.'), HumanMessage(
content='What is your name?')]), ChatPromptValue(messages=[
SystemMessage(content='You are a nice assistant.'), HumanMessage(
content='What is your favorite color?')])]
assert len([r for r in tracer.runs if r.parent_run_id is None and len(r
.child_runs) == 2]
) == 2, 'Each of 2 outer runs contains exactly two inner runs (1 prompt, 1 chat)'
mocker.stop(prompt_spy)
mocker.stop(chat_spy)
prompt_spy = mocker.spy(prompt.__class__, 'invoke')
chat_spy = mocker.spy(chat.__class__, 'stream')
tracer = FakeTracer()
assert [*chain.stream({'question': 'What is your name?'}, dict(
callbacks=[tracer]))] == [AIMessageChunk(content='f'),
AIMessageChunk(content='o'), AIMessageChunk(content='o')]
assert prompt_spy.call_args.args[1] == {'question': 'What is your name?'}
assert chat_spy.call_args.args[1] == ChatPromptValue(messages=[
SystemMessage(content='You are a nice assistant.'), HumanMessage(
content='What is your name?')]) | null |
get_summaries_as_docs | return self.client.get_summaries_as_docs(self.query) | def get_summaries_as_docs(self) ->List[Document]:
return self.client.get_summaries_as_docs(self.query) | null |
_import_stackexchange | from langchain_community.utilities.stackexchange import StackExchangeAPIWrapper
return StackExchangeAPIWrapper | def _import_stackexchange() ->Any:
from langchain_community.utilities.stackexchange import StackExchangeAPIWrapper
return StackExchangeAPIWrapper | null |
_convert_date | return datetime.fromtimestamp(date / 1000).strftime('%Y-%m-%d %H:%M:%S') | def _convert_date(self, date: int) ->str:
return datetime.fromtimestamp(date / 1000).strftime('%Y-%m-%d %H:%M:%S') | null |
load_deanonymizer_mapping | """Load the deanonymizer mapping from a JSON or YAML file.
Args:
file_path: Path to file to load the mapping from.
Example:
.. code-block:: python
anonymizer.load_deanonymizer_mapping(file_path="path/mapping.json")
"""
load_path = Path(file_path)
if load_path.suffix not in ['.json', '.yaml']:
raise ValueError(f'{load_path} must have an extension of .json or .yaml')
if load_path.suffix == '.json':
with open(load_path, 'r') as f:
loaded_mapping = json.load(f)
elif load_path.suffix == '.yaml':
with open(load_path, 'r') as f:
loaded_mapping = yaml.load(f, Loader=yaml.FullLoader)
self._deanonymizer_mapping.update(loaded_mapping) | def load_deanonymizer_mapping(self, file_path: Union[Path, str]) ->None:
"""Load the deanonymizer mapping from a JSON or YAML file.
Args:
file_path: Path to file to load the mapping from.
Example:
.. code-block:: python
anonymizer.load_deanonymizer_mapping(file_path="path/mapping.json")
"""
load_path = Path(file_path)
if load_path.suffix not in ['.json', '.yaml']:
raise ValueError(
f'{load_path} must have an extension of .json or .yaml')
if load_path.suffix == '.json':
with open(load_path, 'r') as f:
loaded_mapping = json.load(f)
elif load_path.suffix == '.yaml':
with open(load_path, 'r') as f:
loaded_mapping = yaml.load(f, Loader=yaml.FullLoader)
self._deanonymizer_mapping.update(loaded_mapping) | Load the deanonymizer mapping from a JSON or YAML file.
Args:
file_path: Path to file to load the mapping from.
Example:
.. code-block:: python
anonymizer.load_deanonymizer_mapping(file_path="path/mapping.json") |
get_type_to_cls_dict | return {'ai21': _import_ai21, 'aleph_alpha': _import_aleph_alpha,
'amazon_api_gateway': _import_amazon_api_gateway, 'amazon_bedrock':
_import_bedrock, 'anthropic': _import_anthropic, 'anyscale':
_import_anyscale, 'arcee': _import_arcee, 'aviary': _import_aviary,
'azure': _import_azure_openai, 'azureml_endpoint':
_import_azureml_endpoint, 'bananadev': _import_bananadev, 'baseten':
_import_baseten, 'beam': _import_beam, 'cerebriumai':
_import_cerebriumai, 'chat_glm': _import_chatglm, 'clarifai':
_import_clarifai, 'cohere': _import_cohere, 'ctransformers':
_import_ctransformers, 'ctranslate2': _import_ctranslate2, 'databricks':
_import_databricks, 'databricks-chat': _import_databricks_chat,
'deepinfra': _import_deepinfra, 'deepsparse': _import_deepsparse,
'edenai': _import_edenai, 'fake-list': _import_fake, 'forefrontai':
_import_forefrontai, 'giga-chat-model': _import_gigachat, 'google_palm':
_import_google_palm, 'gooseai': _import_gooseai, 'gradient':
_import_gradient_ai, 'gpt4all': _import_gpt4all, 'huggingface_endpoint':
_import_huggingface_endpoint, 'huggingface_hub':
_import_huggingface_hub, 'huggingface_pipeline':
_import_huggingface_pipeline, 'huggingface_textgen_inference':
_import_huggingface_text_gen_inference, 'human-input': _import_human,
'koboldai': _import_koboldai, 'llamacpp': _import_llamacpp, 'textgen':
_import_textgen, 'minimax': _import_minimax, 'mlflow': _import_mlflow,
'mlflow-chat': _import_mlflow_chat, 'mlflow-ai-gateway':
_import_mlflow_ai_gateway, 'modal': _import_modal, 'mosaic':
_import_mosaicml, 'nebula': _import_symblai_nebula, 'nibittensor':
_import_bittensor, 'nlpcloud': _import_nlpcloud,
'oci_model_deployment_tgi_endpoint': _import_oci_md_tgi,
'oci_model_deployment_vllm_endpoint': _import_oci_md_vllm, 'ollama':
_import_ollama, 'openai': _import_openai, 'openlm': _import_openlm,
'pai_eas_endpoint': _import_pai_eas_endpoint, 'petals': _import_petals,
'pipelineai': _import_pipelineai, 'predibase': _import_predibase,
'opaqueprompts': _import_opaqueprompts, 'replicate': _import_replicate,
'rwkv': _import_rwkv, 'sagemaker_endpoint': _import_sagemaker_endpoint,
'self_hosted': _import_self_hosted, 'self_hosted_hugging_face':
_import_self_hosted_hugging_face, 'stochasticai': _import_stochasticai,
'together': _import_together, 'tongyi': _import_tongyi, 'titan_takeoff':
_import_titan_takeoff, 'titan_takeoff_pro': _import_titan_takeoff_pro,
'vertexai': _import_vertex, 'vertexai_model_garden':
_import_vertex_model_garden, 'openllm': _import_openllm,
'openllm_client': _import_openllm, 'vllm': _import_vllm, 'vllm_openai':
_import_vllm_openai, 'watsonxllm': _import_watsonxllm, 'writer':
_import_writer, 'xinference': _import_xinference, 'javelin-ai-gateway':
_import_javelin_ai_gateway, 'qianfan_endpoint':
_import_baidu_qianfan_endpoint, 'yandex_gpt': _import_yandex_gpt,
'VolcEngineMaasLLM': _import_volcengine_maas} | def get_type_to_cls_dict() ->Dict[str, Callable[[], Type[BaseLLM]]]:
return {'ai21': _import_ai21, 'aleph_alpha': _import_aleph_alpha,
'amazon_api_gateway': _import_amazon_api_gateway, 'amazon_bedrock':
_import_bedrock, 'anthropic': _import_anthropic, 'anyscale':
_import_anyscale, 'arcee': _import_arcee, 'aviary': _import_aviary,
'azure': _import_azure_openai, 'azureml_endpoint':
_import_azureml_endpoint, 'bananadev': _import_bananadev, 'baseten':
_import_baseten, 'beam': _import_beam, 'cerebriumai':
_import_cerebriumai, 'chat_glm': _import_chatglm, 'clarifai':
_import_clarifai, 'cohere': _import_cohere, 'ctransformers':
_import_ctransformers, 'ctranslate2': _import_ctranslate2,
'databricks': _import_databricks, 'databricks-chat':
_import_databricks_chat, 'deepinfra': _import_deepinfra,
'deepsparse': _import_deepsparse, 'edenai': _import_edenai,
'fake-list': _import_fake, 'forefrontai': _import_forefrontai,
'giga-chat-model': _import_gigachat, 'google_palm':
_import_google_palm, 'gooseai': _import_gooseai, 'gradient':
_import_gradient_ai, 'gpt4all': _import_gpt4all,
'huggingface_endpoint': _import_huggingface_endpoint,
'huggingface_hub': _import_huggingface_hub, 'huggingface_pipeline':
_import_huggingface_pipeline, 'huggingface_textgen_inference':
_import_huggingface_text_gen_inference, 'human-input':
_import_human, 'koboldai': _import_koboldai, 'llamacpp':
_import_llamacpp, 'textgen': _import_textgen, 'minimax':
_import_minimax, 'mlflow': _import_mlflow, 'mlflow-chat':
_import_mlflow_chat, 'mlflow-ai-gateway': _import_mlflow_ai_gateway,
'modal': _import_modal, 'mosaic': _import_mosaicml, 'nebula':
_import_symblai_nebula, 'nibittensor': _import_bittensor,
'nlpcloud': _import_nlpcloud, 'oci_model_deployment_tgi_endpoint':
_import_oci_md_tgi, 'oci_model_deployment_vllm_endpoint':
_import_oci_md_vllm, 'ollama': _import_ollama, 'openai':
_import_openai, 'openlm': _import_openlm, 'pai_eas_endpoint':
_import_pai_eas_endpoint, 'petals': _import_petals, 'pipelineai':
_import_pipelineai, 'predibase': _import_predibase, 'opaqueprompts':
_import_opaqueprompts, 'replicate': _import_replicate, 'rwkv':
_import_rwkv, 'sagemaker_endpoint': _import_sagemaker_endpoint,
'self_hosted': _import_self_hosted, 'self_hosted_hugging_face':
_import_self_hosted_hugging_face, 'stochasticai':
_import_stochasticai, 'together': _import_together, 'tongyi':
_import_tongyi, 'titan_takeoff': _import_titan_takeoff,
'titan_takeoff_pro': _import_titan_takeoff_pro, 'vertexai':
_import_vertex, 'vertexai_model_garden':
_import_vertex_model_garden, 'openllm': _import_openllm,
'openllm_client': _import_openllm, 'vllm': _import_vllm,
'vllm_openai': _import_vllm_openai, 'watsonxllm':
_import_watsonxllm, 'writer': _import_writer, 'xinference':
_import_xinference, 'javelin-ai-gateway':
_import_javelin_ai_gateway, 'qianfan_endpoint':
_import_baidu_qianfan_endpoint, 'yandex_gpt': _import_yandex_gpt,
'VolcEngineMaasLLM': _import_volcengine_maas} | null |
__init__ | """Initializes the selector."""
self.examples = examples | def __init__(self, examples: Sequence[Dict[str, str]]) ->None:
"""Initializes the selector."""
self.examples = examples | Initializes the selector. |
load_chain_from_config | """Load chain from Config Dict."""
if '_type' not in config:
raise ValueError('Must specify a chain Type in config')
config_type = config.pop('_type')
if config_type not in type_to_loader_dict:
raise ValueError(f'Loading {config_type} chain not supported')
chain_loader = type_to_loader_dict[config_type]
return chain_loader(config, **kwargs) | def load_chain_from_config(config: dict, **kwargs: Any) ->Chain:
"""Load chain from Config Dict."""
if '_type' not in config:
raise ValueError('Must specify a chain Type in config')
config_type = config.pop('_type')
if config_type not in type_to_loader_dict:
raise ValueError(f'Loading {config_type} chain not supported')
chain_loader = type_to_loader_dict[config_type]
return chain_loader(config, **kwargs) | Load chain from Config Dict. |
test_index_specification_generation | index_schema = {'text': [{'name': 'job'}, {'name': 'title'}], 'numeric': [{
'name': 'salary'}]}
text = ['foo']
meta = {'job': 'engineer', 'title': 'principal engineer', 'salary': 100000}
docs = [Document(page_content=t, metadata=meta) for t in text]
r = Redis.from_documents(docs, FakeEmbeddings(), redis_url=TEST_REDIS_URL,
index_schema=index_schema)
output = r.similarity_search('foo', k=1, return_metadata=True)
assert output[0].metadata['job'] == 'engineer'
assert output[0].metadata['title'] == 'principal engineer'
assert int(output[0].metadata['salary']) == 100000
info = convert_bytes(r.client.ft(r.index_name).info())
attributes = info['attributes']
assert len(attributes) == 5
for attr in attributes:
d = make_dict(attr)
if d['identifier'] == 'job':
assert d['type'] == 'TEXT'
elif d['identifier'] == 'title':
assert d['type'] == 'TEXT'
elif d['identifier'] == 'salary':
assert d['type'] == 'NUMERIC'
elif d['identifier'] == 'content':
assert d['type'] == 'TEXT'
elif d['identifier'] == 'content_vector':
assert d['type'] == 'VECTOR'
else:
raise ValueError('Unexpected attribute in index schema')
assert drop(r.index_name) | def test_index_specification_generation() ->None:
index_schema = {'text': [{'name': 'job'}, {'name': 'title'}], 'numeric':
[{'name': 'salary'}]}
text = ['foo']
meta = {'job': 'engineer', 'title': 'principal engineer', 'salary': 100000}
docs = [Document(page_content=t, metadata=meta) for t in text]
r = Redis.from_documents(docs, FakeEmbeddings(), redis_url=
TEST_REDIS_URL, index_schema=index_schema)
output = r.similarity_search('foo', k=1, return_metadata=True)
assert output[0].metadata['job'] == 'engineer'
assert output[0].metadata['title'] == 'principal engineer'
assert int(output[0].metadata['salary']) == 100000
info = convert_bytes(r.client.ft(r.index_name).info())
attributes = info['attributes']
assert len(attributes) == 5
for attr in attributes:
d = make_dict(attr)
if d['identifier'] == 'job':
assert d['type'] == 'TEXT'
elif d['identifier'] == 'title':
assert d['type'] == 'TEXT'
elif d['identifier'] == 'salary':
assert d['type'] == 'NUMERIC'
elif d['identifier'] == 'content':
assert d['type'] == 'TEXT'
elif d['identifier'] == 'content_vector':
assert d['type'] == 'VECTOR'
else:
raise ValueError('Unexpected attribute in index schema')
assert drop(r.index_name) | null |
test_facebook_chat_loader | """Test FacebookChatLoader."""
file_path = Path(__file__).parent.parent / 'examples/facebook_chat.json'
loader = FacebookChatLoader(str(file_path))
docs = loader.load()
assert len(docs) == 1
assert docs[0].metadata['source'] == str(file_path)
assert docs[0].page_content == """User 2 on 2023-02-05 13:46:11: Bye!
User 1 on 2023-02-05 13:43:55: Oh no worries! Bye
User 2 on 2023-02-05 13:24:37: No Im sorry it was my mistake, the blue one is not for sale
User 1 on 2023-02-05 13:05:40: I thought you were selling the blue one!
User 1 on 2023-02-05 13:05:09: Im not interested in this bag. Im interested in the blue one!
User 2 on 2023-02-05 13:04:28: Here is $129
User 2 on 2023-02-05 13:04:05: Online is at least $100
User 1 on 2023-02-05 12:59:59: How much do you want?
User 2 on 2023-02-05 08:17:56: Goodmorning! $50 is too low.
User 1 on 2023-02-05 00:17:02: Hi! Im interested in your bag. Im offering $50. Let me know if you are interested. Thanks!
""" | def test_facebook_chat_loader() ->None:
"""Test FacebookChatLoader."""
file_path = Path(__file__).parent.parent / 'examples/facebook_chat.json'
loader = FacebookChatLoader(str(file_path))
docs = loader.load()
assert len(docs) == 1
assert docs[0].metadata['source'] == str(file_path)
assert docs[0].page_content == """User 2 on 2023-02-05 13:46:11: Bye!
User 1 on 2023-02-05 13:43:55: Oh no worries! Bye
User 2 on 2023-02-05 13:24:37: No Im sorry it was my mistake, the blue one is not for sale
User 1 on 2023-02-05 13:05:40: I thought you were selling the blue one!
User 1 on 2023-02-05 13:05:09: Im not interested in this bag. Im interested in the blue one!
User 2 on 2023-02-05 13:04:28: Here is $129
User 2 on 2023-02-05 13:04:05: Online is at least $100
User 1 on 2023-02-05 12:59:59: How much do you want?
User 2 on 2023-02-05 08:17:56: Goodmorning! $50 is too low.
User 1 on 2023-02-05 00:17:02: Hi! Im interested in your bag. Im offering $50. Let me know if you are interested. Thanks!
""" | Test FacebookChatLoader. |
test_google_generativeai_generate | n = 1 if model_name == 'gemini-pro' else 2
llm = GoogleGenerativeAI(temperature=0.3, n=n, model=model_name)
output = llm.generate(['Say foo:'])
assert isinstance(output, LLMResult)
assert len(output.generations) == 1
assert len(output.generations[0]) == n | @pytest.mark.parametrize('model_name', model_names)
def test_google_generativeai_generate(model_name: str) ->None:
n = 1 if model_name == 'gemini-pro' else 2
llm = GoogleGenerativeAI(temperature=0.3, n=n, model=model_name)
output = llm.generate(['Say foo:'])
assert isinstance(output, LLMResult)
assert len(output.generations) == 1
assert len(output.generations[0]) == n | null |
format | """Format the chat template into a string.
Args:
**kwargs: keyword arguments to use for filling in template variables
in all the template messages in this chat template.
Returns:
formatted string
"""
return self.format_prompt(**kwargs).to_string() | def format(self, **kwargs: Any) ->str:
"""Format the chat template into a string.
Args:
**kwargs: keyword arguments to use for filling in template variables
in all the template messages in this chat template.
Returns:
formatted string
"""
return self.format_prompt(**kwargs).to_string() | Format the chat template into a string.
Args:
**kwargs: keyword arguments to use for filling in template variables
in all the template messages in this chat template.
Returns:
formatted string |
_create_chat_result | generations = []
if not isinstance(response, dict):
response = response.dict()
for res in response['choices']:
message = convert_dict_to_message(res['message'])
generation_info = dict(finish_reason=res.get('finish_reason'))
if 'logprobs' in res:
generation_info['logprobs'] = res['logprobs']
gen = ChatGeneration(message=message, generation_info=generation_info)
generations.append(gen)
token_usage = response.get('usage', {})
llm_output = {'token_usage': token_usage, 'model_name': self.model_name,
'system_fingerprint': response.get('system_fingerprint', '')}
return ChatResult(generations=generations, llm_output=llm_output) | def _create_chat_result(self, response: Union[dict, BaseModel]) ->ChatResult:
generations = []
if not isinstance(response, dict):
response = response.dict()
for res in response['choices']:
message = convert_dict_to_message(res['message'])
generation_info = dict(finish_reason=res.get('finish_reason'))
if 'logprobs' in res:
generation_info['logprobs'] = res['logprobs']
gen = ChatGeneration(message=message, generation_info=generation_info)
generations.append(gen)
token_usage = response.get('usage', {})
llm_output = {'token_usage': token_usage, 'model_name': self.model_name,
'system_fingerprint': response.get('system_fingerprint', '')}
return ChatResult(generations=generations, llm_output=llm_output) | null |