Dataset schema (each record below lists these four string fields in order):
method_name: string, 1 to 78 characters
method_body: string, 3 to 9.66k characters
full_code: string, 31 to 10.7k characters
docstring: string, 4 to 4.74k characters (shown as null when the method has no docstring)
generate_prompt
prompt_messages = [p.to_messages() for p in prompts] return self.generate(prompt_messages, stop=stop, callbacks=callbacks, **kwargs)
def generate_prompt(self, prompts: List[PromptValue], stop: Optional[List[str]] = None,
        callbacks: Callbacks = None, **kwargs: Any) -> LLMResult:
    prompt_messages = [p.to_messages() for p in prompts]
    return self.generate(prompt_messages, stop=stop, callbacks=callbacks, **kwargs)
null
__from
if ids is None: ids = [str(uuid.uuid1()) for _ in texts] if not metadatas: metadatas = [{} for _ in texts] store = cls(embedding=embedding, search_type=search_type, **kwargs) embedding_dimension = store.retrieve_existing_index() if not embedding_dimension: store.create_new_index() elif not store.embedding_dimension == embedding_dimension: raise ValueError( f"""Index with name {store.index_name} already exists.The provided embedding function and vector index dimensions do not match. Embedding function dimension: {store.embedding_dimension} Vector index dimension: {embedding_dimension}""" ) if search_type == SearchType.HYBRID: fts_node_label = store.retrieve_existing_fts_index() if not fts_node_label: store.create_new_keyword_index() elif not fts_node_label == store.node_label: raise ValueError( "Vector and keyword index don't index the same node label") if create_id_index: store.query( f'CREATE CONSTRAINT IF NOT EXISTS FOR (n:`{store.node_label}`) REQUIRE n.id IS UNIQUE;' ) store.add_embeddings(texts=texts, embeddings=embeddings, metadatas= metadatas, ids=ids, **kwargs) return store
@classmethod def __from(cls, texts: List[str], embeddings: List[List[float]], embedding: Embeddings, metadatas: Optional[List[dict]]=None, ids: Optional[List[ str]]=None, create_id_index: bool=True, search_type: SearchType= SearchType.VECTOR, **kwargs: Any) ->Neo4jVector: if ids is None: ids = [str(uuid.uuid1()) for _ in texts] if not metadatas: metadatas = [{} for _ in texts] store = cls(embedding=embedding, search_type=search_type, **kwargs) embedding_dimension = store.retrieve_existing_index() if not embedding_dimension: store.create_new_index() elif not store.embedding_dimension == embedding_dimension: raise ValueError( f"""Index with name {store.index_name} already exists.The provided embedding function and vector index dimensions do not match. Embedding function dimension: {store.embedding_dimension} Vector index dimension: {embedding_dimension}""" ) if search_type == SearchType.HYBRID: fts_node_label = store.retrieve_existing_fts_index() if not fts_node_label: store.create_new_keyword_index() elif not fts_node_label == store.node_label: raise ValueError( "Vector and keyword index don't index the same node label") if create_id_index: store.query( f'CREATE CONSTRAINT IF NOT EXISTS FOR (n:`{store.node_label}`) REQUIRE n.id IS UNIQUE;' ) store.add_embeddings(texts=texts, embeddings=embeddings, metadatas= metadatas, ids=ids, **kwargs) return store
null
validate_environment
"""Validate that FastEmbed has been installed.""" try: from fastembed.embedding import FlagEmbedding model_name = values.get('model_name') max_length = values.get('max_length') cache_dir = values.get('cache_dir') threads = values.get('threads') values['_model'] = FlagEmbedding(model_name=model_name, max_length= max_length, cache_dir=cache_dir, threads=threads) except ImportError as ie: raise ImportError( "Could not import 'fastembed' Python package. Please install it with `pip install fastembed`." ) from ie return values
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
    """Validate that FastEmbed has been installed."""
    try:
        from fastembed.embedding import FlagEmbedding

        model_name = values.get('model_name')
        max_length = values.get('max_length')
        cache_dir = values.get('cache_dir')
        threads = values.get('threads')
        values['_model'] = FlagEmbedding(
            model_name=model_name,
            max_length=max_length,
            cache_dir=cache_dir,
            threads=threads,
        )
    except ImportError as ie:
        raise ImportError(
            "Could not import 'fastembed' Python package. Please install it with `pip install fastembed`."
        ) from ie
    return values
Validate that FastEmbed has been installed.
test_invalid_request_format
"""Test invalid request format.""" class CustomContentFormatter(ContentFormatterBase): content_type = 'application/json' accepts = 'application/json' def format_request_payload(self, prompt: str, model_kwargs: Dict) ->bytes: input_str = json.dumps({'incorrect_input': {'input_string': [prompt ]}, 'parameters': model_kwargs}) return str.encode(input_str) def format_response_payload(self, output: bytes) ->str: response_json = json.loads(output) return response_json[0]['0'] with pytest.raises(HTTPError): llm = AzureMLOnlineEndpoint(endpoint_api_key=os.getenv( 'OSS_ENDPOINT_API_KEY'), endpoint_url=os.getenv('OSS_ENDPOINT_URL'), deployment_name=os.getenv('OSS_DEPLOYMENT_NAME'), content_formatter =CustomContentFormatter()) llm('Foo')
def test_invalid_request_format() -> None:
    """Test invalid request format."""

    class CustomContentFormatter(ContentFormatterBase):
        content_type = 'application/json'
        accepts = 'application/json'

        def format_request_payload(self, prompt: str, model_kwargs: Dict) -> bytes:
            input_str = json.dumps({'incorrect_input': {'input_string': [prompt]}, 'parameters': model_kwargs})
            return str.encode(input_str)

        def format_response_payload(self, output: bytes) -> str:
            response_json = json.loads(output)
            return response_json[0]['0']

    with pytest.raises(HTTPError):
        llm = AzureMLOnlineEndpoint(
            endpoint_api_key=os.getenv('OSS_ENDPOINT_API_KEY'),
            endpoint_url=os.getenv('OSS_ENDPOINT_URL'),
            deployment_name=os.getenv('OSS_DEPLOYMENT_NAME'),
            content_formatter=CustomContentFormatter(),
        )
        llm('Foo')
Test invalid request format.
_create_weaviate_client
try: import weaviate except ImportError: raise ImportError( 'Could not import weaviate python package. Please install it with `pip install weaviate-client`' ) url = url or os.environ.get('WEAVIATE_URL') api_key = api_key or os.environ.get('WEAVIATE_API_KEY') auth = weaviate.auth.AuthApiKey(api_key=api_key) if api_key else None return weaviate.Client(url=url, auth_client_secret=auth, **kwargs)
def _create_weaviate_client(url: Optional[str] = None, api_key: Optional[str] = None,
        **kwargs: Any) -> weaviate.Client:
    try:
        import weaviate
    except ImportError:
        raise ImportError(
            'Could not import weaviate python package. Please install it with `pip install weaviate-client`'
        )
    url = url or os.environ.get('WEAVIATE_URL')
    api_key = api_key or os.environ.get('WEAVIATE_API_KEY')
    auth = weaviate.auth.AuthApiKey(api_key=api_key) if api_key else None
    return weaviate.Client(url=url, auth_client_secret=auth, **kwargs)
null
convert_pydantic_to_ernie_tool
"""Converts a Pydantic model to a function description for the Ernie API.""" function = convert_pydantic_to_ernie_function(model, name=name, description =description) return {'type': 'function', 'function': function}
def convert_pydantic_to_ernie_tool(model: Type[BaseModel], *, name: Optional[str] = None,
        description: Optional[str] = None) -> ToolDescription:
    """Converts a Pydantic model to a function description for the Ernie API."""
    function = convert_pydantic_to_ernie_function(model, name=name, description=description)
    return {'type': 'function', 'function': function}
Converts a Pydantic model to a function description for the Ernie API.
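A brief, hypothetical usage sketch of the conversion above; the WeatherQuery model and its fields are invented for illustration, and the import of convert_pydantic_to_ernie_tool from its LangChain module is assumed to be in scope.

from pydantic import BaseModel, Field

class WeatherQuery(BaseModel):
    """Look up the current weather for a city."""
    city: str = Field(description='City name')
    unit: str = Field(default='celsius', description='Temperature unit')

# With convert_pydantic_to_ernie_tool in scope, the result is a dict of the
# form {'type': 'function', 'function': {...}} as returned by the code above.
tool = convert_pydantic_to_ernie_tool(WeatherQuery, name='get_weather')
assert tool['type'] == 'function'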
test_openai_streaming_multiple_prompts_error
"""Test validation for streaming fails if multiple prompts are given.""" with pytest.raises(ValueError): _get_llm(streaming=True).generate(["I'm Pickle Rick", "I'm Pickle Rick"])
def test_openai_streaming_multiple_prompts_error() -> None:
    """Test validation for streaming fails if multiple prompts are given."""
    with pytest.raises(ValueError):
        _get_llm(streaming=True).generate(["I'm Pickle Rick", "I'm Pickle Rick"])
Test validation for streaming fails if multiple prompts are given.
callback
log_method(text, extra=extra)
def callback(text: str) -> None:
    log_method(text, extra=extra)
null
__next__
while True: self.buffer.seek(self.read_pos) line = self.buffer.readline() if line and line[-1] == ord('\n'): self.read_pos += len(line) return line[:-1] try: chunk = next(self.byte_iterator) except StopIteration: if self.read_pos < self.buffer.getbuffer().nbytes: continue raise if 'PayloadPart' not in chunk: continue self.buffer.seek(0, io.SEEK_END) self.buffer.write(chunk['PayloadPart']['Bytes'])
def __next__(self) -> Any:
    while True:
        self.buffer.seek(self.read_pos)
        line = self.buffer.readline()
        if line and line[-1] == ord('\n'):
            self.read_pos += len(line)
            return line[:-1]
        try:
            chunk = next(self.byte_iterator)
        except StopIteration:
            if self.read_pos < self.buffer.getbuffer().nbytes:
                continue
            raise
        if 'PayloadPart' not in chunk:
            continue
        self.buffer.seek(0, io.SEEK_END)
        self.buffer.write(chunk['PayloadPart']['Bytes'])
null
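The class owning __next__ above line-buffers a SageMaker-style event stream, but its __init__ is not shown here. The following is a self-contained sketch: the buffer, read_pos, and byte_iterator attributes are assumed from how __next__ uses them, and the fake event stream is invented to show that a token split across payload parts is reassembled before being yielded.

import io
from typing import Any, Iterable

class DemoLineIterator:
    # Stand-in for the owning class; __init__ here is an assumption.
    def __init__(self, stream: Iterable[dict]):
        self.byte_iterator = iter(stream)
        self.buffer = io.BytesIO()
        self.read_pos = 0

    def __iter__(self) -> 'DemoLineIterator':
        return self

    def __next__(self) -> Any:
        while True:
            self.buffer.seek(self.read_pos)
            line = self.buffer.readline()
            if line and line[-1] == ord('\n'):
                self.read_pos += len(line)
                return line[:-1]
            try:
                chunk = next(self.byte_iterator)
            except StopIteration:
                if self.read_pos < self.buffer.getbuffer().nbytes:
                    continue
                raise
            if 'PayloadPart' not in chunk:
                continue
            self.buffer.seek(0, io.SEEK_END)
            self.buffer.write(chunk['PayloadPart']['Bytes'])

# A line split across two payload parts is yielded once its newline arrives.
events = [
    {'PayloadPart': {'Bytes': b'{"outputs": ["Hel'}},
    {'PayloadPart': {'Bytes': b'lo"]}\n'}},
]
for line in DemoLineIterator(events):
    print(line)  # b'{"outputs": ["Hello"]}'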
mocked_responses
"""Fixture mocking requests.get.""" with responses.RequestsMock() as rsps: yield rsps
@pytest.fixture(autouse=True)
def mocked_responses() -> Iterable[responses.RequestsMock]:
    """Fixture mocking requests.get."""
    with responses.RequestsMock() as rsps:
        yield rsps
Fixture mocking requests.get.
test_run_multiple_args_error
"""Test run method with multiple args errors as expected.""" chain = FakeChain() with pytest.raises(ValueError): chain.run('bar', 'foo')
def test_run_multiple_args_error() -> None:
    """Test run method with multiple args errors as expected."""
    chain = FakeChain()
    with pytest.raises(ValueError):
        chain.run('bar', 'foo')
Test run method with multiple args errors as expected.
test_parse_with_language
llm_output = """I can use the `foo` tool to achieve the goal. Action: ```json { "action": "foo", "action_input": "bar" } ``` """ action, action_input = get_action_and_input(llm_output) assert action == 'foo' assert action_input == 'bar'
def test_parse_with_language() -> None:
    llm_output = """I can use the `foo` tool to achieve the goal.

Action:
```json
{
  "action": "foo",
  "action_input": "bar"
}
```
"""
    action, action_input = get_action_and_input(llm_output)
    assert action == 'foo'
    assert action_input == 'bar'
null
create_table_if_not_exists
Table(self.collection_name, Base.metadata, Column('id', TEXT, primary_key= True, default=uuid.uuid4), Column('embedding', ARRAY(REAL)), Column( 'document', String, nullable=True), Column('metadata', JSON, nullable= True), extend_existing=True) with self.engine.connect() as conn: with conn.begin(): Base.metadata.create_all(conn) index_name = f'{self.collection_name}_embedding_idx' index_query = text( f""" SELECT 1 FROM pg_indexes WHERE indexname = '{index_name}'; """ ) result = conn.execute(index_query).scalar() if not result: index_statement = text( f""" CREATE INDEX {index_name} ON {self.collection_name} USING ann(embedding) WITH ( "dim" = {self.embedding_dimension}, "hnsw_m" = 100 ); """ ) conn.execute(index_statement)
def create_table_if_not_exists(self) ->None: Table(self.collection_name, Base.metadata, Column('id', TEXT, primary_key=True, default=uuid.uuid4), Column('embedding', ARRAY( REAL)), Column('document', String, nullable=True), Column( 'metadata', JSON, nullable=True), extend_existing=True) with self.engine.connect() as conn: with conn.begin(): Base.metadata.create_all(conn) index_name = f'{self.collection_name}_embedding_idx' index_query = text( f""" SELECT 1 FROM pg_indexes WHERE indexname = '{index_name}'; """ ) result = conn.execute(index_query).scalar() if not result: index_statement = text( f""" CREATE INDEX {index_name} ON {self.collection_name} USING ann(embedding) WITH ( "dim" = {self.embedding_dimension}, "hnsw_m" = 100 ); """ ) conn.execute(index_statement)
null
__init__
"""Initialize with necessary components. Args: table_name (str, optional): Specifies the name of the table in use. Defaults to "message_store". id_field (str, optional): Specifies the name of the id field in the table. Defaults to "id". session_id_field (str, optional): Specifies the name of the session_id field in the table. Defaults to "session_id". message_field (str, optional): Specifies the name of the message field in the table. Defaults to "message". Following arguments pertain to the connection pool: pool_size (int, optional): Determines the number of active connections in the pool. Defaults to 5. max_overflow (int, optional): Determines the maximum number of connections allowed beyond the pool_size. Defaults to 10. timeout (float, optional): Specifies the maximum wait time in seconds for establishing a connection. Defaults to 30. Following arguments pertain to the database connection: host (str, optional): Specifies the hostname, IP address, or URL for the database connection. The default scheme is "mysql". user (str, optional): Database username. password (str, optional): Database password. port (int, optional): Database port. Defaults to 3306 for non-HTTP connections, 80 for HTTP connections, and 443 for HTTPS connections. database (str, optional): Database name. Additional optional arguments provide further customization over the database connection: pure_python (bool, optional): Toggles the connector mode. If True, operates in pure Python mode. local_infile (bool, optional): Allows local file uploads. charset (str, optional): Specifies the character set for string values. ssl_key (str, optional): Specifies the path of the file containing the SSL key. ssl_cert (str, optional): Specifies the path of the file containing the SSL certificate. ssl_ca (str, optional): Specifies the path of the file containing the SSL certificate authority. ssl_cipher (str, optional): Sets the SSL cipher list. ssl_disabled (bool, optional): Disables SSL usage. ssl_verify_cert (bool, optional): Verifies the server's certificate. Automatically enabled if ``ssl_ca`` is specified. ssl_verify_identity (bool, optional): Verifies the server's identity. conv (dict[int, Callable], optional): A dictionary of data conversion functions. credential_type (str, optional): Specifies the type of authentication to use: auth.PASSWORD, auth.JWT, or auth.BROWSER_SSO. autocommit (bool, optional): Enables autocommits. results_type (str, optional): Determines the structure of the query results: tuples, namedtuples, dicts. results_format (str, optional): Deprecated. This option has been renamed to results_type. Examples: Basic Usage: .. code-block:: python from langchain_community.chat_message_histories import ( SingleStoreDBChatMessageHistory ) message_history = SingleStoreDBChatMessageHistory( session_id="my-session", host="https://user:password@127.0.0.1:3306/database" ) Advanced Usage: .. code-block:: python from langchain_community.chat_message_histories import ( SingleStoreDBChatMessageHistory ) message_history = SingleStoreDBChatMessageHistory( session_id="my-session", host="127.0.0.1", port=3306, user="user", password="password", database="db", table_name="my_custom_table", pool_size=10, timeout=60, ) Using environment variables: .. 
code-block:: python from langchain_community.chat_message_histories import ( SingleStoreDBChatMessageHistory ) os.environ['SINGLESTOREDB_URL'] = 'me:p455w0rd@s2-host.com/my_db' message_history = SingleStoreDBChatMessageHistory("my-session") """ self.table_name = self._sanitize_input(table_name) self.session_id = self._sanitize_input(session_id) self.id_field = self._sanitize_input(id_field) self.session_id_field = self._sanitize_input(session_id_field) self.message_field = self._sanitize_input(message_field) self.connection_kwargs = kwargs if 'conn_attrs' not in self.connection_kwargs: self.connection_kwargs['conn_attrs'] = dict() self.connection_kwargs['conn_attrs']['_connector_name' ] = 'langchain python sdk' self.connection_kwargs['conn_attrs']['_connector_version'] = '1.0.1' try: from sqlalchemy.pool import QueuePool except ImportError: raise ImportError( 'Could not import sqlalchemy.pool python package. Please install it with `pip install singlestoredb`.' ) self.connection_pool = QueuePool(self._get_connection, max_overflow= max_overflow, pool_size=pool_size, timeout=timeout) self.table_created = False
def __init__(self, session_id: str, *, table_name: str='message_store', id_field: str='id', session_id_field: str='session_id', message_field: str='message', pool_size: int=5, max_overflow: int=10, timeout: float= 30, **kwargs: Any): """Initialize with necessary components. Args: table_name (str, optional): Specifies the name of the table in use. Defaults to "message_store". id_field (str, optional): Specifies the name of the id field in the table. Defaults to "id". session_id_field (str, optional): Specifies the name of the session_id field in the table. Defaults to "session_id". message_field (str, optional): Specifies the name of the message field in the table. Defaults to "message". Following arguments pertain to the connection pool: pool_size (int, optional): Determines the number of active connections in the pool. Defaults to 5. max_overflow (int, optional): Determines the maximum number of connections allowed beyond the pool_size. Defaults to 10. timeout (float, optional): Specifies the maximum wait time in seconds for establishing a connection. Defaults to 30. Following arguments pertain to the database connection: host (str, optional): Specifies the hostname, IP address, or URL for the database connection. The default scheme is "mysql". user (str, optional): Database username. password (str, optional): Database password. port (int, optional): Database port. Defaults to 3306 for non-HTTP connections, 80 for HTTP connections, and 443 for HTTPS connections. database (str, optional): Database name. Additional optional arguments provide further customization over the database connection: pure_python (bool, optional): Toggles the connector mode. If True, operates in pure Python mode. local_infile (bool, optional): Allows local file uploads. charset (str, optional): Specifies the character set for string values. ssl_key (str, optional): Specifies the path of the file containing the SSL key. ssl_cert (str, optional): Specifies the path of the file containing the SSL certificate. ssl_ca (str, optional): Specifies the path of the file containing the SSL certificate authority. ssl_cipher (str, optional): Sets the SSL cipher list. ssl_disabled (bool, optional): Disables SSL usage. ssl_verify_cert (bool, optional): Verifies the server's certificate. Automatically enabled if ``ssl_ca`` is specified. ssl_verify_identity (bool, optional): Verifies the server's identity. conv (dict[int, Callable], optional): A dictionary of data conversion functions. credential_type (str, optional): Specifies the type of authentication to use: auth.PASSWORD, auth.JWT, or auth.BROWSER_SSO. autocommit (bool, optional): Enables autocommits. results_type (str, optional): Determines the structure of the query results: tuples, namedtuples, dicts. results_format (str, optional): Deprecated. This option has been renamed to results_type. Examples: Basic Usage: .. code-block:: python from langchain_community.chat_message_histories import ( SingleStoreDBChatMessageHistory ) message_history = SingleStoreDBChatMessageHistory( session_id="my-session", host="https://user:password@127.0.0.1:3306/database" ) Advanced Usage: .. code-block:: python from langchain_community.chat_message_histories import ( SingleStoreDBChatMessageHistory ) message_history = SingleStoreDBChatMessageHistory( session_id="my-session", host="127.0.0.1", port=3306, user="user", password="password", database="db", table_name="my_custom_table", pool_size=10, timeout=60, ) Using environment variables: .. 
code-block:: python from langchain_community.chat_message_histories import ( SingleStoreDBChatMessageHistory ) os.environ['SINGLESTOREDB_URL'] = 'me:p455w0rd@s2-host.com/my_db' message_history = SingleStoreDBChatMessageHistory("my-session") """ self.table_name = self._sanitize_input(table_name) self.session_id = self._sanitize_input(session_id) self.id_field = self._sanitize_input(id_field) self.session_id_field = self._sanitize_input(session_id_field) self.message_field = self._sanitize_input(message_field) self.connection_kwargs = kwargs if 'conn_attrs' not in self.connection_kwargs: self.connection_kwargs['conn_attrs'] = dict() self.connection_kwargs['conn_attrs']['_connector_name' ] = 'langchain python sdk' self.connection_kwargs['conn_attrs']['_connector_version'] = '1.0.1' try: from sqlalchemy.pool import QueuePool except ImportError: raise ImportError( 'Could not import sqlalchemy.pool python package. Please install it with `pip install singlestoredb`.' ) self.connection_pool = QueuePool(self._get_connection, max_overflow= max_overflow, pool_size=pool_size, timeout=timeout) self.table_created = False
Initialize with necessary components. Args: table_name (str, optional): Specifies the name of the table in use. Defaults to "message_store". id_field (str, optional): Specifies the name of the id field in the table. Defaults to "id". session_id_field (str, optional): Specifies the name of the session_id field in the table. Defaults to "session_id". message_field (str, optional): Specifies the name of the message field in the table. Defaults to "message". Following arguments pertain to the connection pool: pool_size (int, optional): Determines the number of active connections in the pool. Defaults to 5. max_overflow (int, optional): Determines the maximum number of connections allowed beyond the pool_size. Defaults to 10. timeout (float, optional): Specifies the maximum wait time in seconds for establishing a connection. Defaults to 30. Following arguments pertain to the database connection: host (str, optional): Specifies the hostname, IP address, or URL for the database connection. The default scheme is "mysql". user (str, optional): Database username. password (str, optional): Database password. port (int, optional): Database port. Defaults to 3306 for non-HTTP connections, 80 for HTTP connections, and 443 for HTTPS connections. database (str, optional): Database name. Additional optional arguments provide further customization over the database connection: pure_python (bool, optional): Toggles the connector mode. If True, operates in pure Python mode. local_infile (bool, optional): Allows local file uploads. charset (str, optional): Specifies the character set for string values. ssl_key (str, optional): Specifies the path of the file containing the SSL key. ssl_cert (str, optional): Specifies the path of the file containing the SSL certificate. ssl_ca (str, optional): Specifies the path of the file containing the SSL certificate authority. ssl_cipher (str, optional): Sets the SSL cipher list. ssl_disabled (bool, optional): Disables SSL usage. ssl_verify_cert (bool, optional): Verifies the server's certificate. Automatically enabled if ``ssl_ca`` is specified. ssl_verify_identity (bool, optional): Verifies the server's identity. conv (dict[int, Callable], optional): A dictionary of data conversion functions. credential_type (str, optional): Specifies the type of authentication to use: auth.PASSWORD, auth.JWT, or auth.BROWSER_SSO. autocommit (bool, optional): Enables autocommits. results_type (str, optional): Determines the structure of the query results: tuples, namedtuples, dicts. results_format (str, optional): Deprecated. This option has been renamed to results_type. Examples: Basic Usage: .. code-block:: python from langchain_community.chat_message_histories import ( SingleStoreDBChatMessageHistory ) message_history = SingleStoreDBChatMessageHistory( session_id="my-session", host="https://user:password@127.0.0.1:3306/database" ) Advanced Usage: .. code-block:: python from langchain_community.chat_message_histories import ( SingleStoreDBChatMessageHistory ) message_history = SingleStoreDBChatMessageHistory( session_id="my-session", host="127.0.0.1", port=3306, user="user", password="password", database="db", table_name="my_custom_table", pool_size=10, timeout=60, ) Using environment variables: .. code-block:: python from langchain_community.chat_message_histories import ( SingleStoreDBChatMessageHistory ) os.environ['SINGLESTOREDB_URL'] = 'me:p455w0rd@s2-host.com/my_db' message_history = SingleStoreDBChatMessageHistory("my-session")
try_load_from_hub
"""Load configuration from hub. Returns None if path is not a hub path.""" if not isinstance(path, str) or not (match := HUB_PATH_RE.match(path)): return None ref, remote_path_str = match.groups() ref = ref[1:] if ref else DEFAULT_REF remote_path = Path(remote_path_str) if remote_path.parts[0] != valid_prefix: return None if remote_path.suffix[1:] not in valid_suffixes: raise ValueError(f'Unsupported file type, must be one of {valid_suffixes}.' ) full_url = urljoin(URL_BASE.format(ref=ref), PurePosixPath(remote_path). __str__()) r = requests.get(full_url, timeout=5) if r.status_code != 200: raise ValueError(f'Could not find file at {full_url}') with tempfile.TemporaryDirectory() as tmpdirname: file = Path(tmpdirname) / remote_path.name with open(file, 'wb') as f: f.write(r.content) return loader(str(file), **kwargs)
def try_load_from_hub(path: Union[str, Path], loader: Callable[[str], T], valid_prefix: str,
        valid_suffixes: Set[str], **kwargs: Any) -> Optional[T]:
    """Load configuration from hub. Returns None if path is not a hub path."""
    if not isinstance(path, str) or not (match := HUB_PATH_RE.match(path)):
        return None
    ref, remote_path_str = match.groups()
    ref = ref[1:] if ref else DEFAULT_REF
    remote_path = Path(remote_path_str)
    if remote_path.parts[0] != valid_prefix:
        return None
    if remote_path.suffix[1:] not in valid_suffixes:
        raise ValueError(f'Unsupported file type, must be one of {valid_suffixes}.')
    full_url = urljoin(URL_BASE.format(ref=ref), PurePosixPath(remote_path).__str__())
    r = requests.get(full_url, timeout=5)
    if r.status_code != 200:
        raise ValueError(f'Could not find file at {full_url}')
    with tempfile.TemporaryDirectory() as tmpdirname:
        file = Path(tmpdirname) / remote_path.name
        with open(file, 'wb') as f:
            f.write(r.content)
        return loader(str(file), **kwargs)
Load configuration from hub. Returns None if path is not a hub path.
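A hedged sketch of the calling convention; the toy JSON loader is invented for illustration, and only the documented "returns None for non-hub paths" contract is exercised (assuming ordinary local paths do not match HUB_PATH_RE) so the snippet stays offline.

import json
from pathlib import Path

def load_json(path: str, **kwargs):
    # Toy loader used only in this sketch.
    return json.loads(Path(path).read_text())

# A plain local path is not a hub path, so the helper returns None and the
# caller can fall back to loading the file from disk itself.
result = try_load_from_hub('./prompts/my_prompt.json', load_json, 'prompts', {'json'})
assert result is None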
test_visit_structured_query_complex
query = 'What is the capital of France?' op = Operation(operator=Operator.AND, arguments=[Comparison(comparator= Comparator.EQ, attribute='foo', value=2), Operation(operator=Operator. OR, arguments=[Comparison(comparator=Comparator.LT, attribute='bar', value=1), Comparison(comparator=Comparator.LIKE, attribute='bar', value ='10')])]) structured_query = StructuredQuery(query=query, filter=op, limit=None) expected = query, {'filter': [{'bool': {'must': [{'term': {'metadata.foo': 2}}, {'bool': {'should': [{'range': {'metadata.bar': {'lt': 1}}}, { 'match': {'metadata.bar': {'query': '10', 'fuzziness': 'AUTO'}}}]}}]}}]} actual = DEFAULT_TRANSLATOR.visit_structured_query(structured_query) assert expected == actual
def test_visit_structured_query_complex() ->None: query = 'What is the capital of France?' op = Operation(operator=Operator.AND, arguments=[Comparison(comparator= Comparator.EQ, attribute='foo', value=2), Operation(operator= Operator.OR, arguments=[Comparison(comparator=Comparator.LT, attribute='bar', value=1), Comparison(comparator=Comparator.LIKE, attribute='bar', value='10')])]) structured_query = StructuredQuery(query=query, filter=op, limit=None) expected = query, {'filter': [{'bool': {'must': [{'term': { 'metadata.foo': 2}}, {'bool': {'should': [{'range': {'metadata.bar': {'lt': 1}}}, {'match': {'metadata.bar': {'query': '10', 'fuzziness': 'AUTO'}}}]}}]}}]} actual = DEFAULT_TRANSLATOR.visit_structured_query(structured_query) assert expected == actual
null
_get_stacktrace
"""Get the stacktrace of the parent error.""" msg = repr(error) try: if sys.version_info < (3, 10): tb = traceback.format_exception(error.__class__, error, error. __traceback__) else: tb = traceback.format_exception(error) return (msg + '\n\n'.join(tb)).strip() except: return msg
@staticmethod
def _get_stacktrace(error: BaseException) -> str:
    """Get the stacktrace of the parent error."""
    msg = repr(error)
    try:
        if sys.version_info < (3, 10):
            tb = traceback.format_exception(error.__class__, error, error.__traceback__)
        else:
            tb = traceback.format_exception(error)
        return (msg + '\n\n'.join(tb)).strip()
    except:
        return msg
Get the stacktrace of the parent error.
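The same formatting logic restated as a runnable free function, to make the version-dependent traceback.format_exception call concrete; the function name and the example exception are illustrative, not part of the original class.

import sys
import traceback

def format_error(error: BaseException) -> str:
    # Mirrors _get_stacktrace: repr of the error plus its formatted traceback.
    msg = repr(error)
    try:
        if sys.version_info < (3, 10):
            tb = traceback.format_exception(error.__class__, error, error.__traceback__)
        else:
            tb = traceback.format_exception(error)
        return (msg + '\n\n'.join(tb)).strip()
    except BaseException:
        return msg

try:
    1 / 0
except ZeroDivisionError as exc:
    print(format_error(exc))  # ZeroDivisionError('division by zero') followed by the traceback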
_import_titan_takeoff
from langchain_community.llms.titan_takeoff import TitanTakeoff return TitanTakeoff
def _import_titan_takeoff() -> Any:
    from langchain_community.llms.titan_takeoff import TitanTakeoff
    return TitanTakeoff
null
get_child
"""Get a child callback manager. Args: tag (str, optional): The tag for the child callback manager. Defaults to None. Returns: CallbackManager: The child callback manager. """ manager = CallbackManager(handlers=[], parent_run_id=self.run_id) manager.set_handlers(self.inheritable_handlers) manager.add_tags(self.inheritable_tags) manager.add_metadata(self.inheritable_metadata) if tag is not None: manager.add_tags([tag], False) return manager
def get_child(self, tag: Optional[str] = None) -> CallbackManager:
    """Get a child callback manager.

    Args:
        tag (str, optional): The tag for the child callback manager.
            Defaults to None.

    Returns:
        CallbackManager: The child callback manager.
    """
    manager = CallbackManager(handlers=[], parent_run_id=self.run_id)
    manager.set_handlers(self.inheritable_handlers)
    manager.add_tags(self.inheritable_tags)
    manager.add_metadata(self.inheritable_metadata)
    if tag is not None:
        manager.add_tags([tag], False)
    return manager
Get a child callback manager. Args: tag (str, optional): The tag for the child callback manager. Defaults to None. Returns: CallbackManager: The child callback manager.
_document_exists
return len(self._query( f""" SELECT 1 FROM {self.location} WHERE _id=:session_id LIMIT 1 """ , session_id=self.session_id)) != 0
def _document_exists(self) -> bool:
    return len(self._query(
        f"""
        SELECT 1
        FROM {self.location}
        WHERE _id=:session_id
        LIMIT 1
        """,
        session_id=self.session_id,
    )) != 0
null
test_retry_logic
"""Tests that two queries (which would usually exceed the rate limit) works""" llm = MosaicML(inject_instruction_format=True, model_kwargs={ 'max_new_tokens': 10}) instruction = 'Repeat the word foo' prompt = llm._transform_prompt(instruction) expected_prompt = PROMPT_FOR_GENERATION_FORMAT.format(instruction=instruction) assert prompt == expected_prompt output = llm(prompt) assert isinstance(output, str) output = llm(prompt) assert isinstance(output, str)
def test_retry_logic() -> None:
    """Tests that two queries (which would usually exceed the rate limit) works"""
    llm = MosaicML(inject_instruction_format=True, model_kwargs={'max_new_tokens': 10})
    instruction = 'Repeat the word foo'
    prompt = llm._transform_prompt(instruction)
    expected_prompt = PROMPT_FOR_GENERATION_FORMAT.format(instruction=instruction)
    assert prompt == expected_prompt
    output = llm(prompt)
    assert isinstance(output, str)
    output = llm(prompt)
    assert isinstance(output, str)
Tests that two queries (which would usually exceed the rate limit) works
add_texts
"""Run more texts through the embeddings and add to the vectorstore. Args: texts: List of strings to add to the vectorstore. metadatas: Optional list of metadata associated with the texts. Returns: List of ids from adding the texts into the vectorstore. """ embs = self.embedding_model.embed_documents(texts) return self.add_texts_with_embeddings(texts, embs, metadatas, **kwargs)
def add_texts(self, texts: List[str], metadatas: Optional[List[dict]] = None,
        **kwargs: Any) -> List[str]:
    """Run more texts through the embeddings and add to the vectorstore.

    Args:
        texts: List of strings to add to the vectorstore.
        metadatas: Optional list of metadata associated with the texts.

    Returns:
        List of ids from adding the texts into the vectorstore.
    """
    embs = self.embedding_model.embed_documents(texts)
    return self.add_texts_with_embeddings(texts, embs, metadatas, **kwargs)
Run more texts through the embeddings and add to the vectorstore. Args: texts: List of strings to add to the vectorstore. metadatas: Optional list of metadata associated with the texts. Returns: List of ids from adding the texts into the vectorstore.
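A hypothetical call site for add_texts; `store` stands for an already-constructed instance of the vector store this method belongs to, with its embedding model configured.

ids = store.add_texts(
    ['LangChain is a framework for LLM applications.', 'Vector stores hold embeddings.'],
    metadatas=[{'source': 'doc-1'}, {'source': 'doc-2'}],
)
# Per the docstring, the return value is the list of ids assigned to the added texts.
print(ids)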
_validate_inputs
super()._validate_inputs(inputs) if self.selected_input_key in inputs.keys( ) or self.selected_based_on_input_key in inputs.keys(): raise ValueError( f"The rl chain does not accept '{self.selected_input_key}' or '{self.selected_based_on_input_key}' as input keys, they are reserved for internal use during auto reward." )
def _validate_inputs(self, inputs: Dict[str, Any]) -> None:
    super()._validate_inputs(inputs)
    if self.selected_input_key in inputs.keys() or self.selected_based_on_input_key in inputs.keys():
        raise ValueError(
            f"The rl chain does not accept '{self.selected_input_key}' or '{self.selected_based_on_input_key}' as input keys, they are reserved for internal use during auto reward."
        )
null
test_input_dict_with_history_key
runnable = RunnableLambda(lambda input: 'you said: ' + '\n'.join([str(m. content) for m in input['history'] if isinstance(m, HumanMessage)] + [ input['input']])) get_session_history = _get_get_session_history() with_history = RunnableWithMessageHistory(runnable, get_session_history, input_messages_key='input', history_messages_key='history') config: RunnableConfig = {'configurable': {'session_id': '3'}} output = with_history.invoke({'input': 'hello'}, config) assert output == 'you said: hello' output = with_history.invoke({'input': 'good bye'}, config) assert output == """you said: hello good bye"""
def test_input_dict_with_history_key() -> None:
    runnable = RunnableLambda(
        lambda input: 'you said: '
        + '\n'.join(
            [str(m.content) for m in input['history'] if isinstance(m, HumanMessage)]
            + [input['input']]
        )
    )
    get_session_history = _get_get_session_history()
    with_history = RunnableWithMessageHistory(
        runnable,
        get_session_history,
        input_messages_key='input',
        history_messages_key='history',
    )
    config: RunnableConfig = {'configurable': {'session_id': '3'}}
    output = with_history.invoke({'input': 'hello'}, config)
    assert output == 'you said: hello'
    output = with_history.invoke({'input': 'good bye'}, config)
    assert output == 'you said: hello\ngood bye'
null
prepare_output_stream
stream = response.get('body') if not stream: return if provider not in cls.provider_to_output_key_map: raise ValueError( f'Unknown streaming response output key for provider: {provider}') for event in stream: chunk = event.get('chunk') if chunk: chunk_obj = json.loads(chunk.get('bytes').decode()) if provider == 'cohere' and (chunk_obj['is_finished'] or chunk_obj[ cls.provider_to_output_key_map[provider]] == '<EOS_TOKEN>'): return yield GenerationChunk(text=chunk_obj[cls.provider_to_output_key_map [provider]])
@classmethod
def prepare_output_stream(cls, provider: str, response: Any,
        stop: Optional[List[str]] = None) -> Iterator[GenerationChunk]:
    stream = response.get('body')
    if not stream:
        return
    if provider not in cls.provider_to_output_key_map:
        raise ValueError(f'Unknown streaming response output key for provider: {provider}')
    for event in stream:
        chunk = event.get('chunk')
        if chunk:
            chunk_obj = json.loads(chunk.get('bytes').decode())
            if provider == 'cohere' and (chunk_obj['is_finished'] or chunk_obj[cls.provider_to_output_key_map[provider]] == '<EOS_TOKEN>'):
                return
            yield GenerationChunk(text=chunk_obj[cls.provider_to_output_key_map[provider]])
null
_combine_message_texts
""" Combine the message texts for each parent message ID based on the list of message threads. Args: message_threads (dict): A dictionary where the key is the parent message ID and the value is a list of message IDs in ascending order. data (pd.DataFrame): A DataFrame containing the conversation data: - message.sender_id - text - date - message.id - is_reply - reply_to_id Returns: str: A combined string of message texts sorted by date. """ combined_text = '' for parent_id, message_ids in message_threads.items(): message_texts = data[data['message.id'].isin(message_ids)].sort_values(by ='date')['text'].tolist() message_texts = [str(elem) for elem in message_texts] combined_text += ' '.join(message_texts) + '.\n' return combined_text.strip()
def _combine_message_texts(self, message_threads: Dict[int, List[int]], data: pd.DataFrame) -> str:
    """
    Combine the message texts for each parent message ID based on the list of message threads.

    Args:
        message_threads (dict): A dictionary where the key is the parent message
            ID and the value is a list of message IDs in ascending order.
        data (pd.DataFrame): A DataFrame containing the conversation data:
            - message.sender_id
            - text
            - date
            - message.id
            - is_reply
            - reply_to_id

    Returns:
        str: A combined string of message texts sorted by date.
    """
    combined_text = ''
    for parent_id, message_ids in message_threads.items():
        message_texts = data[data['message.id'].isin(message_ids)].sort_values(by='date')['text'].tolist()
        message_texts = [str(elem) for elem in message_texts]
        combined_text += ' '.join(message_texts) + '.\n'
    return combined_text.strip()
Combine the message texts for each parent message ID based on the list of message threads. Args: message_threads (dict): A dictionary where the key is the parent message ID and the value is a list of message IDs in ascending order. data (pd.DataFrame): A DataFrame containing the conversation data: - message.sender_id - text - date - message.id - is_reply - reply_to_id Returns: str: A combined string of message texts sorted by date.
_import_llm_rails
from langchain_community.vectorstores.llm_rails import LLMRails return LLMRails
def _import_llm_rails() -> Any:
    from langchain_community.vectorstores.llm_rails import LLMRails
    return LLMRails
null
test_default_w_embeddings_off
llm, PROMPT = setup() feature_embedder = pick_best_chain.PickBestFeatureEmbedder(auto_embed=False, model=MockEncoder()) chain = pick_best_chain.PickBest.from_llm(llm=llm, prompt=PROMPT, feature_embedder=feature_embedder, auto_embed=False) str1 = '0' str2 = '1' str3 = '2' ctx_str_1 = 'context1' expected = f"""shared |User {ctx_str_1} |action {str1} |action {str2} |action {str3} """ actions = [str1, str2, str3] response = chain.run(User=rl_chain.BasedOn(ctx_str_1), action=rl_chain. ToSelectFrom(actions)) selection_metadata = response['selection_metadata'] vw_str = feature_embedder.format(selection_metadata) assert vw_str == expected
@pytest.mark.requires('vowpal_wabbit_next', 'sentence_transformers') def test_default_w_embeddings_off() ->None: llm, PROMPT = setup() feature_embedder = pick_best_chain.PickBestFeatureEmbedder(auto_embed= False, model=MockEncoder()) chain = pick_best_chain.PickBest.from_llm(llm=llm, prompt=PROMPT, feature_embedder=feature_embedder, auto_embed=False) str1 = '0' str2 = '1' str3 = '2' ctx_str_1 = 'context1' expected = ( f'shared |User {ctx_str_1} \n|action {str1} \n|action {str2} \n|action {str3} ' ) actions = [str1, str2, str3] response = chain.run(User=rl_chain.BasedOn(ctx_str_1), action=rl_chain. ToSelectFrom(actions)) selection_metadata = response['selection_metadata'] vw_str = feature_embedder.format(selection_metadata) assert vw_str == expected
null
_transform
xml_start_re = re.compile('<[a-zA-Z:_]') parser = ET.XMLPullParser(['start', 'end']) xml_started = False current_path: List[str] = [] current_path_has_children = False buffer = '' for chunk in input: if isinstance(chunk, BaseMessage): chunk_content = chunk.content if not isinstance(chunk_content, str): continue chunk = chunk_content buffer += chunk if not xml_started: if (match := xml_start_re.search(buffer)): buffer = buffer[match.start():] xml_started = True else: continue parser.feed(buffer) buffer = '' for event, elem in parser.read_events(): if event == 'start': current_path.append(elem.tag) current_path_has_children = False elif event == 'end': current_path.pop() if not current_path_has_children: yield nested_element(current_path, elem) if current_path: current_path_has_children = True else: xml_started = False parser.close()
def _transform(self, input: Iterator[Union[str, BaseMessage]]) ->Iterator[ AddableDict]: xml_start_re = re.compile('<[a-zA-Z:_]') parser = ET.XMLPullParser(['start', 'end']) xml_started = False current_path: List[str] = [] current_path_has_children = False buffer = '' for chunk in input: if isinstance(chunk, BaseMessage): chunk_content = chunk.content if not isinstance(chunk_content, str): continue chunk = chunk_content buffer += chunk if not xml_started: if (match := xml_start_re.search(buffer)): buffer = buffer[match.start():] xml_started = True else: continue parser.feed(buffer) buffer = '' for event, elem in parser.read_events(): if event == 'start': current_path.append(elem.tag) current_path_has_children = False elif event == 'end': current_path.pop() if not current_path_has_children: yield nested_element(current_path, elem) if current_path: current_path_has_children = True else: xml_started = False parser.close()
null
sanitize_input
"""Sanitize input to the python REPL. Remove whitespace, backtick & python (if llm mistakes python console as terminal) Args: query: The query to sanitize Returns: str: The sanitized query """ query = re.sub('^(\\s|`)*(?i:python)?\\s*', '', query) query = re.sub('(\\s|`)*$', '', query) return query
def sanitize_input(query: str) -> str:
    """Sanitize input to the python REPL.

    Remove whitespace, backtick & python (if llm mistakes python console as terminal)

    Args:
        query: The query to sanitize

    Returns:
        str: The sanitized query
    """
    query = re.sub('^(\\s|`)*(?i:python)?\\s*', '', query)
    query = re.sub('(\\s|`)*$', '', query)
    return query
Sanitize input to the python REPL. Remove whitespace, backtick & python (if llm mistakes python console as terminal) Args: query: The query to sanitize Returns: str: The sanitized query
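A standalone demonstration of the two substitutions above, showing how a fenced LLM answer is reduced to bare Python source.

import re

query = "```python\nprint('hi')\n```"
cleaned = re.sub('^(\\s|`)*(?i:python)?\\s*', '', query)  # strip leading fence and "python" tag
cleaned = re.sub('(\\s|`)*$', '', cleaned)                # strip trailing fence and whitespace
print(cleaned)  # print('hi')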
on_agent_finish
"""Run on agent end.""" print_text(finish.log, color=color or self.color, end='\n', file=self.file)
def on_agent_finish(self, finish: AgentFinish, color: Optional[str] = None,
        **kwargs: Any) -> None:
    """Run on agent end."""
    print_text(finish.log, color=color or self.color, end='\n', file=self.file)
Run on agent end.
get_lc_namespace
"""Get the namespace of the langchain object.""" return ['langchain', 'schema', 'prompt_template']
@classmethod
def get_lc_namespace(cls) -> List[str]:
    """Get the namespace of the langchain object."""
    return ['langchain', 'schema', 'prompt_template']
Get the namespace of the langchain object.
test_usearch_from_texts
"""Test end to end construction and search.""" texts = ['foo', 'bar', 'baz'] docsearch = USearch.from_texts(texts, FakeEmbeddings()) output = docsearch.similarity_search('foo', k=1) assert output == [Document(page_content='foo')]
def test_usearch_from_texts() -> None:
    """Test end to end construction and search."""
    texts = ['foo', 'bar', 'baz']
    docsearch = USearch.from_texts(texts, FakeEmbeddings())
    output = docsearch.similarity_search('foo', k=1)
    assert output == [Document(page_content='foo')]
Test end to end construction and search.
last_node
"""Find the single node that is not a source of any edge. If there is no such node, or there are multiple, return None. When drawing the graph this node would be the destination. """ sources = {edge.source for edge in self.edges} found: List[Node] = [] for node in self.nodes.values(): if node.id not in sources: found.append(node) return found[0] if len(found) == 1 else None
def last_node(self) -> Optional[Node]:
    """Find the single node that is not a source of any edge.

    If there is no such node, or there are multiple, return None.
    When drawing the graph this node would be the destination.
    """
    sources = {edge.source for edge in self.edges}
    found: List[Node] = []
    for node in self.nodes.values():
        if node.id not in sources:
            found.append(node)
    return found[0] if len(found) == 1 else None
Find the single node that is not a source of any edge. If there is no such node, or there are multiple, return None. When drawing the graph this node would be the destination.
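A self-contained sketch of the same "unique sink node" idea, using stand-in Node and Edge dataclasses; these are assumptions for the demo, not the actual langchain graph classes.

from dataclasses import dataclass
from typing import Dict, List, Optional

@dataclass
class Node:
    id: str

@dataclass
class Edge:
    source: str
    target: str

def find_last_node(nodes: Dict[str, Node], edges: List[Edge]) -> Optional[Node]:
    # A node that never appears as an edge source has no outgoing edges.
    sources = {edge.source for edge in edges}
    found = [node for node in nodes.values() if node.id not in sources]
    return found[0] if len(found) == 1 else None

nodes = {name: Node(name) for name in ('a', 'b', 'c')}
edges = [Edge('a', 'b'), Edge('b', 'c')]
print(find_last_node(nodes, edges).id)  # c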
_get_default_output_parser
return ChatOutputParser()
@classmethod
def _get_default_output_parser(cls, **kwargs: Any) -> AgentOutputParser:
    return ChatOutputParser()
null
api_client
return ArxivAPIWrapper()
@pytest.fixture
def api_client() -> ArxivAPIWrapper:
    return ArxivAPIWrapper()
null
test_raise_error_if_path_not_exist
loader = DirectoryLoader('./not_exist_directory') with pytest.raises(FileNotFoundError) as e: loader.load() assert str(e.value) == "Directory not found: './not_exist_directory'"
def test_raise_error_if_path_not_exist() -> None:
    loader = DirectoryLoader('./not_exist_directory')
    with pytest.raises(FileNotFoundError) as e:
        loader.load()
    assert str(e.value) == "Directory not found: './not_exist_directory'"
null
__init__
"""Load a list of URLs using Playwright.""" try: import playwright except ImportError: raise ImportError( 'playwright package not found, please install it with `pip install playwright`' ) self.urls = urls self.continue_on_failure = continue_on_failure self.headless = headless if remove_selectors and evaluator: raise ValueError( '`remove_selectors` and `evaluator` cannot be both not None') self.evaluator = evaluator or UnstructuredHtmlEvaluator(remove_selectors)
def __init__(self, urls: List[str], continue_on_failure: bool = True, headless: bool = True,
        remove_selectors: Optional[List[str]] = None, evaluator: Optional[PlaywrightEvaluator] = None):
    """Load a list of URLs using Playwright."""
    try:
        import playwright
    except ImportError:
        raise ImportError(
            'playwright package not found, please install it with `pip install playwright`'
        )
    self.urls = urls
    self.continue_on_failure = continue_on_failure
    self.headless = headless
    if remove_selectors and evaluator:
        raise ValueError('`remove_selectors` and `evaluator` cannot be both not None')
    self.evaluator = evaluator or UnstructuredHtmlEvaluator(remove_selectors)
Load a list of URLs using Playwright.
create_connection
import sqlite3 import sqlite_vss connection = sqlite3.connect(db_file) connection.row_factory = sqlite3.Row connection.enable_load_extension(True) sqlite_vss.load(connection) connection.enable_load_extension(False) return connection
@staticmethod
def create_connection(db_file: str) -> sqlite3.Connection:
    import sqlite3
    import sqlite_vss

    connection = sqlite3.connect(db_file)
    connection.row_factory = sqlite3.Row
    connection.enable_load_extension(True)
    sqlite_vss.load(connection)
    connection.enable_load_extension(False)
    return connection
null
_run_llm
""" Run the language model on the example. Args: llm: The language model to run. inputs: The input dictionary. callbacks: The callbacks to use during the run. tags: Optional tags to add to the run. input_mapper: function to map to the inputs dictionary from an Example Returns: The LLMResult or ChatResult. Raises: ValueError: If the LLM type is unsupported. InputFormatError: If the input format is invalid. """ if input_mapper is not None: prompt_or_messages = input_mapper(inputs) if isinstance(prompt_or_messages, str): llm_output: Union[str, BaseMessage] = llm.predict(prompt_or_messages, callbacks=callbacks, tags=tags) elif isinstance(prompt_or_messages, list) and all(isinstance(msg, BaseMessage) for msg in prompt_or_messages): llm_output = llm.predict_messages(prompt_or_messages, callbacks= callbacks, tags=tags) else: raise InputFormatError( f"""Input mapper returned invalid format: {prompt_or_messages} Expected a single string or list of chat messages.""" ) else: try: llm_prompts = _get_prompt(inputs) llm_output = llm.predict(llm_prompts, callbacks=callbacks, tags=tags) except InputFormatError: llm_messages = _get_messages(inputs) llm_output = llm.predict_messages(llm_messages, callbacks=callbacks) return llm_output
def _run_llm(llm: BaseLanguageModel, inputs: Dict[str, Any], callbacks: Callbacks, *, tags: Optional[List[str]]=None, input_mapper: Optional[ Callable[[Dict], Any]]=None) ->Union[str, BaseMessage]: """ Run the language model on the example. Args: llm: The language model to run. inputs: The input dictionary. callbacks: The callbacks to use during the run. tags: Optional tags to add to the run. input_mapper: function to map to the inputs dictionary from an Example Returns: The LLMResult or ChatResult. Raises: ValueError: If the LLM type is unsupported. InputFormatError: If the input format is invalid. """ if input_mapper is not None: prompt_or_messages = input_mapper(inputs) if isinstance(prompt_or_messages, str): llm_output: Union[str, BaseMessage] = llm.predict( prompt_or_messages, callbacks=callbacks, tags=tags) elif isinstance(prompt_or_messages, list) and all(isinstance(msg, BaseMessage) for msg in prompt_or_messages): llm_output = llm.predict_messages(prompt_or_messages, callbacks =callbacks, tags=tags) else: raise InputFormatError( f"""Input mapper returned invalid format: {prompt_or_messages} Expected a single string or list of chat messages.""" ) else: try: llm_prompts = _get_prompt(inputs) llm_output = llm.predict(llm_prompts, callbacks=callbacks, tags =tags) except InputFormatError: llm_messages = _get_messages(inputs) llm_output = llm.predict_messages(llm_messages, callbacks=callbacks ) return llm_output
Run the language model on the example. Args: llm: The language model to run. inputs: The input dictionary. callbacks: The callbacks to use during the run. tags: Optional tags to add to the run. input_mapper: function to map to the inputs dictionary from an Example Returns: The LLMResult or ChatResult. Raises: ValueError: If the LLM type is unsupported. InputFormatError: If the input format is invalid.
similarity_search
"""Return typesense documents most similar to query. Args: query: Text to look up documents similar to. k: Number of Documents to return. Defaults to 10. Minimum 10 results would be returned. filter: typesense filter_by expression to filter documents on Returns: List of Documents most similar to the query and score for each """ docs_and_score = self.similarity_search_with_score(query, k=k, filter=filter) return [doc for doc, _ in docs_and_score]
def similarity_search(self, query: str, k: int = 10, filter: Optional[str] = '',
        **kwargs: Any) -> List[Document]:
    """Return typesense documents most similar to query.

    Args:
        query: Text to look up documents similar to.
        k: Number of Documents to return.
            Defaults to 10.
            Minimum 10 results would be returned.
        filter: typesense filter_by expression to filter documents on

    Returns:
        List of Documents most similar to the query and score for each
    """
    docs_and_score = self.similarity_search_with_score(query, k=k, filter=filter)
    return [doc for doc, _ in docs_and_score]
Return typesense documents most similar to query. Args: query: Text to look up documents similar to. k: Number of Documents to return. Defaults to 10. Minimum 10 results would be returned. filter: typesense filter_by expression to filter documents on Returns: List of Documents most similar to the query and score for each
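A hypothetical call site; `docsearch` stands for an already-initialized Typesense vector store, and the filter string is assumed to follow Typesense filter_by syntax.

docs = docsearch.similarity_search(
    'vector databases',
    k=10,
    filter='source:=docs',
)
for doc in docs:
    print(doc.page_content)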
get_docstore
"""Get the metadata store used for this example.""" return LocalFileStore(str(Path(__file__).parent.parent / 'multi_vector_retriever_metadata'))
def get_docstore():
    """Get the metadata store used for this example."""
    return LocalFileStore(str(Path(__file__).parent.parent / 'multi_vector_retriever_metadata'))
Get the metadata store used for this example.
from_embeddings
texts = [t[0] for t in text_embeddings] embeddings = [t[1] for t in text_embeddings] return cls._initialize_from_embeddings(texts, embeddings, embedding, metadatas=metadatas, ids=ids, collection_name=collection_name, pre_delete_collection=pre_delete_collection, **kwargs)
@classmethod def from_embeddings(cls, text_embeddings: List[Tuple[str, List[float]]], embedding: Embeddings, metadatas: Optional[List[dict]]=None, collection_name: str=_LANGCHAIN_DEFAULT_COLLECTION_NAME, ids: Optional[ List[str]]=None, pre_delete_collection: bool=False, **kwargs: Any ) ->PGEmbedding: texts = [t[0] for t in text_embeddings] embeddings = [t[1] for t in text_embeddings] return cls._initialize_from_embeddings(texts, embeddings, embedding, metadatas=metadatas, ids=ids, collection_name=collection_name, pre_delete_collection=pre_delete_collection, **kwargs)
null
test_api_key_masked_when_passed_via_constructor
mock_response = mock_get.return_value mock_response.status_code = 200 mock_response.json.return_value = {'model_id': '', 'status': 'training_complete'} arcee_without_env_var = Arcee(model='DALM-PubMed', arcee_api_key= 'secret_api_key', arcee_api_url='https://localhost', arcee_api_version= 'version') print(arcee_without_env_var.arcee_api_key, end='') captured = capsys.readouterr() assert '**********' == captured.out
@patch('langchain_community.utilities.arcee.requests.get') def test_api_key_masked_when_passed_via_constructor(mock_get: MagicMock, capsys: CaptureFixture) ->None: mock_response = mock_get.return_value mock_response.status_code = 200 mock_response.json.return_value = {'model_id': '', 'status': 'training_complete'} arcee_without_env_var = Arcee(model='DALM-PubMed', arcee_api_key= 'secret_api_key', arcee_api_url='https://localhost', arcee_api_version='version') print(arcee_without_env_var.arcee_api_key, end='') captured = capsys.readouterr() assert '**********' == captured.out
null
from_texts
"""Create an Astra DB vectorstore from raw texts. Args: texts (List[str]): the texts to insert. embedding (Embeddings): the embedding function to use in the store. metadatas (Optional[List[dict]]): metadata dicts for the texts. ids (Optional[List[str]]): ids to associate to the texts. *Additional arguments*: you can pass any argument that you would to 'add_texts' and/or to the 'AstraDB' class constructor (see these methods for details). These arguments will be routed to the respective methods as they are. Returns: an `AstraDb` vectorstore. """ known_kwargs = {'collection_name', 'token', 'api_endpoint', 'astra_db_client', 'namespace', 'metric', 'batch_size', 'bulk_insert_batch_concurrency', 'bulk_insert_overwrite_concurrency', 'bulk_delete_concurrency', 'batch_concurrency', 'overwrite_concurrency'} if kwargs: unknown_kwargs = set(kwargs.keys()) - known_kwargs if unknown_kwargs: warnings.warn( f"Method 'from_texts' of AstraDB vector store invoked with unsupported arguments ({', '.join(sorted(unknown_kwargs))}), which will be ignored." ) collection_name: str = kwargs['collection_name'] token = kwargs.get('token') api_endpoint = kwargs.get('api_endpoint') astra_db_client = kwargs.get('astra_db_client') namespace = kwargs.get('namespace') metric = kwargs.get('metric') astra_db_store = cls(embedding=embedding, collection_name=collection_name, token=token, api_endpoint=api_endpoint, astra_db_client=astra_db_client, namespace=namespace, metric=metric, batch_size=kwargs.get('batch_size'), bulk_insert_batch_concurrency=kwargs.get( 'bulk_insert_batch_concurrency'), bulk_insert_overwrite_concurrency= kwargs.get('bulk_insert_overwrite_concurrency'), bulk_delete_concurrency=kwargs.get('bulk_delete_concurrency')) astra_db_store.add_texts(texts=texts, metadatas=metadatas, ids=ids, batch_size=kwargs.get('batch_size'), batch_concurrency=kwargs.get( 'batch_concurrency'), overwrite_concurrency=kwargs.get( 'overwrite_concurrency')) return astra_db_store
@classmethod def from_texts(cls: Type[ADBVST], texts: List[str], embedding: Embeddings, metadatas: Optional[List[dict]]=None, ids: Optional[List[str]]=None, ** kwargs: Any) ->ADBVST: """Create an Astra DB vectorstore from raw texts. Args: texts (List[str]): the texts to insert. embedding (Embeddings): the embedding function to use in the store. metadatas (Optional[List[dict]]): metadata dicts for the texts. ids (Optional[List[str]]): ids to associate to the texts. *Additional arguments*: you can pass any argument that you would to 'add_texts' and/or to the 'AstraDB' class constructor (see these methods for details). These arguments will be routed to the respective methods as they are. Returns: an `AstraDb` vectorstore. """ known_kwargs = {'collection_name', 'token', 'api_endpoint', 'astra_db_client', 'namespace', 'metric', 'batch_size', 'bulk_insert_batch_concurrency', 'bulk_insert_overwrite_concurrency', 'bulk_delete_concurrency', 'batch_concurrency', 'overwrite_concurrency'} if kwargs: unknown_kwargs = set(kwargs.keys()) - known_kwargs if unknown_kwargs: warnings.warn( f"Method 'from_texts' of AstraDB vector store invoked with unsupported arguments ({', '.join(sorted(unknown_kwargs))}), which will be ignored." ) collection_name: str = kwargs['collection_name'] token = kwargs.get('token') api_endpoint = kwargs.get('api_endpoint') astra_db_client = kwargs.get('astra_db_client') namespace = kwargs.get('namespace') metric = kwargs.get('metric') astra_db_store = cls(embedding=embedding, collection_name= collection_name, token=token, api_endpoint=api_endpoint, astra_db_client=astra_db_client, namespace=namespace, metric=metric, batch_size=kwargs.get('batch_size'), bulk_insert_batch_concurrency= kwargs.get('bulk_insert_batch_concurrency'), bulk_insert_overwrite_concurrency=kwargs.get( 'bulk_insert_overwrite_concurrency'), bulk_delete_concurrency= kwargs.get('bulk_delete_concurrency')) astra_db_store.add_texts(texts=texts, metadatas=metadatas, ids=ids, batch_size=kwargs.get('batch_size'), batch_concurrency=kwargs.get( 'batch_concurrency'), overwrite_concurrency=kwargs.get( 'overwrite_concurrency')) return astra_db_store
Create an Astra DB vectorstore from raw texts.

Args:
    texts (List[str]): the texts to insert.
    embedding (Embeddings): the embedding function to use in the store.
    metadatas (Optional[List[dict]]): metadata dicts for the texts.
    ids (Optional[List[str]]): ids to associate to the texts.
    *Additional arguments*: you can pass any argument that you would pass
        to 'add_texts' and/or to the 'AstraDB' class constructor
        (see these methods for details). These arguments will be routed
        to the respective methods as they are.

Returns:
    an `AstraDB` vectorstore.
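A minimal usage sketch for the constructor above. It assumes the class is exported as langchain_community.vectorstores.AstraDB and that valid Astra DB credentials are available; the endpoint, token, and collection name below are placeholders. Note that collection_name is required by this code path (it is read with kwargs['collection_name']).

from langchain_community.embeddings import OpenAIEmbeddings
from langchain_community.vectorstores import AstraDB

# Placeholder credentials -- replace with your own Astra DB values.
store = AstraDB.from_texts(
    texts=["alpha", "beta"],
    embedding=OpenAIEmbeddings(),
    collection_name="demo_collection",   # required by this constructor path
    api_endpoint="https://<db-id>-<region>.apps.astra.datastax.com",
    token="AstraCS:...",
)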
_llm_type
"""Return type of llm.""" return 'google_palm'
@property def _llm_type(self) ->str: """Return type of llm.""" return 'google_palm'
Return type of llm.
test_similarity_search_without_metadata
"""Test end to end construction and search without metadata.""" texts = ['foo', 'bar', 'baz'] docsearch = ElasticVectorSearch.from_texts(texts, FakeEmbeddings(), elasticsearch_url=elasticsearch_url) output = docsearch.similarity_search('foo', k=1) assert output == [Document(page_content='foo')]
def test_similarity_search_without_metadata(self, elasticsearch_url: str ) ->None: """Test end to end construction and search without metadata.""" texts = ['foo', 'bar', 'baz'] docsearch = ElasticVectorSearch.from_texts(texts, FakeEmbeddings(), elasticsearch_url=elasticsearch_url) output = docsearch.similarity_search('foo', k=1) assert output == [Document(page_content='foo')]
Test end to end construction and search without metadata.
generate_queries
"""Generate queries based upon user input. Args: question: user query Returns: List of LLM generated queries that are similar to the user input """ response = self.llm_chain({'question': question}, callbacks=run_manager. get_child()) lines = getattr(response['text'], self.parser_key, []) if self.verbose: logger.info(f'Generated queries: {lines}') return lines
def generate_queries(self, question: str, run_manager: CallbackManagerForRetrieverRun) ->List[str]: """Generate queries based upon user input. Args: question: user query Returns: List of LLM generated queries that are similar to the user input """ response = self.llm_chain({'question': question}, callbacks=run_manager .get_child()) lines = getattr(response['text'], self.parser_key, []) if self.verbose: logger.info(f'Generated queries: {lines}') return lines
Generate queries based upon user input. Args: question: user query Returns: List of LLM generated queries that are similar to the user input
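generate_queries is the core step of the multi-query retriever; a hedged sketch of how the surrounding retriever is typically wired up (it assumes the public MultiQueryRetriever.from_llm constructor, and `vectorstore` and `llm` are placeholders that must already exist):

from langchain.retrievers.multi_query import MultiQueryRetriever

# `vectorstore` and `llm` are assumed to exist already (placeholders).
retriever = MultiQueryRetriever.from_llm(
    retriever=vectorstore.as_retriever(),
    llm=llm,
)
docs = retriever.get_relevant_documents("How does task decomposition work?")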
from_credentials
"""Convenience constructor that builds TrelloClient init param for you. Args: board_name: The name of the Trello board. api_key: Trello API key. Can also be specified as environment variable TRELLO_API_KEY. token: Trello token. Can also be specified as environment variable TRELLO_TOKEN. include_card_name: Whether to include the name of the card in the document. include_comments: Whether to include the comments on the card in the document. include_checklist: Whether to include the checklist on the card in the document. card_filter: Filter on card status. Valid values are "closed", "open", "all". extra_metadata: List of additional metadata fields to include as document metadata.Valid values are "due_date", "labels", "list", "closed". """ try: from trello import TrelloClient except ImportError as ex: raise ImportError( 'Could not import trello python package. Please install it with `pip install py-trello`.' ) from ex api_key = api_key or get_from_env('api_key', 'TRELLO_API_KEY') token = token or get_from_env('token', 'TRELLO_TOKEN') client = TrelloClient(api_key=api_key, token=token) return cls(client, board_name, **kwargs)
@classmethod
def from_credentials(cls, board_name: str, *, api_key: Optional[str]=None,
        token: Optional[str]=None, **kwargs: Any) ->TrelloLoader:
    """Convenience constructor that builds TrelloClient init param for you.

    Args:
        board_name: The name of the Trello board.
        api_key: Trello API key. Can also be specified as environment variable
            TRELLO_API_KEY.
        token: Trello token. Can also be specified as environment variable
            TRELLO_TOKEN.
        include_card_name: Whether to include the name of the card in the document.
        include_comments: Whether to include the comments on the card in the document.
        include_checklist: Whether to include the checklist on the card in the document.
        card_filter: Filter on card status. Valid values are "closed", "open", "all".
        extra_metadata: List of additional metadata fields to include as document
            metadata. Valid values are "due_date", "labels", "list", "closed".
    """
    try:
        from trello import TrelloClient
    except ImportError as ex:
        raise ImportError(
            'Could not import trello python package. Please install it with `pip install py-trello`.'
            ) from ex
    api_key = api_key or get_from_env('api_key', 'TRELLO_API_KEY')
    token = token or get_from_env('token', 'TRELLO_TOKEN')
    client = TrelloClient(api_key=api_key, token=token)
    return cls(client, board_name, **kwargs)
Convenience constructor that builds TrelloClient init param for you.

Args:
    board_name: The name of the Trello board.
    api_key: Trello API key. Can also be specified as environment variable
        TRELLO_API_KEY.
    token: Trello token. Can also be specified as environment variable
        TRELLO_TOKEN.
    include_card_name: Whether to include the name of the card in the document.
    include_comments: Whether to include the comments on the card in the document.
    include_checklist: Whether to include the checklist on the card in the document.
    card_filter: Filter on card status. Valid values are "closed", "open", "all".
    extra_metadata: List of additional metadata fields to include as document
        metadata. Valid values are "due_date", "labels", "list", "closed".
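A hedged usage sketch for the constructor above (the board name and credentials are placeholders; the loader is assumed to be importable from langchain_community.document_loaders, and card_filter / extra_metadata are among the keyword arguments listed in the docstring):

from langchain_community.document_loaders import TrelloLoader

loader = TrelloLoader.from_credentials(
    "Planning Board",                  # board_name (placeholder)
    api_key="...",                     # or set TRELLO_API_KEY
    token="...",                       # or set TRELLO_TOKEN
    card_filter="open",
    extra_metadata=["due_date", "labels"],
)
docs = loader.load()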
_import_marqo
from langchain_community.vectorstores.marqo import Marqo return Marqo
def _import_marqo() ->Any: from langchain_community.vectorstores.marqo import Marqo return Marqo
null
test_load_single_page
loader = GitbookLoader(web_page) result = loader.load() assert len(result) == expected_number_results
@pytest.mark.parametrize('web_page, expected_number_results', [( 'https://platform-docs.opentargets.org/getting-started', 1)]) def test_load_single_page(self, web_page: str, expected_number_results: int ) ->None: loader = GitbookLoader(web_page) result = loader.load() assert len(result) == expected_number_results
null
test_openai_streaming
"""Test streaming tokens from OpenAI.""" for token in llm.stream("I'm Pickle Rick"): assert isinstance(token.content, str)
@pytest.mark.scheduled def test_openai_streaming(llm: AzureChatOpenAI) ->None: """Test streaming tokens from OpenAI.""" for token in llm.stream("I'm Pickle Rick"): assert isinstance(token.content, str)
Test streaming tokens from OpenAI.
__get_headers
is_managed = self.url == MANAGED_URL headers = {'Content-Type': 'application/json'} if is_managed and not (self.api_key and self.client_id): raise ValueError( """ You must provide an API key or a client ID to use the managed version of Motorhead. Visit https://getmetal.io for more information. """ ) if is_managed and self.api_key and self.client_id: headers['x-metal-api-key'] = self.api_key headers['x-metal-client-id'] = self.client_id return headers
def __get_headers(self) ->Dict[str, str]: is_managed = self.url == MANAGED_URL headers = {'Content-Type': 'application/json'} if is_managed and not (self.api_key and self.client_id): raise ValueError( """ You must provide an API key or a client ID to use the managed version of Motorhead. Visit https://getmetal.io for more information. """ ) if is_managed and self.api_key and self.client_id: headers['x-metal-api-key'] = self.api_key headers['x-metal-client-id'] = self.client_id return headers
null
test_call
"""Test valid call to qianfan.""" llm = QianfanLLMEndpoint() output = llm('write a joke') assert isinstance(output, str)
def test_call() ->None: """Test valid call to qianfan.""" llm = QianfanLLMEndpoint() output = llm('write a joke') assert isinstance(output, str)
Test valid call to qianfan.
router
if input['key'] == 'math': return itemgetter('input') | math_chain elif input['key'] == 'english': return itemgetter('input') | english_chain else: raise ValueError(f"Unknown key: {input['key']}")
def router(input: Dict[str, Any]) ->Runnable: if input['key'] == 'math': return itemgetter('input') | math_chain elif input['key'] == 'english': return itemgetter('input') | english_chain else: raise ValueError(f"Unknown key: {input['key']}")
null
full_key_prefix
return f'{self.key_prefix}:{self.session_id}'
@property def full_key_prefix(self) ->str: return f'{self.key_prefix}:{self.session_id}'
null
test_api_key_masked_when_passed_via_constructor
llm = ChatTongyi(dashscope_api_key='secret-api-key') print(llm.dashscope_api_key, end='') captured = capsys.readouterr() assert captured.out == '**********'
def test_api_key_masked_when_passed_via_constructor(capsys: CaptureFixture ) ->None: llm = ChatTongyi(dashscope_api_key='secret-api-key') print(llm.dashscope_api_key, end='') captured = capsys.readouterr() assert captured.out == '**********'
null
parse
"""Parse the output of an LLM call.""" match = re.search(self.regex, text) if match: return {key: match.group(i + 1) for i, key in enumerate(self.output_keys)} elif self.default_output_key is None: raise ValueError(f'Could not parse output: {text}') else: return {key: (text if key == self.default_output_key else '') for key in self.output_keys}
def parse(self, text: str) ->Dict[str, str]: """Parse the output of an LLM call.""" match = re.search(self.regex, text) if match: return {key: match.group(i + 1) for i, key in enumerate(self. output_keys)} elif self.default_output_key is None: raise ValueError(f'Could not parse output: {text}') else: return {key: (text if key == self.default_output_key else '') for key in self.output_keys}
Parse the output of an LLM call.
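A small, self-contained sketch of how this parser behaves; the import path is assumed, and the regex and keys are illustrative only:

from langchain.output_parsers.regex import RegexParser

parser = RegexParser(
    regex=r"Score: (\d+)\nReason: (.*)",
    output_keys=["score", "reason"],
    default_output_key="reason",
)

parser.parse("Score: 8\nReason: concise and well grounded")
# -> {'score': '8', 'reason': 'concise and well grounded'}
parser.parse("no structured output here")
# no match, so the whole text falls back to the default key:
# -> {'score': '', 'reason': 'no structured output here'}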
test_parse_invalid_grammar
with pytest.raises((ValueError, lark.exceptions.UnexpectedToken)): DEFAULT_PARSER.parse_folder(x)
@pytest.mark.parametrize('x', ('', 'foo', 'foo("bar", "baz")')) def test_parse_invalid_grammar(x: str) ->None: with pytest.raises((ValueError, lark.exceptions.UnexpectedToken)): DEFAULT_PARSER.parse_folder(x)
null
_on_llm_end
"""Process the LLM Run.""" self._process_end_trace(run)
def _on_llm_end(self, run: 'Run') ->None: """Process the LLM Run.""" self._process_end_trace(run)
Process the LLM Run.
__init__
self.client = client self.moderation_beacon = {'moderation_chain_id': chain_id, 'moderation_type': 'Toxicity', 'moderation_status': 'LABELS_NOT_FOUND'} self.callback = callback self.unique_id = unique_id
def __init__(self, client: Any, callback: Optional[Any]=None, unique_id: Optional[str]=None, chain_id: Optional[str]=None) ->None: self.client = client self.moderation_beacon = {'moderation_chain_id': chain_id, 'moderation_type': 'Toxicity', 'moderation_status': 'LABELS_NOT_FOUND'} self.callback = callback self.unique_id = unique_id
null
__init__
super().__init__(criteria=criteria, **kwargs)
def __init__(self, criteria: Optional[CRITERIA_TYPE]=None, **kwargs: Any ) ->None: super().__init__(criteria=criteria, **kwargs)
null
test_graph_cypher_qa_chain_prompt_selection_3
memory = ConversationBufferMemory(memory_key='chat_history') readonlymemory = ReadOnlySharedMemory(memory=memory) chain = GraphCypherQAChain.from_llm(llm=FakeLLM(), graph=FakeGraphStore(), verbose=True, return_intermediate_steps=False, cypher_llm_kwargs={ 'memory': readonlymemory}, qa_llm_kwargs={'memory': readonlymemory}) assert chain.qa_chain.prompt == CYPHER_QA_PROMPT assert chain.cypher_generation_chain.prompt == CYPHER_GENERATION_PROMPT
def test_graph_cypher_qa_chain_prompt_selection_3() ->None: memory = ConversationBufferMemory(memory_key='chat_history') readonlymemory = ReadOnlySharedMemory(memory=memory) chain = GraphCypherQAChain.from_llm(llm=FakeLLM(), graph=FakeGraphStore (), verbose=True, return_intermediate_steps=False, cypher_llm_kwargs={'memory': readonlymemory}, qa_llm_kwargs={ 'memory': readonlymemory}) assert chain.qa_chain.prompt == CYPHER_QA_PROMPT assert chain.cypher_generation_chain.prompt == CYPHER_GENERATION_PROMPT
null
batch
"""Default implementation runs invoke in parallel using a thread pool executor. The default implementation of batch works well for IO bound runnables. Subclasses should override this method if they can batch more efficiently; e.g., if the underlying runnable uses an API which supports a batch mode. """ if not inputs: return [] configs = get_config_list(config, len(inputs)) def invoke(input: Input, config: RunnableConfig) ->Union[Output, Exception]: if return_exceptions: try: return self.invoke(input, config, **kwargs) except Exception as e: return e else: return self.invoke(input, config, **kwargs) if len(inputs) == 1: return cast(List[Output], [invoke(inputs[0], configs[0])]) with get_executor_for_config(configs[0]) as executor: return cast(List[Output], list(executor.map(invoke, inputs, configs)))
def batch(self, inputs: List[Input], config: Optional[Union[RunnableConfig, List[RunnableConfig]]]=None, *, return_exceptions: bool=False, **kwargs: Optional[Any]) ->List[Output]: """Default implementation runs invoke in parallel using a thread pool executor. The default implementation of batch works well for IO bound runnables. Subclasses should override this method if they can batch more efficiently; e.g., if the underlying runnable uses an API which supports a batch mode. """ if not inputs: return [] configs = get_config_list(config, len(inputs)) def invoke(input: Input, config: RunnableConfig) ->Union[Output, Exception ]: if return_exceptions: try: return self.invoke(input, config, **kwargs) except Exception as e: return e else: return self.invoke(input, config, **kwargs) if len(inputs) == 1: return cast(List[Output], [invoke(inputs[0], configs[0])]) with get_executor_for_config(configs[0]) as executor: return cast(List[Output], list(executor.map(invoke, inputs, configs)))
Default implementation runs invoke in parallel using a thread pool executor. The default implementation of batch works well for IO bound runnables. Subclasses should override this method if they can batch more efficiently; e.g., if the underlying runnable uses an API which supports a batch mode.
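A short sketch of the behavior described above, using RunnableLambda as a stand-in runnable (the import path is assumed to be langchain_core.runnables):

from langchain_core.runnables import RunnableLambda

def reciprocal(x: int) -> float:
    return 1 / x

runnable = RunnableLambda(reciprocal)

runnable.batch([1, 2, 4])                        # -> [1.0, 0.5, 0.25]
runnable.batch([1, 0], return_exceptions=True)   # second item is a ZeroDivisionError instance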
_import_json_tool_JsonListKeysTool
from langchain_community.tools.json.tool import JsonListKeysTool return JsonListKeysTool
def _import_json_tool_JsonListKeysTool() ->Any: from langchain_community.tools.json.tool import JsonListKeysTool return JsonListKeysTool
null
test_openai_opeanapi
chain = get_openapi_chain( 'https://www.klarna.com/us/shopping/public/openai/v0/api-docs/') output = chain.run( "What are some options for a men's large blue button down shirt") assert isinstance(output, dict)
def test_openai_opeanapi() ->None: chain = get_openapi_chain( 'https://www.klarna.com/us/shopping/public/openai/v0/api-docs/') output = chain.run( "What are some options for a men's large blue button down shirt") assert isinstance(output, dict)
null
test_document_found
dummy_dict = {'foo': Document(page_content='bar')} docstore = DocstoreFn(lambda x: dummy_dict[x]) output = docstore.search('foo') assert isinstance(output, Document) assert output.page_content == 'bar'
def test_document_found() ->None: dummy_dict = {'foo': Document(page_content='bar')} docstore = DocstoreFn(lambda x: dummy_dict[x]) output = docstore.search('foo') assert isinstance(output, Document) assert output.page_content == 'bar'
null
parse_dependencies
num_deps = max(len(dependencies) if dependencies is not None else 0, len( repo), len(branch)) if dependencies and len(dependencies) != num_deps or api_path and len(api_path ) != num_deps or repo and len(repo) not in [1, num_deps] or branch and len( branch) not in [1, num_deps]: raise ValueError( 'Number of defined repos/branches/api_paths did not match the number of templates.' ) inner_deps = _list_arg_to_length(dependencies, num_deps) inner_api_paths = _list_arg_to_length(api_path, num_deps) inner_repos = _list_arg_to_length(repo, num_deps) inner_branches = _list_arg_to_length(branch, num_deps) return [parse_dependency_string(iter_dep, iter_repo, iter_branch, iter_api_path) for iter_dep, iter_repo, iter_branch, iter_api_path in zip(inner_deps, inner_repos, inner_branches, inner_api_paths)]
def parse_dependencies(dependencies: Optional[List[str]], repo: List[str], branch: List[str], api_path: List[str]) ->List[DependencySource]: num_deps = max(len(dependencies) if dependencies is not None else 0, len(repo), len(branch)) if dependencies and len(dependencies) != num_deps or api_path and len( api_path) != num_deps or repo and len(repo) not in [1, num_deps ] or branch and len(branch) not in [1, num_deps]: raise ValueError( 'Number of defined repos/branches/api_paths did not match the number of templates.' ) inner_deps = _list_arg_to_length(dependencies, num_deps) inner_api_paths = _list_arg_to_length(api_path, num_deps) inner_repos = _list_arg_to_length(repo, num_deps) inner_branches = _list_arg_to_length(branch, num_deps) return [parse_dependency_string(iter_dep, iter_repo, iter_branch, iter_api_path) for iter_dep, iter_repo, iter_branch, iter_api_path in zip(inner_deps, inner_repos, inner_branches, inner_api_paths)]
null
test_openai_callback_batch_llm
llm = OpenAI(temperature=0) with get_openai_callback() as cb: llm.generate(['What is the square root of 4?', 'What is the square root of 4?']) assert cb.total_tokens > 0 total_tokens = cb.total_tokens with get_openai_callback() as cb: llm('What is the square root of 4?') llm('What is the square root of 4?') assert cb.total_tokens == total_tokens
def test_openai_callback_batch_llm() ->None: llm = OpenAI(temperature=0) with get_openai_callback() as cb: llm.generate(['What is the square root of 4?', 'What is the square root of 4?']) assert cb.total_tokens > 0 total_tokens = cb.total_tokens with get_openai_callback() as cb: llm('What is the square root of 4?') llm('What is the square root of 4?') assert cb.total_tokens == total_tokens
null
_import_office365_events_search
from langchain_community.tools.office365.events_search import O365SearchEvents return O365SearchEvents
def _import_office365_events_search() ->Any: from langchain_community.tools.office365.events_search import O365SearchEvents return O365SearchEvents
null
_create_cls_from_kwargs
index_name = kwargs.get('index_name') if index_name is None: raise ValueError('Please provide an index_name.') es_connection = kwargs.get('es_connection') es_cloud_id = kwargs.get('es_cloud_id') es_url = kwargs.get('es_url') es_user = kwargs.get('es_user') es_password = kwargs.get('es_password') es_api_key = kwargs.get('es_api_key') vector_query_field = kwargs.get('vector_query_field') query_field = kwargs.get('query_field') distance_strategy = kwargs.get('distance_strategy') strategy = kwargs.get('strategy', ElasticsearchStore.ApproxRetrievalStrategy()) optional_args = {} if vector_query_field is not None: optional_args['vector_query_field'] = vector_query_field if query_field is not None: optional_args['query_field'] = query_field return ElasticsearchStore(index_name=index_name, embedding=embedding, es_url=es_url, es_connection=es_connection, es_cloud_id=es_cloud_id, es_user=es_user, es_password=es_password, es_api_key=es_api_key, strategy=strategy, distance_strategy=distance_strategy, **optional_args)
@staticmethod def _create_cls_from_kwargs(embedding: Optional[Embeddings]=None, **kwargs: Any ) ->'ElasticsearchStore': index_name = kwargs.get('index_name') if index_name is None: raise ValueError('Please provide an index_name.') es_connection = kwargs.get('es_connection') es_cloud_id = kwargs.get('es_cloud_id') es_url = kwargs.get('es_url') es_user = kwargs.get('es_user') es_password = kwargs.get('es_password') es_api_key = kwargs.get('es_api_key') vector_query_field = kwargs.get('vector_query_field') query_field = kwargs.get('query_field') distance_strategy = kwargs.get('distance_strategy') strategy = kwargs.get('strategy', ElasticsearchStore. ApproxRetrievalStrategy()) optional_args = {} if vector_query_field is not None: optional_args['vector_query_field'] = vector_query_field if query_field is not None: optional_args['query_field'] = query_field return ElasticsearchStore(index_name=index_name, embedding=embedding, es_url=es_url, es_connection=es_connection, es_cloud_id=es_cloud_id, es_user=es_user, es_password=es_password, es_api_key=es_api_key, strategy=strategy, distance_strategy=distance_strategy, **optional_args )
null
run
"""Run body through Twilio and respond with message sid. Args: body: The text of the message you want to send. Can be up to 1,600 characters in length. to: The destination phone number in [E.164](https://www.twilio.com/docs/glossary/what-e164) format for SMS/MMS or [Channel user address](https://www.twilio.com/docs/sms/channels#channel-addresses) for other 3rd-party channels. """ message = self.client.messages.create(to, from_=self.from_number, body=body) return message.sid
def run(self, body: str, to: str) ->str: """Run body through Twilio and respond with message sid. Args: body: The text of the message you want to send. Can be up to 1,600 characters in length. to: The destination phone number in [E.164](https://www.twilio.com/docs/glossary/what-e164) format for SMS/MMS or [Channel user address](https://www.twilio.com/docs/sms/channels#channel-addresses) for other 3rd-party channels. """ message = self.client.messages.create(to, from_=self.from_number, body=body ) return message.sid
Run body through Twilio and respond with message sid. Args: body: The text of the message you want to send. Can be up to 1,600 characters in length. to: The destination phone number in [E.164](https://www.twilio.com/docs/glossary/what-e164) format for SMS/MMS or [Channel user address](https://www.twilio.com/docs/sms/channels#channel-addresses) for other 3rd-party channels.
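A hedged usage sketch for the method above (it assumes the wrapper is TwilioAPIWrapper from langchain_community.utilities.twilio; the SID, token, and phone numbers are placeholders):

from langchain_community.utilities.twilio import TwilioAPIWrapper

twilio = TwilioAPIWrapper(
    account_sid="ACxxxxxxxxxxxxxxxx",   # placeholder
    auth_token="your-auth-token",       # placeholder
    from_number="+15551234567",         # placeholder sender in E.164 format
)
sid = twilio.run("Hello from LangChain!", "+15557654321")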
_chunk
for i in range(0, len(texts), size): yield texts[i:i + size]
def _chunk(texts: List[str], size: int) ->Iterator[List[str]]: for i in range(0, len(texts), size): yield texts[i:i + size]
null
_call
_run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager() _run_manager.on_text(inputs[self.input_key]) llm_output = self.llm_chain.predict(question=inputs[self.input_key], stop=[ '```output'], callbacks=_run_manager.get_child()) return self._process_llm_result(llm_output, _run_manager)
def _call(self, inputs: Dict[str, str], run_manager: Optional[ CallbackManagerForChainRun]=None) ->Dict[str, str]: _run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager() _run_manager.on_text(inputs[self.input_key]) llm_output = self.llm_chain.predict(question=inputs[self.input_key], stop=['```output'], callbacks=_run_manager.get_child()) return self._process_llm_result(llm_output, _run_manager)
null
similarity_search_with_score
"""Perform a search on a query string and return results with score. For more information about the search parameters, take a look at the pymilvus documentation found here: https://milvus.io/api-reference/pymilvus/v2.2.6/Collection/search().md Args: query (str): The text being searched. k (int, optional): The amount of results to return. Defaults to 4. param (dict): The search params for the specified index. Defaults to None. expr (str, optional): Filtering expression. Defaults to None. timeout (int, optional): How long to wait before timeout error. Defaults to None. kwargs: Collection.search() keyword arguments. Returns: List[float], List[Tuple[Document, any, any]]: """ if self.col is None: logger.debug('No existing collection to search.') return [] embedding = self.embedding_func.embed_query(query) res = self.similarity_search_with_score_by_vector(embedding=embedding, k=k, param=param, expr=expr, timeout=timeout, **kwargs) return res
def similarity_search_with_score(self, query: str, k: int=4, param:
        Optional[dict]=None, expr: Optional[str]=None, timeout: Optional[int]=None,
        **kwargs: Any) ->List[Tuple[Document, float]]:
    """Perform a search on a query string and return results with score.

    For more information about the search parameters, take a look at the
    pymilvus documentation found here:
    https://milvus.io/api-reference/pymilvus/v2.2.6/Collection/search().md

    Args:
        query (str): The text being searched.
        k (int, optional): The number of results to return. Defaults to 4.
        param (dict): The search params for the specified index.
            Defaults to None.
        expr (str, optional): Filtering expression. Defaults to None.
        timeout (int, optional): How long to wait before timeout error.
            Defaults to None.
        kwargs: Collection.search() keyword arguments.

    Returns:
        List[Tuple[Document, float]]: the documents most similar to the query
            text, each paired with its score.
    """
    if self.col is None:
        logger.debug('No existing collection to search.')
        return []
    embedding = self.embedding_func.embed_query(query)
    res = self.similarity_search_with_score_by_vector(embedding=embedding,
        k=k, param=param, expr=expr, timeout=timeout, **kwargs)
    return res
Perform a search on a query string and return results with score.

For more information about the search parameters, take a look at the
pymilvus documentation found here:
https://milvus.io/api-reference/pymilvus/v2.2.6/Collection/search().md

Args:
    query (str): The text being searched.
    k (int, optional): The number of results to return. Defaults to 4.
    param (dict): The search params for the specified index.
        Defaults to None.
    expr (str, optional): Filtering expression. Defaults to None.
    timeout (int, optional): How long to wait before timeout error.
        Defaults to None.
    kwargs: Collection.search() keyword arguments.

Returns:
    List[Tuple[Document, float]]: the documents most similar to the query
        text, each paired with its score.
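A usage sketch of the method above. It assumes `vector_store` is an already-initialized Milvus vector store; the search params and filter expression are illustrative and depend on the index configuration.

# `vector_store` is an existing Milvus vector store (assumption).
results = vector_store.similarity_search_with_score(
    "What is a vector database?",
    k=3,
    param={"metric_type": "L2", "params": {"ef": 64}},  # index-specific, illustrative
    expr='source == "docs"',                            # illustrative filter expression
)
for doc, score in results:
    print(round(score, 4), doc.page_content[:60])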
transform
return self._transform_stream_with_config(input, self._transform, config, **kwargs)
def transform(self, input: Iterator[Input], config: Optional[RunnableConfig ]=None, **kwargs: Any) ->Iterator[Output]: return self._transform_stream_with_config(input, self._transform, config, **kwargs)
null
raise_deprecation
warnings.warn( '`VectorDBQA` is deprecated - please use `from langchain.chains import RetrievalQA`' ) return values
@root_validator() def raise_deprecation(cls, values: Dict) ->Dict: warnings.warn( '`VectorDBQA` is deprecated - please use `from langchain.chains import RetrievalQA`' ) return values
null
_import_sql_database
from langchain_community.utilities.sql_database import SQLDatabase return SQLDatabase
def _import_sql_database() ->Any: from langchain_community.utilities.sql_database import SQLDatabase return SQLDatabase
null
similarity_search_with_score_by_vector
"""Return docs most similar to embedding vector. Args: embedding (str): Embedding to look up documents similar to. k (int): Number of Documents to return. Defaults to 4. Returns: List of (Document, score), the most similar to the query vector. """ return [(doc, score) for doc, score, doc_id in self. similarity_search_with_score_id_by_vector(embedding=embedding, k=k, filter=filter)]
def similarity_search_with_score_by_vector(self, embedding: List[float], k:
        int=4, filter: Optional[Dict[str, str]]=None) ->List[Tuple[Document, float]]:
    """Return docs most similar to embedding vector.

    Args:
        embedding (List[float]): Embedding to look up documents similar to.
        k (int): Number of Documents to return. Defaults to 4.

    Returns:
        List of (Document, score), the most similar to the query vector.
    """
    return [(doc, score) for doc, score, doc_id in self.
        similarity_search_with_score_id_by_vector(embedding=embedding, k=k,
        filter=filter)]
Return docs most similar to embedding vector.

Args:
    embedding (List[float]): Embedding to look up documents similar to.
    k (int): Number of Documents to return. Defaults to 4.

Returns:
    List of (Document, score), the most similar to the query vector.
_get_single_prompt
if suffix is not None: suffix_to_use = suffix include_df_head = True elif include_df_in_prompt: suffix_to_use = SUFFIX_WITH_DF include_df_head = True else: suffix_to_use = SUFFIX_NO_DF include_df_head = False if input_variables is None: input_variables = ['input', 'agent_scratchpad'] if include_df_head: input_variables += ['df_head'] if prefix is None: prefix = PREFIX tools = [PythonAstREPLTool(locals={'df': df})] + list(extra_tools) prompt = ZeroShotAgent.create_prompt(tools, prefix=prefix, suffix= suffix_to_use, input_variables=input_variables) partial_prompt = prompt.partial() if 'df_head' in input_variables: partial_prompt = partial_prompt.partial(df_head=str(df.head( number_of_head_rows).to_markdown())) return partial_prompt, tools
def _get_single_prompt(df: Any, prefix: Optional[str]=None, suffix: Optional[str]=None, input_variables: Optional[List[str]]=None, include_df_in_prompt: Optional[bool]=True, number_of_head_rows: int=5, extra_tools: Sequence[BaseTool]=()) ->Tuple[BasePromptTemplate, List[ BaseTool]]: if suffix is not None: suffix_to_use = suffix include_df_head = True elif include_df_in_prompt: suffix_to_use = SUFFIX_WITH_DF include_df_head = True else: suffix_to_use = SUFFIX_NO_DF include_df_head = False if input_variables is None: input_variables = ['input', 'agent_scratchpad'] if include_df_head: input_variables += ['df_head'] if prefix is None: prefix = PREFIX tools = [PythonAstREPLTool(locals={'df': df})] + list(extra_tools) prompt = ZeroShotAgent.create_prompt(tools, prefix=prefix, suffix= suffix_to_use, input_variables=input_variables) partial_prompt = prompt.partial() if 'df_head' in input_variables: partial_prompt = partial_prompt.partial(df_head=str(df.head( number_of_head_rows).to_markdown())) return partial_prompt, tools
null
test_extra_kwargs
chat = ChatHunyuan(temperature=0.88, top_p=0.7) assert chat.temperature == 0.88 assert chat.top_p == 0.7
def test_extra_kwargs() ->None: chat = ChatHunyuan(temperature=0.88, top_p=0.7) assert chat.temperature == 0.88 assert chat.top_p == 0.7
null
test_valid_arguments
loader = RSpaceLoader(url=TestRSpaceLoader.url, api_key=TestRSpaceLoader. api_key, global_id=TestRSpaceLoader.global_id) self.assertEqual(TestRSpaceLoader.url, loader.url) self.assertEqual(TestRSpaceLoader.api_key, loader.api_key) self.assertEqual(TestRSpaceLoader.global_id, loader.global_id)
def test_valid_arguments(self) ->None: loader = RSpaceLoader(url=TestRSpaceLoader.url, api_key= TestRSpaceLoader.api_key, global_id=TestRSpaceLoader.global_id) self.assertEqual(TestRSpaceLoader.url, loader.url) self.assertEqual(TestRSpaceLoader.api_key, loader.api_key) self.assertEqual(TestRSpaceLoader.global_id, loader.global_id)
null
multi_modal_rag_chain
""" Multi-modal RAG chain, :param retriever: A function that retrieves the necessary context for the model. :return: A chain of functions representing the multi-modal RAG process. """ model = ChatOpenAI(temperature=0, model='gpt-4-vision-preview', max_tokens=1024 ) chain = {'context': retriever | RunnableLambda(get_resized_images), 'question': RunnablePassthrough()} | RunnableLambda(img_prompt_func ) | model | StrOutputParser() return chain
def multi_modal_rag_chain(retriever):
    """
    Multi-modal RAG chain.

    :param retriever: A function that retrieves the necessary context for the model.
    :return: A chain of functions representing the multi-modal RAG process.
    """
    model = ChatOpenAI(temperature=0, model='gpt-4-vision-preview',
        max_tokens=1024)
    chain = {'context': retriever | RunnableLambda(get_resized_images),
        'question': RunnablePassthrough()} | RunnableLambda(img_prompt_func
        ) | model | StrOutputParser()
    return chain
Multi-modal RAG chain.

:param retriever: A function that retrieves the necessary context for the model.
:return: A chain of functions representing the multi-modal RAG process.
test_php_code_splitter
splitter = RecursiveCharacterTextSplitter.from_language(Language.PHP, chunk_size=CHUNK_SIZE, chunk_overlap=0) code = """ <?php function hello_world() { echo "Hello, World!"; } hello_world(); ?> """ chunks = splitter.split_text(code) assert chunks == ['<?php', 'function', 'hello_world() {', 'echo', '"Hello,', 'World!";', '}', 'hello_world();', '?>']
def test_php_code_splitter() ->None: splitter = RecursiveCharacterTextSplitter.from_language(Language.PHP, chunk_size=CHUNK_SIZE, chunk_overlap=0) code = """ <?php function hello_world() { echo "Hello, World!"; } hello_world(); ?> """ chunks = splitter.split_text(code) assert chunks == ['<?php', 'function', 'hello_world() {', 'echo', '"Hello,', 'World!";', '}', 'hello_world();', '?>']
null
__init__
"""Initialize a MarkdownTextSplitter.""" separators = self.get_separators_for_language(Language.MARKDOWN) super().__init__(separators=separators, **kwargs)
def __init__(self, **kwargs: Any) ->None: """Initialize a MarkdownTextSplitter.""" separators = self.get_separators_for_language(Language.MARKDOWN) super().__init__(separators=separators, **kwargs)
Initialize a MarkdownTextSplitter.
__init__
"""Initialize MaxCompute document loader. Args: client: odps.ODPS MaxCompute client object. """ self.client = client
def __init__(self, client: ODPS): """Initialize MaxCompute document loader. Args: client: odps.ODPS MaxCompute client object. """ self.client = client
Initialize MaxCompute document loader. Args: client: odps.ODPS MaxCompute client object.
_import_elasticsearch
from langchain_community.vectorstores.elasticsearch import ElasticsearchStore return ElasticsearchStore
def _import_elasticsearch() ->Any: from langchain_community.vectorstores.elasticsearch import ElasticsearchStore return ElasticsearchStore
null
test_unstructured_tsv_loader
"""Test unstructured loader.""" file_path = os.path.join(EXAMPLE_DIRECTORY, 'stanley-cups.tsv') loader = UnstructuredTSVLoader(str(file_path)) docs = loader.load() assert len(docs) == 1
def test_unstructured_tsv_loader() ->None: """Test unstructured loader.""" file_path = os.path.join(EXAMPLE_DIRECTORY, 'stanley-cups.tsv') loader = UnstructuredTSVLoader(str(file_path)) docs = loader.load() assert len(docs) == 1
Test unstructured loader.
test_pai_eas_v1_streaming
"""Test streaming call to PAI-EAS Service.""" llm = PaiEasEndpoint(eas_service_url=os.getenv('EAS_SERVICE_URL'), eas_service_token=os.getenv('EAS_SERVICE_TOKEN'), version='1.0') generator = llm.stream("Q: How do you say 'hello' in German? A:'", stop=['.']) stream_results_string = '' assert isinstance(generator, Generator) for chunk in generator: assert isinstance(chunk, str) stream_results_string = chunk assert len(stream_results_string.strip()) > 1
def test_pai_eas_v1_streaming() ->None: """Test streaming call to PAI-EAS Service.""" llm = PaiEasEndpoint(eas_service_url=os.getenv('EAS_SERVICE_URL'), eas_service_token=os.getenv('EAS_SERVICE_TOKEN'), version='1.0') generator = llm.stream("Q: How do you say 'hello' in German? A:'", stop =['.']) stream_results_string = '' assert isinstance(generator, Generator) for chunk in generator: assert isinstance(chunk, str) stream_results_string = chunk assert len(stream_results_string.strip()) > 1
Test streaming call to PAI-EAS Service.
test_results_exists
"""Test that results gives the correct output format.""" search = api_client.results(query='What is the best programming language?', sort='relevance', time_filter='all', subreddit='all', limit=10) assert_results_exists(search)
@pytest.mark.requires('praw') def test_results_exists(api_client: RedditSearchAPIWrapper) ->None: """Test that results gives the correct output format.""" search = api_client.results(query= 'What is the best programming language?', sort='relevance', time_filter='all', subreddit='all', limit=10) assert_results_exists(search)
Test that results gives the correct output format.
add
"""Add a sequence of addable objects together.""" final = None for chunk in addables: if final is None: final = chunk else: final = final + chunk return final
def add(addables: Iterable[Addable]) ->Optional[Addable]: """Add a sequence of addable objects together.""" final = None for chunk in addables: if final is None: final = chunk else: final = final + chunk return final
Add a sequence of addable objects together.
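The helper simply folds any sequence of values that support "+", returning None when the sequence is empty; a quick behavioral sketch:

add(["foo", "bar", "baz"])   # -> 'foobarbaz'
add([1, 2, 3])               # -> 6
add([])                      # -> None (nothing to fold)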
_reset
_task_type = task_type if task_type else self.task_type _workspace = workspace if workspace else self.workspace _project_name = project_name if project_name else self.project_name _tags = tags if tags else self.tags _name = name if name else self.name _visualizations = visualizations if visualizations else self.visualizations _complexity_metrics = (complexity_metrics if complexity_metrics else self. complexity_metrics) _custom_metrics = custom_metrics if custom_metrics else self.custom_metrics self.__init__(task_type=_task_type, workspace=_workspace, project_name= _project_name, tags=_tags, name=_name, visualizations=_visualizations, complexity_metrics=_complexity_metrics, custom_metrics=_custom_metrics) self.reset_callback_meta() self.temp_dir = tempfile.TemporaryDirectory()
def _reset(self, task_type: Optional[str]=None, workspace: Optional[str]= None, project_name: Optional[str]=None, tags: Optional[Sequence]=None, name: Optional[str]=None, visualizations: Optional[List[str]]=None, complexity_metrics: bool=False, custom_metrics: Optional[Callable]=None ) ->None: _task_type = task_type if task_type else self.task_type _workspace = workspace if workspace else self.workspace _project_name = project_name if project_name else self.project_name _tags = tags if tags else self.tags _name = name if name else self.name _visualizations = visualizations if visualizations else self.visualizations _complexity_metrics = (complexity_metrics if complexity_metrics else self.complexity_metrics) _custom_metrics = custom_metrics if custom_metrics else self.custom_metrics self.__init__(task_type=_task_type, workspace=_workspace, project_name= _project_name, tags=_tags, name=_name, visualizations= _visualizations, complexity_metrics=_complexity_metrics, custom_metrics=_custom_metrics) self.reset_callback_meta() self.temp_dir = tempfile.TemporaryDirectory()
null
similarity_search_by_vector_with_score
"""Return docs most similar to the embedding and their cosine distance. Args: embedding: Embedding to look up documents similar to. k: Number of Documents to return. Defaults to 4. filter: Optional. A list of Namespaces for filtering the matching results. For example: [Namespace("color", ["red"], []), Namespace("shape", [], ["squared"])] will match datapoints that satisfy "red color" but not include datapoints with "squared shape". Please refer to https://cloud.google.com/vertex-ai/docs/matching-engine/filtering#json for more detail. Returns: List[Tuple[Document, float]]: List of documents most similar to the query text and cosine distance in float for each. Lower score represents more similarity. """ filter = filter or [] if hasattr(self.endpoint, '_public_match_client' ) and self.endpoint._public_match_client: response = self.endpoint.find_neighbors(deployed_index_id=self. _get_index_id(), queries=[embedding], num_neighbors=k, filter=filter) else: response = self.endpoint.match(deployed_index_id=self._get_index_id(), queries=[embedding], num_neighbors=k, filter=filter) logger.debug(f'Found {len(response)} matches.') if len(response) == 0: return [] docs: List[Tuple[Document, float]] = [] for result in response[0]: page_content = self._download_from_gcs(f'documents/{result.id}') metadata = {} if self.document_id_key is not None: metadata[self.document_id_key] = result.id document = Document(page_content=page_content, metadata=metadata) docs.append((document, result.distance)) logger.debug('Downloaded documents for query.') return docs
def similarity_search_by_vector_with_score(self, embedding: List[float], k: int=4, filter: Optional[List[Namespace]]=None) ->List[Tuple[Document, float]]: """Return docs most similar to the embedding and their cosine distance. Args: embedding: Embedding to look up documents similar to. k: Number of Documents to return. Defaults to 4. filter: Optional. A list of Namespaces for filtering the matching results. For example: [Namespace("color", ["red"], []), Namespace("shape", [], ["squared"])] will match datapoints that satisfy "red color" but not include datapoints with "squared shape". Please refer to https://cloud.google.com/vertex-ai/docs/matching-engine/filtering#json for more detail. Returns: List[Tuple[Document, float]]: List of documents most similar to the query text and cosine distance in float for each. Lower score represents more similarity. """ filter = filter or [] if hasattr(self.endpoint, '_public_match_client' ) and self.endpoint._public_match_client: response = self.endpoint.find_neighbors(deployed_index_id=self. _get_index_id(), queries=[embedding], num_neighbors=k, filter= filter) else: response = self.endpoint.match(deployed_index_id=self._get_index_id (), queries=[embedding], num_neighbors=k, filter=filter) logger.debug(f'Found {len(response)} matches.') if len(response) == 0: return [] docs: List[Tuple[Document, float]] = [] for result in response[0]: page_content = self._download_from_gcs(f'documents/{result.id}') metadata = {} if self.document_id_key is not None: metadata[self.document_id_key] = result.id document = Document(page_content=page_content, metadata=metadata) docs.append((document, result.distance)) logger.debug('Downloaded documents for query.') return docs
Return docs most similar to the embedding and their cosine distance. Args: embedding: Embedding to look up documents similar to. k: Number of Documents to return. Defaults to 4. filter: Optional. A list of Namespaces for filtering the matching results. For example: [Namespace("color", ["red"], []), Namespace("shape", [], ["squared"])] will match datapoints that satisfy "red color" but not include datapoints with "squared shape". Please refer to https://cloud.google.com/vertex-ai/docs/matching-engine/filtering#json for more detail. Returns: List[Tuple[Document, float]]: List of documents most similar to the query text and cosine distance in float for each. Lower score represents more similarity.
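A hedged sketch of the Namespace filter format described in the docstring. It assumes Namespace is importable from the google-cloud-aiplatform matching engine module and that `vector_store` and `query_embedding` already exist; both are placeholders.

from google.cloud.aiplatform.matching_engine.matching_engine_index_endpoint import Namespace

# Match "red" datapoints while excluding "squared" ones, as in the docstring.
filters = [Namespace("color", ["red"], []), Namespace("shape", [], ["squared"])]
docs_and_scores = vector_store.similarity_search_by_vector_with_score(
    query_embedding, k=5, filter=filters
)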
__init__
self.segment_sentences = segment_sentences
self.grobid_server = grobid_server
try:
    requests.get(grobid_server)
except requests.exceptions.RequestException:
    logger.error(
        'GROBID server does not appear to be up and running; please ensure Grobid is installed and the server is running'
        )
    raise ServerUnavailableException
def __init__(self, segment_sentences: bool, grobid_server: str=
        'http://localhost:8070/api/processFulltextDocument') ->None:
    self.segment_sentences = segment_sentences
    self.grobid_server = grobid_server
    try:
        requests.get(grobid_server)
    except requests.exceptions.RequestException:
        logger.error(
            'GROBID server does not appear to be up and running; please ensure Grobid is installed and the server is running'
            )
        raise ServerUnavailableException
null
max_marginal_relevance_search_by_vector
"""Perform a search and return results that are reordered by MMR.""" filter = None if expr is None else self.document.Filter(expr) ef = 10 if param is None else param.get('ef', 10) res: List[List[Dict]] = self.collection.search(vectors=[embedding], filter= filter, params=self.document.HNSWSearchParams(ef=ef), retrieve_vector= True, limit=fetch_k, timeout=timeout) documents = [] ordered_result_embeddings = [] for result in res[0]: meta = result.get(self.field_metadata) if meta is not None: meta = json.loads(meta) doc = Document(page_content=result.get(self.field_text), metadata=meta) documents.append(doc) ordered_result_embeddings.append(result.get(self.field_vector)) new_ordering = maximal_marginal_relevance(np.array(embedding), ordered_result_embeddings, k=k, lambda_mult=lambda_mult) ret = [] for x in new_ordering: if x == -1: break else: ret.append(documents[x]) return ret
def max_marginal_relevance_search_by_vector(self, embedding: list[float], k: int=4, fetch_k: int=20, lambda_mult: float=0.5, param: Optional[dict]= None, expr: Optional[str]=None, timeout: Optional[int]=None, **kwargs: Any ) ->List[Document]: """Perform a search and return results that are reordered by MMR.""" filter = None if expr is None else self.document.Filter(expr) ef = 10 if param is None else param.get('ef', 10) res: List[List[Dict]] = self.collection.search(vectors=[embedding], filter=filter, params=self.document.HNSWSearchParams(ef=ef), retrieve_vector=True, limit=fetch_k, timeout=timeout) documents = [] ordered_result_embeddings = [] for result in res[0]: meta = result.get(self.field_metadata) if meta is not None: meta = json.loads(meta) doc = Document(page_content=result.get(self.field_text), metadata=meta) documents.append(doc) ordered_result_embeddings.append(result.get(self.field_vector)) new_ordering = maximal_marginal_relevance(np.array(embedding), ordered_result_embeddings, k=k, lambda_mult=lambda_mult) ret = [] for x in new_ordering: if x == -1: break else: ret.append(documents[x]) return ret
Perform a search and return results that are reordered by MMR.
test_memory_with_message_store
"""Test the memory with a message store.""" message_history = SingleStoreDBChatMessageHistory(session_id='test-session', host=TEST_SINGLESTOREDB_URL) memory = ConversationBufferMemory(memory_key='baz', chat_memory= message_history, return_messages=True) memory.chat_memory.add_ai_message('This is me, the AI') memory.chat_memory.add_user_message('This is me, the human') messages = memory.chat_memory.messages messages_json = json.dumps([message_to_dict(msg) for msg in messages]) assert 'This is me, the AI' in messages_json assert 'This is me, the human' in messages_json memory.chat_memory.clear() assert memory.chat_memory.messages == []
def test_memory_with_message_store() ->None: """Test the memory with a message store.""" message_history = SingleStoreDBChatMessageHistory(session_id= 'test-session', host=TEST_SINGLESTOREDB_URL) memory = ConversationBufferMemory(memory_key='baz', chat_memory= message_history, return_messages=True) memory.chat_memory.add_ai_message('This is me, the AI') memory.chat_memory.add_user_message('This is me, the human') messages = memory.chat_memory.messages messages_json = json.dumps([message_to_dict(msg) for msg in messages]) assert 'This is me, the AI' in messages_json assert 'This is me, the human' in messages_json memory.chat_memory.clear() assert memory.chat_memory.messages == []
Test the memory with a message store.
from_text
"""Get an OpenAPI spec from a text.""" try: spec_dict = json.loads(text) except json.JSONDecodeError: spec_dict = yaml.safe_load(text) return cls.from_spec_dict(spec_dict)
@classmethod def from_text(cls, text: str) ->OpenAPISpec: """Get an OpenAPI spec from a text.""" try: spec_dict = json.loads(text) except json.JSONDecodeError: spec_dict = yaml.safe_load(text) return cls.from_spec_dict(spec_dict)
Get an OpenAPI spec from a text.
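A small sketch of the fallback behavior (JSON is tried first, then YAML). The import path and the minimal spec shown are assumptions for illustration only:

from langchain_community.utilities.openapi import OpenAPISpec

yaml_text = """
openapi: 3.0.0
info:
  title: Demo API
  version: "1.0"
paths: {}
"""
spec_from_yaml = OpenAPISpec.from_text(yaml_text)   # json.loads fails, yaml.safe_load succeeds

json_text = '{"openapi": "3.0.0", "info": {"title": "Demo API", "version": "1.0"}, "paths": {}}'
spec_from_json = OpenAPISpec.from_text(json_text)   # parsed directly as JSON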
from_texts
"""Construct Meilisearch wrapper from raw documents. This is a user-friendly interface that: 1. Embeds documents. 2. Adds the documents to a provided Meilisearch index. This is intended to be a quick way to get started. Example: .. code-block:: python from langchain_community.vectorstores import Meilisearch from langchain_community.embeddings import OpenAIEmbeddings import meilisearch # The environment should be the one specified next to the API key # in your Meilisearch console client = meilisearch.Client(url='http://127.0.0.1:7700', api_key='***') embeddings = OpenAIEmbeddings() docsearch = Meilisearch.from_texts( client=client, embeddings=embeddings, ) """ client = _create_client(client=client, url=url, api_key=api_key) vectorstore = cls(embedding=embedding, client=client, index_name=index_name) vectorstore.add_texts(texts=texts, metadatas=metadatas, ids=ids, text_key= text_key, metadata_key=metadata_key) return vectorstore
@classmethod
def from_texts(cls: Type[Meilisearch], texts: List[str], embedding:
        Embeddings, metadatas: Optional[List[dict]]=None, client: Optional[
        Client]=None, url: Optional[str]=None, api_key: Optional[str]=None,
        index_name: str='langchain-demo', ids: Optional[List[str]]=None,
        text_key: Optional[str]='text', metadata_key: Optional[str]='metadata',
        **kwargs: Any) ->Meilisearch:
    """Construct Meilisearch wrapper from raw documents.

    This is a user-friendly interface that:
        1. Embeds documents.
        2. Adds the documents to a provided Meilisearch index.

    This is intended to be a quick way to get started.

    Example:
        .. code-block:: python

            from langchain_community.vectorstores import Meilisearch
            from langchain_community.embeddings import OpenAIEmbeddings
            import meilisearch

            # The environment should be the one specified next to the API key
            # in your Meilisearch console
            client = meilisearch.Client(url='http://127.0.0.1:7700', api_key='***')
            embeddings = OpenAIEmbeddings()
            texts = ["Here is a document", "Here is another one"]
            docsearch = Meilisearch.from_texts(
                texts=texts,
                embedding=embeddings,
                client=client,
            )
    """
    client = _create_client(client=client, url=url, api_key=api_key)
    vectorstore = cls(embedding=embedding, client=client, index_name=index_name)
    vectorstore.add_texts(texts=texts, metadatas=metadatas, ids=ids,
        text_key=text_key, metadata_key=metadata_key)
    return vectorstore
Construct Meilisearch wrapper from raw documents.

This is a user-friendly interface that:
    1. Embeds documents.
    2. Adds the documents to a provided Meilisearch index.

This is intended to be a quick way to get started.

Example:
    .. code-block:: python

        from langchain_community.vectorstores import Meilisearch
        from langchain_community.embeddings import OpenAIEmbeddings
        import meilisearch

        # The environment should be the one specified next to the API key
        # in your Meilisearch console
        client = meilisearch.Client(url='http://127.0.0.1:7700', api_key='***')
        embeddings = OpenAIEmbeddings()
        texts = ["Here is a document", "Here is another one"]
        docsearch = Meilisearch.from_texts(
            texts=texts,
            embedding=embeddings,
            client=client,
        )
embeddings
return self.embedding
@property def embeddings(self) ->Embeddings: return self.embedding
null
validate_environment
"""Validate that api key and python package exists in environment.""" values['gradient_access_token'] = get_from_dict_or_env(values, 'gradient_access_token', 'GRADIENT_ACCESS_TOKEN') values['gradient_workspace_id'] = get_from_dict_or_env(values, 'gradient_workspace_id', 'GRADIENT_WORKSPACE_ID') values['gradient_api_url'] = get_from_dict_or_env(values, 'gradient_api_url', 'GRADIENT_API_URL') try: import gradientai except ImportError: raise ImportError( 'GradientEmbeddings requires `pip install -U "gradientai>=1.4.0"`.') if parse(gradientai.__version__) < parse('1.4.0'): raise ImportError( 'GradientEmbeddings requires `pip install -U "gradientai>=1.4.0"`.') gradient = gradientai.Gradient(access_token=values['gradient_access_token'], workspace_id=values['gradient_workspace_id'], host=values[ 'gradient_api_url']) values['client'] = gradient.get_embeddings_model(slug=values['model']) return values
@root_validator(allow_reuse=True) def validate_environment(cls, values: Dict) ->Dict: """Validate that api key and python package exists in environment.""" values['gradient_access_token'] = get_from_dict_or_env(values, 'gradient_access_token', 'GRADIENT_ACCESS_TOKEN') values['gradient_workspace_id'] = get_from_dict_or_env(values, 'gradient_workspace_id', 'GRADIENT_WORKSPACE_ID') values['gradient_api_url'] = get_from_dict_or_env(values, 'gradient_api_url', 'GRADIENT_API_URL') try: import gradientai except ImportError: raise ImportError( 'GradientEmbeddings requires `pip install -U "gradientai>=1.4.0"`.' ) if parse(gradientai.__version__) < parse('1.4.0'): raise ImportError( 'GradientEmbeddings requires `pip install -U "gradientai>=1.4.0"`.' ) gradient = gradientai.Gradient(access_token=values[ 'gradient_access_token'], workspace_id=values[ 'gradient_workspace_id'], host=values['gradient_api_url']) values['client'] = gradient.get_embeddings_model(slug=values['model']) return values
Validate that api key and python package exists in environment.