Dataset columns (each a string; minimum and maximum length):
    method_name: 1 – 78
    method_body: 3 – 9.66k
    full_code: 31 – 10.7k
    docstring: 4 – 4.74k
test_python_loader
"""Test Python loader.""" file_path = Path(__file__).parent.parent / 'examples' / filename loader = PythonLoader(str(file_path)) docs = loader.load() assert len(docs) == 1 metadata = docs[0].metadata assert metadata['source'] == str(file_path)
@pytest.mark.parametrize('filename', ['default-encoding.py', 'non-utf8-encoding.py']) def test_python_loader(filename: str) ->None: """Test Python loader.""" file_path = Path(__file__).parent.parent / 'examples' / filename loader = PythonLoader(str(file_path)) docs = loader.load() assert len(docs) == 1 metadata = docs[0].metadata assert metadata['source'] == str(file_path)
Test Python loader.
is_lc_serializable
return True
@classmethod def is_lc_serializable(cls) ->bool: return True
null
on_agent_action
"""Run on agent action.""" self.step += 1 self.tool_starts += 1 self.starts += 1 resp = self._init_resp() resp.update({'action': 'on_agent_action', 'tool': action.tool, 'tool_input': action.tool_input, 'log': action.log}) resp.update(self.get_custom_callback_meta()) self.on_agent_action_records.append(resp) self.action_records.append(resp) if self.stream_logs: self.logger.report_text(resp)
def on_agent_action(self, action: AgentAction, **kwargs: Any) ->Any: """Run on agent action.""" self.step += 1 self.tool_starts += 1 self.starts += 1 resp = self._init_resp() resp.update({'action': 'on_agent_action', 'tool': action.tool, 'tool_input': action.tool_input, 'log': action.log}) resp.update(self.get_custom_callback_meta()) self.on_agent_action_records.append(resp) self.action_records.append(resp) if self.stream_logs: self.logger.report_text(resp)
Run on agent action.
_type
return 'react'
@property def _type(self) ->str: return 'react'
null
test_empty
memory = ToTDFSMemory([]) self.assertEqual(self.controller(memory), ())
def test_empty(self) ->None: memory = ToTDFSMemory([]) self.assertEqual(self.controller(memory), ())
null
from_texts
"""Construct Dingo wrapper from raw documents. This is a user friendly interface that: 1. Embeds documents. 2. Adds the documents to a provided Dingo index This is intended to be a quick way to get started. Example: .. code-block:: python from langchain_community.vectorstores import Dingo from langchain_community.embeddings import OpenAIEmbeddings import dingodb sss embeddings = OpenAIEmbeddings() dingo = Dingo.from_texts( texts, embeddings, index_name="langchain-demo" ) """ try: import dingodb except ImportError: raise ImportError( 'Could not import dingo python package. Please install it with `pip install dingodb`.' ) if client is not None: dingo_client = client else: try: dingo_client = dingodb.DingoDB(user, password, host) except ValueError as e: raise ValueError(f'Dingo failed to connect: {e}') if kwargs is not None and kwargs.get('self_id') is True: if index_name is not None and index_name not in dingo_client.get_index( ) and index_name.upper() not in dingo_client.get_index(): dingo_client.create_index(index_name, dimension=dimension, auto_id= False) elif index_name is not None and index_name not in dingo_client.get_index( ) and index_name.upper() not in dingo_client.get_index(): dingo_client.create_index(index_name, dimension=dimension) ids = ids or [str(uuid.uuid1().int)[:13] for _ in texts] metadatas_list = [] texts = list(texts) embeds = embedding.embed_documents(texts) for i, text in enumerate(texts): metadata = metadatas[i] if metadatas else {} metadata[text_key] = text metadatas_list.append(metadata) for i in range(0, len(list(texts)), batch_size): j = i + batch_size add_res = dingo_client.vector_add(index_name, metadatas_list[i:j], embeds[i:j], ids[i:j]) if not add_res: raise Exception('vector add fail') return cls(embedding, text_key, client=dingo_client, index_name=index_name)
@classmethod def from_texts(cls, texts: List[str], embedding: Embeddings, metadatas: Optional[List[dict]]=None, ids: Optional[List[str]]=None, text_key: str ='text', index_name: Optional[str]=None, dimension: int=1024, client: Any=None, host: List[str]=['172.20.31.10:13000'], user: str='root', password: str='123123', batch_size: int=500, **kwargs: Any) ->Dingo: """Construct Dingo wrapper from raw documents. This is a user friendly interface that: 1. Embeds documents. 2. Adds the documents to a provided Dingo index This is intended to be a quick way to get started. Example: .. code-block:: python from langchain_community.vectorstores import Dingo from langchain_community.embeddings import OpenAIEmbeddings import dingodb sss embeddings = OpenAIEmbeddings() dingo = Dingo.from_texts( texts, embeddings, index_name="langchain-demo" ) """ try: import dingodb except ImportError: raise ImportError( 'Could not import dingo python package. Please install it with `pip install dingodb`.' ) if client is not None: dingo_client = client else: try: dingo_client = dingodb.DingoDB(user, password, host) except ValueError as e: raise ValueError(f'Dingo failed to connect: {e}') if kwargs is not None and kwargs.get('self_id') is True: if index_name is not None and index_name not in dingo_client.get_index( ) and index_name.upper() not in dingo_client.get_index(): dingo_client.create_index(index_name, dimension=dimension, auto_id=False) elif index_name is not None and index_name not in dingo_client.get_index( ) and index_name.upper() not in dingo_client.get_index(): dingo_client.create_index(index_name, dimension=dimension) ids = ids or [str(uuid.uuid1().int)[:13] for _ in texts] metadatas_list = [] texts = list(texts) embeds = embedding.embed_documents(texts) for i, text in enumerate(texts): metadata = metadatas[i] if metadatas else {} metadata[text_key] = text metadatas_list.append(metadata) for i in range(0, len(list(texts)), batch_size): j = i + batch_size add_res = dingo_client.vector_add(index_name, metadatas_list[i:j], embeds[i:j], ids[i:j]) if not add_res: raise Exception('vector add fail') return cls(embedding, text_key, client=dingo_client, index_name=index_name)
Construct Dingo wrapper from raw documents. This is a user-friendly interface that: 1. Embeds documents. 2. Adds the documents to a provided Dingo index. This is intended to be a quick way to get started. Example: .. code-block:: python from langchain_community.vectorstores import Dingo from langchain_community.embeddings import OpenAIEmbeddings import dingodb embeddings = OpenAIEmbeddings() dingo = Dingo.from_texts( texts, embeddings, index_name="langchain-demo" )
test_context_w_namespace_no_emb
expected = [{'test_namespace': 'test'}] assert base.embed({'test_namespace': 'test'}, MockEncoder()) == expected
@pytest.mark.requires('vowpal_wabbit_next') def test_context_w_namespace_no_emb() ->None: expected = [{'test_namespace': 'test'}] assert base.embed({'test_namespace': 'test'}, MockEncoder()) == expected
null
_Try
self.fill('try') self.enter() self.dispatch(t.body) self.leave() for ex in t.handlers: self.dispatch(ex) if t.orelse: self.fill('else') self.enter() self.dispatch(t.orelse) self.leave() if t.finalbody: self.fill('finally') self.enter() self.dispatch(t.finalbody) self.leave()
def _Try(self, t): self.fill('try') self.enter() self.dispatch(t.body) self.leave() for ex in t.handlers: self.dispatch(ex) if t.orelse: self.fill('else') self.enter() self.dispatch(t.orelse) self.leave() if t.finalbody: self.fill('finally') self.enter() self.dispatch(t.finalbody) self.leave()
null
_result_as_string
toret = 'No good search result found' if 'answer_box' in result.keys() and 'answer' in result['answer_box'].keys(): toret = result['answer_box']['answer'] elif 'answer_box' in result.keys() and 'snippet' in result['answer_box'].keys( ): toret = result['answer_box']['snippet'] elif 'knowledge_graph' in result.keys(): toret = result['knowledge_graph']['description'] elif 'organic_results' in result.keys(): snippets = [r['snippet'] for r in result['organic_results'] if 'snippet' in r.keys()] toret = '\n'.join(snippets) elif 'jobs' in result.keys(): jobs = [r['description'] for r in result['jobs'] if 'description' in r. keys()] toret = '\n'.join(jobs) elif 'videos' in result.keys(): videos = [f'Title: "{r[\'title\']}" Link: {r[\'link\']}' for r in result['videos'] if 'title' in r.keys()] toret = '\n'.join(videos) elif 'images' in result.keys(): images = [f'Title: "{r[\'title\']}" Link: {r[\'original\'][\'link\']}' for r in result['images'] if 'original' in r.keys()] toret = '\n'.join(images) return toret
@staticmethod def _result_as_string(result: dict) ->str: toret = 'No good search result found' if 'answer_box' in result.keys() and 'answer' in result['answer_box'].keys( ): toret = result['answer_box']['answer'] elif 'answer_box' in result.keys() and 'snippet' in result['answer_box' ].keys(): toret = result['answer_box']['snippet'] elif 'knowledge_graph' in result.keys(): toret = result['knowledge_graph']['description'] elif 'organic_results' in result.keys(): snippets = [r['snippet'] for r in result['organic_results'] if 'snippet' in r.keys()] toret = '\n'.join(snippets) elif 'jobs' in result.keys(): jobs = [r['description'] for r in result['jobs'] if 'description' in r.keys()] toret = '\n'.join(jobs) elif 'videos' in result.keys(): videos = [f'Title: "{r[\'title\']}" Link: {r[\'link\']}' for r in result['videos'] if 'title' in r.keys()] toret = '\n'.join(videos) elif 'images' in result.keys(): images = [f'Title: "{r[\'title\']}" Link: {r[\'original\'][\'link\']}' for r in result['images'] if 'original' in r.keys()] toret = '\n'.join(images) return toret
null
setUp
try: import kuzu except ImportError as e: raise ImportError( 'Cannot import Python package kuzu. Please install it by running `pip install kuzu`.' ) from e self.tmpdir = tempfile.mkdtemp() self.kuzu_database = kuzu.Database(self.tmpdir) self.conn = kuzu.Connection(self.kuzu_database) self.conn.execute('CREATE NODE TABLE Movie (name STRING, PRIMARY KEY(name))') self.conn.execute("CREATE (:Movie {name: 'The Godfather'})") self.conn.execute("CREATE (:Movie {name: 'The Godfather: Part II'})") self.conn.execute( "CREATE (:Movie {name: 'The Godfather Coda: The Death of Michael Corleone'})" ) self.kuzu_graph = KuzuGraph(self.kuzu_database)
def setUp(self) ->None: try: import kuzu except ImportError as e: raise ImportError( 'Cannot import Python package kuzu. Please install it by running `pip install kuzu`.' ) from e self.tmpdir = tempfile.mkdtemp() self.kuzu_database = kuzu.Database(self.tmpdir) self.conn = kuzu.Connection(self.kuzu_database) self.conn.execute( 'CREATE NODE TABLE Movie (name STRING, PRIMARY KEY(name))') self.conn.execute("CREATE (:Movie {name: 'The Godfather'})") self.conn.execute("CREATE (:Movie {name: 'The Godfather: Part II'})") self.conn.execute( "CREATE (:Movie {name: 'The Godfather Coda: The Death of Michael Corleone'})" ) self.kuzu_graph = KuzuGraph(self.kuzu_database)
null
test_returns_expected_results
fake_llm = FakeLLM(queries={'text': """The meaning of life CORRECT"""}, sequential_responses=True) chain = chain_cls.from_llm(fake_llm) results = chain.evaluate_strings(prediction='my prediction', reference= 'my reference', input='my input') assert results['score'] == 1
@pytest.mark.parametrize('chain_cls', [QAEvalChain, ContextQAEvalChain, CotQAEvalChain]) def test_returns_expected_results(chain_cls: Type[LLMChain]) ->None: fake_llm = FakeLLM(queries={'text': 'The meaning of life\nCORRECT'}, sequential_responses=True) chain = chain_cls.from_llm(fake_llm) results = chain.evaluate_strings(prediction='my prediction', reference= 'my reference', input='my input') assert results['score'] == 1
null
load
pdf_id = self.send_pdf() contents = self.get_processed_pdf(pdf_id) if self.should_clean_pdf: contents = self.clean_pdf(contents) metadata = {'source': self.source, 'file_path': self.source} return [Document(page_content=contents, metadata=metadata)]
def load(self) ->List[Document]: pdf_id = self.send_pdf() contents = self.get_processed_pdf(pdf_id) if self.should_clean_pdf: contents = self.clean_pdf(contents) metadata = {'source': self.source, 'file_path': self.source} return [Document(page_content=contents, metadata=metadata)]
null
test_json_distance_evaluator_evaluate_strings_custom_operator_equal
"""Custom operator that returns 0.5 if strings are different.""" def custom_distance(a: str, b: str) ->float: return 0.5 if a != b else 0.0 evaluator = JsonEditDistanceEvaluator(string_distance=custom_distance) prediction = '{"a": "apple", "b": "banana"}' reference = '{"a": "apple", "b": "berries"}' result = evaluator._evaluate_strings(prediction=prediction, reference=reference ) assert result['score'] == 0.5
@pytest.mark.requires('rapidfuzz') def test_json_distance_evaluator_evaluate_strings_custom_operator_equal( ) ->None: """Custom operator that returns 0.5 if strings are different.""" def custom_distance(a: str, b: str) ->float: return 0.5 if a != b else 0.0 evaluator = JsonEditDistanceEvaluator(string_distance=custom_distance) prediction = '{"a": "apple", "b": "banana"}' reference = '{"a": "apple", "b": "berries"}' result = evaluator._evaluate_strings(prediction=prediction, reference= reference) assert result['score'] == 0.5
Custom operator that returns 0.5 if strings are different.
seq_naive_rag_scoped
context = ['Hi there!', 'How are you?', "What's your name?"] retriever = RunnableLambda(lambda x: context) prompt = PromptTemplate.from_template('{context} {question}') llm = FakeListLLM(responses=['hello']) scoped = Context.create_scope('a_scope') return Context.setter('input') | {'context': retriever | Context.setter( 'context'), 'question': RunnablePassthrough(), 'scoped': scoped.setter( 'context') | scoped.getter('context')} | prompt | llm | StrOutputParser( ) | Context.setter('result') | Context.getter(['context', 'input', 'result'])
def seq_naive_rag_scoped() ->Runnable: context = ['Hi there!', 'How are you?', "What's your name?"] retriever = RunnableLambda(lambda x: context) prompt = PromptTemplate.from_template('{context} {question}') llm = FakeListLLM(responses=['hello']) scoped = Context.create_scope('a_scope') return Context.setter('input') | {'context': retriever | Context.setter ('context'), 'question': RunnablePassthrough(), 'scoped': scoped. setter('context') | scoped.getter('context') } | prompt | llm | StrOutputParser() | Context.setter('result' ) | Context.getter(['context', 'input', 'result'])
null
paginate_request
"""Paginate the various methods to retrieve groups of pages. Unfortunately, due to page size, sometimes the Confluence API doesn't match the limit value. If `limit` is >100 confluence seems to cap the response to 100. Also, due to the Atlassian Python package, we don't get the "next" values from the "_links" key because they only return the value from the result key. So here, the pagination starts from 0 and goes until the max_pages, getting the `limit` number of pages with each request. We have to manually check if there are more docs based on the length of the returned list of pages, rather than just checking for the presence of a `next` key in the response like this page would have you do: https://developer.atlassian.com/server/confluence/pagination-in-the-rest-api/ :param retrieval_method: Function used to retrieve docs :type retrieval_method: callable :return: List of documents :rtype: List """ max_pages = kwargs.pop('max_pages') docs: List[dict] = [] while len(docs) < max_pages: get_pages = retry(reraise=True, stop=stop_after_attempt(self. number_of_retries), wait=wait_exponential(multiplier=1, min=self. min_retry_seconds, max=self.max_retry_seconds), before_sleep= before_sleep_log(logger, logging.WARNING))(retrieval_method) batch = get_pages(**kwargs, start=len(docs)) if not batch: break docs.extend(batch) return docs[:max_pages]
def paginate_request(self, retrieval_method: Callable, **kwargs: Any) ->List: """Paginate the various methods to retrieve groups of pages. Unfortunately, due to page size, sometimes the Confluence API doesn't match the limit value. If `limit` is >100 confluence seems to cap the response to 100. Also, due to the Atlassian Python package, we don't get the "next" values from the "_links" key because they only return the value from the result key. So here, the pagination starts from 0 and goes until the max_pages, getting the `limit` number of pages with each request. We have to manually check if there are more docs based on the length of the returned list of pages, rather than just checking for the presence of a `next` key in the response like this page would have you do: https://developer.atlassian.com/server/confluence/pagination-in-the-rest-api/ :param retrieval_method: Function used to retrieve docs :type retrieval_method: callable :return: List of documents :rtype: List """ max_pages = kwargs.pop('max_pages') docs: List[dict] = [] while len(docs) < max_pages: get_pages = retry(reraise=True, stop=stop_after_attempt(self. number_of_retries), wait=wait_exponential(multiplier=1, min= self.min_retry_seconds, max=self.max_retry_seconds), before_sleep=before_sleep_log(logger, logging.WARNING))( retrieval_method) batch = get_pages(**kwargs, start=len(docs)) if not batch: break docs.extend(batch) return docs[:max_pages]
Paginate the various methods to retrieve groups of pages. Unfortunately, due to page size, sometimes the Confluence API doesn't match the limit value. If `limit` is >100 confluence seems to cap the response to 100. Also, due to the Atlassian Python package, we don't get the "next" values from the "_links" key because they only return the value from the result key. So here, the pagination starts from 0 and goes until the max_pages, getting the `limit` number of pages with each request. We have to manually check if there are more docs based on the length of the returned list of pages, rather than just checking for the presence of a `next` key in the response like this page would have you do: https://developer.atlassian.com/server/confluence/pagination-in-the-rest-api/ :param retrieval_method: Function used to retrieve docs :type retrieval_method: callable :return: List of documents :rtype: List
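A minimal usage sketch of the manual paging described above, assuming `loader` is an already-configured ConfluenceLoader and that the wrapped Atlassian client exposes `get_all_pages_from_space` (the space key and page counts are illustrative, not taken from this dataset):

    # Hypothetical call: fetch up to 1000 pages, 100 per request.
    # `max_pages` is popped from kwargs by paginate_request itself;
    # the remaining kwargs are forwarded to the retrieval method,
    # with `start` advanced by the number of docs fetched so far.
    pages = loader.paginate_request(
        loader.confluence.get_all_pages_from_space,
        space="ENG",
        limit=100,
        max_pages=1000,
    )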
validate_environment
"""Validate that api key and python package exists in environment.""" replicate_api_token = get_from_dict_or_env(values, 'replicate_api_token', 'REPLICATE_API_TOKEN') values['replicate_api_token'] = replicate_api_token return values
@root_validator() def validate_environment(cls, values: Dict) ->Dict: """Validate that api key and python package exists in environment.""" replicate_api_token = get_from_dict_or_env(values, 'replicate_api_token', 'REPLICATE_API_TOKEN') values['replicate_api_token'] = replicate_api_token return values
Validate that api key and python package exists in environment.
build_tree
"""Builds a nested dictionary from a list of runs. :param runs: The list of runs to build the tree from. :return: The nested dictionary representing the langchain Run in a tree structure compatible with WBTraceTree. """ id_to_data = {} child_to_parent = {} for entity in runs: for key, data in entity.items(): id_val = data.pop('id', None) parent_run_id = data.pop('parent_run_id', None) id_to_data[id_val] = {key: data} if parent_run_id: child_to_parent[id_val] = parent_run_id for child_id, parent_id in child_to_parent.items(): parent_dict = id_to_data[parent_id] parent_dict[next(iter(parent_dict))][next(iter(id_to_data[child_id])) ] = id_to_data[child_id][next(iter(id_to_data[child_id]))] root_dict = next(data for id_val, data in id_to_data.items() if id_val not in child_to_parent) return root_dict
def build_tree(self, runs: List[Dict[str, Any]]) ->Dict[str, Any]: """Builds a nested dictionary from a list of runs. :param runs: The list of runs to build the tree from. :return: The nested dictionary representing the langchain Run in a tree structure compatible with WBTraceTree. """ id_to_data = {} child_to_parent = {} for entity in runs: for key, data in entity.items(): id_val = data.pop('id', None) parent_run_id = data.pop('parent_run_id', None) id_to_data[id_val] = {key: data} if parent_run_id: child_to_parent[id_val] = parent_run_id for child_id, parent_id in child_to_parent.items(): parent_dict = id_to_data[parent_id] parent_dict[next(iter(parent_dict))][next(iter(id_to_data[child_id])) ] = id_to_data[child_id][next(iter(id_to_data[child_id]))] root_dict = next(data for id_val, data in id_to_data.items() if id_val not in child_to_parent) return root_dict
Builds a nested dictionary from a list of runs. :param runs: The list of runs to build the tree from. :return: The nested dictionary representing the langchain Run in a tree structure compatible with WBTraceTree.
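A small worked example of the tree construction above, using a toy list of runs (the run payloads are illustrative only):

    runs = [
        {"chain": {"id": "1", "parent_run_id": None, "name": "root chain"}},
        {"llm": {"id": "2", "parent_run_id": "1", "name": "child llm"}},
    ]
    # build_tree pops "id"/"parent_run_id", indexes each run by id, then
    # nests every child dict under its parent and returns the single root:
    # {"chain": {"name": "root chain", "llm": {"name": "child llm"}}}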
_llm_type
return 'ernie-bot-chat'
@property def _llm_type(self) ->str: return 'ernie-bot-chat'
null
__ror__
"""Compose this runnable with another object to create a RunnableSequence.""" return RunnableSequence(coerce_to_runnable(other), self)
def __ror__(self, other: Union[Runnable[Other, Any], Callable[[Other], Any], Callable[[Iterator[Other]], Iterator[Any]], Mapping[str, Union[Runnable [Other, Any], Callable[[Other], Any], Any]]]) ->RunnableSerializable[ Other, Output]: """Compose this runnable with another object to create a RunnableSequence.""" return RunnableSequence(coerce_to_runnable(other), self)
Compose this runnable with another object to create a RunnableSequence.
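`__ror__` is what lets a plain callable or mapping on the left-hand side of `|` be coerced into a runnable. A minimal sketch, assuming the `langchain_core.runnables` import path:

    from langchain_core.runnables import RunnableLambda

    to_upper = RunnableLambda(lambda s: s.upper())
    # The lambda on the left is not a Runnable, so Python falls back to
    # to_upper.__ror__(...), which coerces it and builds a RunnableSequence.
    chain = (lambda x: x["text"]) | to_upper
    chain.invoke({"text": "hello"})  # -> "HELLO"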
on_text_common
self.text += 1
def on_text_common(self) ->None: self.text += 1
null
empty_str_to_none
"""Empty strings are not allowed""" if v == '': return None return v
@validator('*', pre=True) def empty_str_to_none(cls, v: str) ->Union[str, None]: """Empty strings are not allowed""" if v == '': return None return v
Empty strings are not allowed
create_messages
"""Create messages.""" system_message_prompt = SystemMessagePromptTemplate(prompt=PromptTemplate( template="Here's some context: {context}", input_variables=['context'])) human_message_prompt = HumanMessagePromptTemplate(prompt=PromptTemplate( template="Hello {foo}, I'm {bar}. Thanks for the {context}", input_variables=['foo', 'bar', 'context'])) ai_message_prompt = AIMessagePromptTemplate(prompt=PromptTemplate(template= "I'm an AI. I'm {foo}. I'm {bar}.", input_variables=['foo', 'bar'])) chat_message_prompt = ChatMessagePromptTemplate(role='test', prompt= PromptTemplate(template="I'm a generic message. I'm {foo}. I'm {bar}.", input_variables=['foo', 'bar'])) return [system_message_prompt, human_message_prompt, ai_message_prompt, chat_message_prompt]
def create_messages() ->List[BaseMessagePromptTemplate]: """Create messages.""" system_message_prompt = SystemMessagePromptTemplate(prompt= PromptTemplate(template="Here's some context: {context}", input_variables=['context'])) human_message_prompt = HumanMessagePromptTemplate(prompt=PromptTemplate (template="Hello {foo}, I'm {bar}. Thanks for the {context}", input_variables=['foo', 'bar', 'context'])) ai_message_prompt = AIMessagePromptTemplate(prompt=PromptTemplate( template="I'm an AI. I'm {foo}. I'm {bar}.", input_variables=['foo', 'bar'])) chat_message_prompt = ChatMessagePromptTemplate(role='test', prompt= PromptTemplate(template= "I'm a generic message. I'm {foo}. I'm {bar}.", input_variables=[ 'foo', 'bar'])) return [system_message_prompt, human_message_prompt, ai_message_prompt, chat_message_prompt]
Create messages.
_import_deepsparse
from langchain_community.llms.deepsparse import DeepSparse return DeepSparse
def _import_deepsparse() ->Any: from langchain_community.llms.deepsparse import DeepSparse return DeepSparse
null
fake_llm_summarization_checker_chain
"""Fake LLMCheckerChain for testing.""" queries = {CREATE_ASSERTIONS_PROMPT.format(summary='a'): 'b', CHECK_ASSERTIONS_PROMPT.format(assertions='b'): '- b - True', REVISED_SUMMARY_PROMPT.format(checked_assertions='- b - True', summary= 'a'): 'b', ARE_ALL_TRUE_PROMPT.format(checked_assertions='- b - True'): 'True'} fake_llm = FakeLLM(queries=queries) return LLMSummarizationCheckerChain.from_llm(fake_llm, input_key='q', output_key='a')
@pytest.fixture def fake_llm_summarization_checker_chain() ->LLMSummarizationCheckerChain: """Fake LLMCheckerChain for testing.""" queries = {CREATE_ASSERTIONS_PROMPT.format(summary='a'): 'b', CHECK_ASSERTIONS_PROMPT.format(assertions='b'): '- b - True', REVISED_SUMMARY_PROMPT.format(checked_assertions='- b - True', summary='a'): 'b', ARE_ALL_TRUE_PROMPT.format(checked_assertions= '- b - True'): 'True'} fake_llm = FakeLLM(queries=queries) return LLMSummarizationCheckerChain.from_llm(fake_llm, input_key='q', output_key='a')
Fake LLMCheckerChain for testing.
_run
"""Use the tool.""" return str(self.api_wrapper.results(query))
def _run(self, query: str, run_manager: Optional[CallbackManagerForToolRun] =None) ->str: """Use the tool.""" return str(self.api_wrapper.results(query))
Use the tool.
search_api
"""Search the API for the query.""" return 'API result'
@tool def search_api(query: str) ->str: """Search the API for the query.""" return 'API result'
Search the API for the query.
embed_documents
"""Call out to Infinity's embedding endpoint. Args: texts: The list of texts to embed. Returns: List of embeddings, one for each text. """ embeddings = self.client.embed(model=self.model, texts=texts) return embeddings
def embed_documents(self, texts: List[str]) ->List[List[float]]: """Call out to Infinity's embedding endpoint. Args: texts: The list of texts to embed. Returns: List of embeddings, one for each text. """ embeddings = self.client.embed(model=self.model, texts=texts) return embeddings
Call out to Infinity's embedding endpoint. Args: texts: The list of texts to embed. Returns: List of embeddings, one for each text.
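A hedged usage sketch, assuming this method belongs to the community `InfinityEmbeddings` wrapper and that a local Infinity server is running (the URL and model name below are placeholders):

    from langchain_community.embeddings import InfinityEmbeddings

    embeddings = InfinityEmbeddings(
        model="BAAI/bge-small-en-v1.5",
        infinity_api_url="http://localhost:7997",
    )
    vectors = embeddings.embed_documents(["hello", "world"])
    # one embedding (list of floats) per input text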
create_index
"""Creates an index in your project. See https://docs.nomic.ai/atlas_api.html#nomic.project.AtlasProject.create_index for full detail. """ with self.project.wait_for_project_lock(): return self.project.create_index(**kwargs)
def create_index(self, **kwargs: Any) ->Any: """Creates an index in your project. See https://docs.nomic.ai/atlas_api.html#nomic.project.AtlasProject.create_index for full detail. """ with self.project.wait_for_project_lock(): return self.project.create_index(**kwargs)
Creates an index in your project. See https://docs.nomic.ai/atlas_api.html#nomic.project.AtlasProject.create_index for full detail.
test_load_nonexistent_feature
"""Tests that KeyError is thrown for nonexistent feature/key in dataset""" page_content_column = 'langchain' name = 'v2' loader = HuggingFaceDatasetLoader(HUGGING_FACE_EXAMPLE_DATASET, page_content_column, name) with pytest.raises(KeyError): loader.load()
@pytest.mark.requires('datasets') def test_load_nonexistent_feature() ->None: """Tests that KeyError is thrown for nonexistent feature/key in dataset""" page_content_column = 'langchain' name = 'v2' loader = HuggingFaceDatasetLoader(HUGGING_FACE_EXAMPLE_DATASET, page_content_column, name) with pytest.raises(KeyError): loader.load()
Tests that KeyError is thrown for nonexistent feature/key in dataset
_load_file_from_ids
"""Load files from a list of IDs.""" if not self.file_ids: raise ValueError('file_ids must be set') docs = [] for file_id in self.file_ids: docs.extend(self._load_file_from_id(file_id)) return docs
def _load_file_from_ids(self) ->List[Document]: """Load files from a list of IDs.""" if not self.file_ids: raise ValueError('file_ids must be set') docs = [] for file_id in self.file_ids: docs.extend(self._load_file_from_id(file_id)) return docs
Load files from a list of IDs.
__getitem__
...
@overload def __getitem__(self, index: slice) ->ChatPromptTemplate: ...
null
test_initialization_ghe
loader = GitHubIssuesLoader(repo='repo', access_token='access_token', github_api_url='https://github.example.com/api/v3') assert loader.repo == 'repo' assert loader.access_token == 'access_token' assert loader.github_api_url == 'https://github.example.com/api/v3' assert loader.headers == {'Accept': 'application/vnd.github+json', 'Authorization': 'Bearer access_token'}
def test_initialization_ghe() ->None: loader = GitHubIssuesLoader(repo='repo', access_token='access_token', github_api_url='https://github.example.com/api/v3') assert loader.repo == 'repo' assert loader.access_token == 'access_token' assert loader.github_api_url == 'https://github.example.com/api/v3' assert loader.headers == {'Accept': 'application/vnd.github+json', 'Authorization': 'Bearer access_token'}
null
_strip
return text.strip()
def _strip(text: str) ->str: return text.strip()
null
validate_environment
"""Validate that api key and python package exists in environment.""" try: import cohere except ImportError: raise ImportError( 'Could not import cohere python package. Please install it with `pip install cohere`.' ) else: cohere_api_key = get_from_dict_or_env(values, 'cohere_api_key', 'COHERE_API_KEY') client_name = values['user_agent'] values['client'] = cohere.Client(cohere_api_key, client_name=client_name) values['async_client'] = cohere.AsyncClient(cohere_api_key, client_name =client_name) return values
@root_validator() def validate_environment(cls, values: Dict) ->Dict: """Validate that api key and python package exists in environment.""" try: import cohere except ImportError: raise ImportError( 'Could not import cohere python package. Please install it with `pip install cohere`.' ) else: cohere_api_key = get_from_dict_or_env(values, 'cohere_api_key', 'COHERE_API_KEY') client_name = values['user_agent'] values['client'] = cohere.Client(cohere_api_key, client_name= client_name) values['async_client'] = cohere.AsyncClient(cohere_api_key, client_name=client_name) return values
Validate that api key and python package exists in environment.
test_add_texts_with_metadata
index = mock_index(DIRECT_ACCESS_INDEX) vectorsearch = default_databricks_vector_search(index) vectors = DEFAULT_EMBEDDING_MODEL.embed_documents(fake_texts) metadatas = [{'feat1': str(i), 'feat2': i + 1000} for i in range(len( fake_texts))] added_ids = vectorsearch.add_texts(fake_texts, metadatas=metadatas) index.upsert.assert_called_once_with([{DEFAULT_PRIMARY_KEY: id_, DEFAULT_TEXT_COLUMN: text, DEFAULT_VECTOR_COLUMN: vector, **metadata} for text, vector, id_, metadata in zip(fake_texts, vectors, added_ids, metadatas)]) assert len(added_ids) == len(fake_texts) assert all([is_valid_uuid(id_) for id_ in added_ids])
@pytest.mark.requires('databricks', 'databricks.vector_search') def test_add_texts_with_metadata() ->None: index = mock_index(DIRECT_ACCESS_INDEX) vectorsearch = default_databricks_vector_search(index) vectors = DEFAULT_EMBEDDING_MODEL.embed_documents(fake_texts) metadatas = [{'feat1': str(i), 'feat2': i + 1000} for i in range(len( fake_texts))] added_ids = vectorsearch.add_texts(fake_texts, metadatas=metadatas) index.upsert.assert_called_once_with([{DEFAULT_PRIMARY_KEY: id_, DEFAULT_TEXT_COLUMN: text, DEFAULT_VECTOR_COLUMN: vector, ** metadata} for text, vector, id_, metadata in zip(fake_texts, vectors, added_ids, metadatas)]) assert len(added_ids) == len(fake_texts) assert all([is_valid_uuid(id_) for id_ in added_ids])
null
test_api_key_masked_when_passed_via_constructor
llm = StochasticAI(stochasticai_api_key='secret-api-key') print(llm.stochasticai_api_key, end='') captured = capsys.readouterr() assert captured.out == '**********'
def test_api_key_masked_when_passed_via_constructor(capsys: CaptureFixture ) ->None: llm = StochasticAI(stochasticai_api_key='secret-api-key') print(llm.stochasticai_api_key, end='') captured = capsys.readouterr() assert captured.out == '**********'
null
dummy_transform
"""Transform a dummy input for tests.""" outputs = inputs outputs['greeting' ] = f"{inputs['first_name']} {inputs['last_name']} says hello" del outputs['first_name'] del outputs['last_name'] return outputs
def dummy_transform(inputs: Dict[str, str]) ->Dict[str, str]: """Transform a dummy input for tests.""" outputs = inputs outputs['greeting' ] = f"{inputs['first_name']} {inputs['last_name']} says hello" del outputs['first_name'] del outputs['last_name'] return outputs
Transform a dummy input for tests.
run
"""Execute a DAX command and return a json representing the results.""" logger.debug('Running command: %s', command) response = requests.post(self.request_url, json=self._create_json_content( command), headers=self.headers, timeout=10) if response.status_code == 403: return ( 'TokenError: Could not login to PowerBI, please check your credentials.' ) return response.json()
def run(self, command: str) ->Any: """Execute a DAX command and return a json representing the results.""" logger.debug('Running command: %s', command) response = requests.post(self.request_url, json=self. _create_json_content(command), headers=self.headers, timeout=10) if response.status_code == 403: return ( 'TokenError: Could not login to PowerBI, please check your credentials.' ) return response.json()
Execute a DAX command and return a json representing the results.
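A hypothetical call, assuming `powerbi` is a configured `PowerBIDataset` utility (the table name in the DAX query is made up):

    result = powerbi.run('EVALUATE ROW("row_count", COUNTROWS(sales))')
    # Returns the parsed JSON result, or the TokenError string on HTTP 403.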
test_each
prompt = SystemMessagePromptTemplate.from_template('You are a nice assistant.' ) + '{question}' first_llm = FakeStreamingListLLM(responses=[ 'first item, second item, third item']) parser = FakeSplitIntoListParser() second_llm = FakeStreamingListLLM(responses=['this', 'is', 'a', 'test']) chain = prompt | first_llm | parser | second_llm.map() assert dumps(chain, pretty=True) == snapshot output = chain.invoke({'question': 'What up'}) assert output == ['this', 'is', 'a'] assert (parser | second_llm.map()).invoke('first item, second item') == ['test' , 'this']
def test_each(snapshot: SnapshotAssertion) ->None: prompt = SystemMessagePromptTemplate.from_template( 'You are a nice assistant.') + '{question}' first_llm = FakeStreamingListLLM(responses=[ 'first item, second item, third item']) parser = FakeSplitIntoListParser() second_llm = FakeStreamingListLLM(responses=['this', 'is', 'a', 'test']) chain = prompt | first_llm | parser | second_llm.map() assert dumps(chain, pretty=True) == snapshot output = chain.invoke({'question': 'What up'}) assert output == ['this', 'is', 'a'] assert (parser | second_llm.map()).invoke('first item, second item') == [ 'test', 'this']
null
run
results = self.results(query, **kwargs) return self._result_as_string(results)
def run(self, query: str, **kwargs: Any) ->str: results = self.results(query, **kwargs) return self._result_as_string(results)
null
test_pickbest_textembedder_w_full_label_w_embed_and_keep
feature_embedder = pick_best_chain.PickBestFeatureEmbedder(auto_embed=False, model=MockEncoder()) str1 = '0' str2 = '1' str3 = '2' encoded_str1 = rl_chain.stringify_embedding(list(encoded_keyword + str1)) encoded_str2 = rl_chain.stringify_embedding(list(encoded_keyword + str2)) encoded_str3 = rl_chain.stringify_embedding(list(encoded_keyword + str3)) ctx_str_1 = 'context1' encoded_ctx_str_1 = rl_chain.stringify_embedding(list(encoded_keyword + ctx_str_1)) named_actions = {'action1': rl_chain.EmbedAndKeep([str1, str2, str3])} context = {'context': rl_chain.EmbedAndKeep(ctx_str_1)} expected = f"""shared |context {ctx_str_1 + ' ' + encoded_ctx_str_1} 0:-0.0:1.0 |action1 {str1 + ' ' + encoded_str1} |action1 {str2 + ' ' + encoded_str2} |action1 {str3 + ' ' + encoded_str3} """ selected = pick_best_chain.PickBestSelected(index=0, probability=1.0, score=0.0 ) event = pick_best_chain.PickBestEvent(inputs={}, to_select_from= named_actions, based_on=context, selected=selected) vw_ex_str = feature_embedder.format(event) assert vw_ex_str == expected
@pytest.mark.requires('vowpal_wabbit_next') def test_pickbest_textembedder_w_full_label_w_embed_and_keep() ->None: feature_embedder = pick_best_chain.PickBestFeatureEmbedder(auto_embed= False, model=MockEncoder()) str1 = '0' str2 = '1' str3 = '2' encoded_str1 = rl_chain.stringify_embedding(list(encoded_keyword + str1)) encoded_str2 = rl_chain.stringify_embedding(list(encoded_keyword + str2)) encoded_str3 = rl_chain.stringify_embedding(list(encoded_keyword + str3)) ctx_str_1 = 'context1' encoded_ctx_str_1 = rl_chain.stringify_embedding(list(encoded_keyword + ctx_str_1)) named_actions = {'action1': rl_chain.EmbedAndKeep([str1, str2, str3])} context = {'context': rl_chain.EmbedAndKeep(ctx_str_1)} expected = f"""shared |context {ctx_str_1 + ' ' + encoded_ctx_str_1} 0:-0.0:1.0 |action1 {str1 + ' ' + encoded_str1} |action1 {str2 + ' ' + encoded_str2} |action1 {str3 + ' ' + encoded_str3} """ selected = pick_best_chain.PickBestSelected(index=0, probability=1.0, score=0.0) event = pick_best_chain.PickBestEvent(inputs={}, to_select_from= named_actions, based_on=context, selected=selected) vw_ex_str = feature_embedder.format(event) assert vw_ex_str == expected
null
test_openai_batch
"""Test batch tokens from ChatOpenAI.""" llm = ChatOpenAI(max_tokens=10) result = llm.batch(["I'm Pickle Rick", "I'm not Pickle Rick"]) for token in result: assert isinstance(token.content, str)
@pytest.mark.scheduled def test_openai_batch() ->None: """Test batch tokens from ChatOpenAI.""" llm = ChatOpenAI(max_tokens=10) result = llm.batch(["I'm Pickle Rick", "I'm not Pickle Rick"]) for token in result: assert isinstance(token.content, str)
Test batch tokens from ChatOpenAI.
test_partial_functions_json_output_parser
def input_iter(_: Any) ->Iterator[AIMessageChunk]: for token in STREAMED_TOKENS: yield AIMessageChunk(content='', additional_kwargs={'function_call': {'arguments': token}}) chain = input_iter | JsonOutputFunctionsParser() assert list(chain.stream(None)) == EXPECTED_STREAMED_JSON
def test_partial_functions_json_output_parser() ->None: def input_iter(_: Any) ->Iterator[AIMessageChunk]: for token in STREAMED_TOKENS: yield AIMessageChunk(content='', additional_kwargs={ 'function_call': {'arguments': token}}) chain = input_iter | JsonOutputFunctionsParser() assert list(chain.stream(None)) == EXPECTED_STREAMED_JSON
null
__init__
self.llm = llm self.tools = tools self.chat_planner = load_chat_planner(llm) self.response_generator = load_response_generator(llm) self.task_executor: TaskExecutor
def __init__(self, llm: BaseLanguageModel, tools: List[BaseTool]): self.llm = llm self.tools = tools self.chat_planner = load_chat_planner(llm) self.response_generator = load_response_generator(llm) self.task_executor: TaskExecutor
null
__add__
from langchain_core.prompts.chat import ChatPromptTemplate prompt = ChatPromptTemplate(messages=[self]) return prompt + other
def __add__(self, other: Any) ->ChatPromptTemplate: from langchain_core.prompts.chat import ChatPromptTemplate prompt = ChatPromptTemplate(messages=[self]) return prompt + other
null
return_values
"""Return values of the agent.""" return ['output']
@property def return_values(self) ->List[str]: """Return values of the agent.""" return ['output']
Return values of the agent.
_run
"""Use the tool.""" return self.api_wrapper.run(query)
def _run(self, query: str, run_manager: Optional[CallbackManagerForToolRun] =None) ->str: """Use the tool.""" return self.api_wrapper.run(query)
Use the tool.
__init__
"""Initialize the RedisStore with a Redis connection. Must provide either a Redis client or a redis_url with optional client_kwargs. Args: client: A Redis connection instance redis_url: redis url client_kwargs: Keyword arguments to pass to the Redis client ttl: time to expire keys in seconds if provided, if None keys will never expire namespace: if provided, all keys will be prefixed with this namespace """ try: from redis import Redis except ImportError as e: raise ImportError( 'The RedisStore requires the redis library to be installed. pip install redis' ) from e if client and redis_url or client and client_kwargs: raise ValueError( 'Either a Redis client or a redis_url with optional client_kwargs must be provided, but not both.' ) if client: if not isinstance(client, Redis): raise TypeError( f'Expected Redis client, got {type(client).__name__} instead.') _client = client else: if not redis_url: raise ValueError( 'Either a Redis client or a redis_url must be provided.') _client = get_client(redis_url, **client_kwargs or {}) self.client = _client if not isinstance(ttl, int) and ttl is not None: raise TypeError(f'Expected int or None, got {type(ttl)} instead.') self.ttl = ttl self.namespace = namespace
def __init__(self, *, client: Any=None, redis_url: Optional[str]=None, client_kwargs: Optional[dict]=None, ttl: Optional[int]=None, namespace: Optional[str]=None) ->None: """Initialize the RedisStore with a Redis connection. Must provide either a Redis client or a redis_url with optional client_kwargs. Args: client: A Redis connection instance redis_url: redis url client_kwargs: Keyword arguments to pass to the Redis client ttl: time to expire keys in seconds if provided, if None keys will never expire namespace: if provided, all keys will be prefixed with this namespace """ try: from redis import Redis except ImportError as e: raise ImportError( 'The RedisStore requires the redis library to be installed. pip install redis' ) from e if client and redis_url or client and client_kwargs: raise ValueError( 'Either a Redis client or a redis_url with optional client_kwargs must be provided, but not both.' ) if client: if not isinstance(client, Redis): raise TypeError( f'Expected Redis client, got {type(client).__name__} instead.') _client = client else: if not redis_url: raise ValueError( 'Either a Redis client or a redis_url must be provided.') _client = get_client(redis_url, **client_kwargs or {}) self.client = _client if not isinstance(ttl, int) and ttl is not None: raise TypeError(f'Expected int or None, got {type(ttl)} instead.') self.ttl = ttl self.namespace = namespace
Initialize the RedisStore with a Redis connection. Must provide either a Redis client or a redis_url with optional client_kwargs. Args: client: A Redis connection instance redis_url: redis url client_kwargs: Keyword arguments to pass to the Redis client ttl: time to expire keys in seconds if provided, if None keys will never expire namespace: if provided, all keys will be prefixed with this namespace
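A minimal sketch of the two construction paths described above (the import path and URL are assumptions):

    from langchain_community.storage import RedisStore

    # Path 1: let the store create its own client from a URL.
    store = RedisStore(redis_url="redis://localhost:6379", ttl=600, namespace="docs")

    # Path 2: pass an existing Redis client instead (do not combine with redis_url).
    # import redis
    # store = RedisStore(client=redis.Redis(host="localhost", port=6379))

    store.mset([("key-1", b"value-1")])
    store.mget(["key-1"])  # -> [b"value-1"]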
__init__
super().__init__(pydantic_object=LineList)
def __init__(self) ->None: super().__init__(pydantic_object=LineList)
null
similarity_search_with_score
"""Return MongoDB documents most similar to the given query and their scores. Uses the knnBeta Operator available in MongoDB Atlas Search. This feature is in early access and available only for evaluation purposes, to validate functionality, and to gather feedback from a small closed group of early access users. It is not recommended for production deployments as we may introduce breaking changes. For more: https://www.mongodb.com/docs/atlas/atlas-search/knn-beta Args: query: Text to look up documents similar to. k: (Optional) number of documents to return. Defaults to 4. pre_filter: (Optional) dictionary of argument(s) to prefilter document fields on. post_filter_pipeline: (Optional) Pipeline of MongoDB aggregation stages following the knnBeta vector search. Returns: List of documents most similar to the query and their scores. """ embedding = self._embedding.embed_query(query) docs = self._similarity_search_with_score(embedding, k=k, pre_filter= pre_filter, post_filter_pipeline=post_filter_pipeline) return docs
def similarity_search_with_score(self, query: str, k: int=4, pre_filter: Optional[Dict]=None, post_filter_pipeline: Optional[List[Dict]]=None ) ->List[Tuple[Document, float]]: """Return MongoDB documents most similar to the given query and their scores. Uses the knnBeta Operator available in MongoDB Atlas Search. This feature is in early access and available only for evaluation purposes, to validate functionality, and to gather feedback from a small closed group of early access users. It is not recommended for production deployments as we may introduce breaking changes. For more: https://www.mongodb.com/docs/atlas/atlas-search/knn-beta Args: query: Text to look up documents similar to. k: (Optional) number of documents to return. Defaults to 4. pre_filter: (Optional) dictionary of argument(s) to prefilter document fields on. post_filter_pipeline: (Optional) Pipeline of MongoDB aggregation stages following the knnBeta vector search. Returns: List of documents most similar to the query and their scores. """ embedding = self._embedding.embed_query(query) docs = self._similarity_search_with_score(embedding, k=k, pre_filter= pre_filter, post_filter_pipeline=post_filter_pipeline) return docs
Return MongoDB documents most similar to the given query and their scores. Uses the knnBeta Operator available in MongoDB Atlas Search. This feature is in early access and available only for evaluation purposes, to validate functionality, and to gather feedback from a small closed group of early access users. It is not recommended for production deployments as we may introduce breaking changes. For more: https://www.mongodb.com/docs/atlas/atlas-search/knn-beta Args: query: Text to look up documents similar to. k: (Optional) number of documents to return. Defaults to 4. pre_filter: (Optional) dictionary of argument(s) to prefilter document fields on. post_filter_pipeline: (Optional) Pipeline of MongoDB aggregation stages following the knnBeta vector search. Returns: List of documents most similar to the query and their scores.
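A hedged usage sketch, assuming `vectorstore` is an already-initialized MongoDB Atlas vector search instance (collection, index, and embedding setup omitted):

    docs_and_scores = vectorstore.similarity_search_with_score(
        "How do I create a vector index?",
        k=4,
    )
    for doc, score in docs_and_scores:
        print(round(score, 3), doc.page_content[:80])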
require_inference
return False
def require_inference(self) ->bool: return False
null
__str__
if not self._filter and not self._operator: raise ValueError('Improperly initialized RedisFilterExpression') if self._operator: if not isinstance(self._left, RedisFilterExpression) or not isinstance(self ._right, RedisFilterExpression): raise TypeError( 'Improper combination of filters.Both left and right should be type FilterExpression' ) operator_str = ' | ' if self._operator == RedisFilterOperator.OR else ' ' return self.format_expression(self._left, self._right, operator_str) if not self._filter: raise ValueError('Improperly initialized RedisFilterExpression') return self._filter
def __str__(self) ->str: if not self._filter and not self._operator: raise ValueError('Improperly initialized RedisFilterExpression') if self._operator: if not isinstance(self._left, RedisFilterExpression) or not isinstance( self._right, RedisFilterExpression): raise TypeError( 'Improper combination of filters.Both left and right should be type FilterExpression' ) operator_str = (' | ' if self._operator == RedisFilterOperator.OR else ' ') return self.format_expression(self._left, self._right, operator_str) if not self._filter: raise ValueError('Improperly initialized RedisFilterExpression') return self._filter
null
test_graph_cypher_qa_chain_prompt_selection_1
qa_prompt_template = 'QA Prompt' cypher_prompt_template = 'Cypher Prompt' qa_prompt = PromptTemplate(template=qa_prompt_template, input_variables=[]) cypher_prompt = PromptTemplate(template=cypher_prompt_template, input_variables=[]) chain = GraphCypherQAChain.from_llm(llm=FakeLLM(), graph=FakeGraphStore(), verbose=True, return_intermediate_steps=False, qa_prompt=qa_prompt, cypher_prompt=cypher_prompt) assert chain.qa_chain.prompt == qa_prompt assert chain.cypher_generation_chain.prompt == cypher_prompt
def test_graph_cypher_qa_chain_prompt_selection_1() ->None: qa_prompt_template = 'QA Prompt' cypher_prompt_template = 'Cypher Prompt' qa_prompt = PromptTemplate(template=qa_prompt_template, input_variables=[]) cypher_prompt = PromptTemplate(template=cypher_prompt_template, input_variables=[]) chain = GraphCypherQAChain.from_llm(llm=FakeLLM(), graph=FakeGraphStore (), verbose=True, return_intermediate_steps=False, qa_prompt= qa_prompt, cypher_prompt=cypher_prompt) assert chain.qa_chain.prompt == qa_prompt assert chain.cypher_generation_chain.prompt == cypher_prompt
null
get_knowledge_triplets
chain = LLMChain(llm=self.llm, prompt=self.knowledge_extraction_prompt) buffer_string = get_buffer_string(self.chat_memory.messages[-self.k * 2:], human_prefix=self.human_prefix, ai_prefix=self.ai_prefix) output = chain.predict(history=buffer_string, input=input_string, verbose=True) knowledge = parse_triples(output) return knowledge
def get_knowledge_triplets(self, input_string: str) ->List[KnowledgeTriple]: chain = LLMChain(llm=self.llm, prompt=self.knowledge_extraction_prompt) buffer_string = get_buffer_string(self.chat_memory.messages[-self.k * 2 :], human_prefix=self.human_prefix, ai_prefix=self.ai_prefix) output = chain.predict(history=buffer_string, input=input_string, verbose=True) knowledge = parse_triples(output) return knowledge
null
flush_tracker
"""Flush the tracker and setup the session. Everything after this will be a new table. Args: name: Name of the performed session so far so it is identifiable langchain_asset: The langchain asset to save. finish: Whether to finish the run. Returns: None """ self._log_session(langchain_asset) if langchain_asset: try: self._log_model(langchain_asset) except Exception: self.comet_ml.LOGGER.error('Failed to export agent or LLM to Comet', exc_info=True, extra={'show_traceback': True}) if finish: self.experiment.end() if reset: self._reset(task_type, workspace, project_name, tags, name, visualizations, complexity_metrics, custom_metrics)
def flush_tracker(self, langchain_asset: Any=None, task_type: Optional[str] ='inference', workspace: Optional[str]=None, project_name: Optional[str ]='comet-langchain-demo', tags: Optional[Sequence]=None, name: Optional [str]=None, visualizations: Optional[List[str]]=None, complexity_metrics: bool=False, custom_metrics: Optional[Callable]=None, finish: bool=False, reset: bool=False) ->None: """Flush the tracker and setup the session. Everything after this will be a new table. Args: name: Name of the performed session so far so it is identifiable langchain_asset: The langchain asset to save. finish: Whether to finish the run. Returns: None """ self._log_session(langchain_asset) if langchain_asset: try: self._log_model(langchain_asset) except Exception: self.comet_ml.LOGGER.error('Failed to export agent or LLM to Comet' , exc_info=True, extra={'show_traceback': True}) if finish: self.experiment.end() if reset: self._reset(task_type, workspace, project_name, tags, name, visualizations, complexity_metrics, custom_metrics)
Flush the tracker and setup the session. Everything after this will be a new table. Args: name: Name of the performed session so far so it is identifiable langchain_asset: The langchain asset to save. finish: Whether to finish the run. Returns: None
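A hypothetical call, assuming `handler` is the Comet callback handler this method belongs to and `chain` is the LangChain object being traced:

    # Log the session and the chain itself, then start a fresh table
    # for the next round of runs instead of ending the experiment.
    handler.flush_tracker(langchain_asset=chain, name="session-1", finish=False, reset=True)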
get_lc_namespace
"""Get the namespace of the langchain object.""" return ['langchain', 'schema', 'runnable']
@classmethod def get_lc_namespace(cls) ->List[str]: """Get the namespace of the langchain object.""" return ['langchain', 'schema', 'runnable']
Get the namespace of the langchain object.
_get_relevant_documents
request = self._prepare_search_request(query, **kwargs) response = self.client.search_documents(request=request) return self._parse_search_response(response=response)
def _get_relevant_documents(self, query: str, *, run_manager: CallbackManagerForRetrieverRun, **kwargs: Any) ->List[Document]: request = self._prepare_search_request(query, **kwargs) response = self.client.search_documents(request=request) return self._parse_search_response(response=response)
null
page_create
try: import json except ImportError: raise ImportError( 'json is not installed. Please install it with `pip install json`') params = json.loads(query) return self.confluence.create_page(**dict(params))
def page_create(self, query: str) ->str: try: import json except ImportError: raise ImportError( 'json is not installed. Please install it with `pip install json`') params = json.loads(query) return self.confluence.create_page(**dict(params))
null
input_keys
"""Get input keys. Input refers to user input here.""" return ['input']
@property def input_keys(self) ->List[str]: """Get input keys. Input refers to user input here.""" return ['input']
Get input keys. Input refers to user input here.
_construct_json_body
"""Constructs the request body as a dictionary (JSON).""" raise NotImplementedError
def _construct_json_body(self, prompt: str, params: dict) ->dict: """Constructs the request body as a dictionary (JSON).""" raise NotImplementedError
Constructs the request body as a dictionary (JSON).
_on_chain_error
crumbs = self.get_breadcrumbs(run) run_type = run.run_type.capitalize() self.function_callback(f"{get_colored_text('[chain/error]', color='red')} " + get_bolded_text( f"""[{crumbs}] [{elapsed(run)}] {run_type} run errored with error: """) + f"{try_json_stringify(run.error, '[error]')}")
def _on_chain_error(self, run: Run) ->None: crumbs = self.get_breadcrumbs(run) run_type = run.run_type.capitalize() self.function_callback( f"{get_colored_text('[chain/error]', color='red')} " + get_bolded_text( f"""[{crumbs}] [{elapsed(run)}] {run_type} run errored with error: """ ) + f"{try_json_stringify(run.error, '[error]')}")
null
format
...
@abstractmethod def format(self, event: TEvent) ->str: ...
null
test_exception_handling_callable
expected = 'foo bar' handling = lambda _: expected _tool = _FakeExceptionTool(handle_tool_error=handling) actual = _tool.run({}) assert expected == actual
def test_exception_handling_callable() ->None: expected = 'foo bar' handling = lambda _: expected _tool = _FakeExceptionTool(handle_tool_error=handling) actual = _tool.run({}) assert expected == actual
null
prepare_output
if provider == 'anthropic': response_body = json.loads(response.get('body').read().decode()) return response_body.get('completion') else: response_body = json.loads(response.get('body').read()) if provider == 'ai21': return response_body.get('completions')[0].get('data').get('text') elif provider == 'cohere': return response_body.get('generations')[0].get('text') elif provider == 'meta': return response_body.get('generation') else: return response_body.get('results')[0].get('outputText')
@classmethod def prepare_output(cls, provider: str, response: Any) ->str: if provider == 'anthropic': response_body = json.loads(response.get('body').read().decode()) return response_body.get('completion') else: response_body = json.loads(response.get('body').read()) if provider == 'ai21': return response_body.get('completions')[0].get('data').get('text') elif provider == 'cohere': return response_body.get('generations')[0].get('text') elif provider == 'meta': return response_body.get('generation') else: return response_body.get('results')[0].get('outputText')
null
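An illustrative check of the provider-specific parsing above, faking the streaming `body` object with `io.BytesIO` (the adapter class name is an assumption; only the dict shapes visible in the code are relied on):

    import io
    import json

    fake_response = {
        "body": io.BytesIO(json.dumps({"generations": [{"text": "Hello!"}]}).encode())
    }
    # For provider "cohere" the method reads generations[0]["text"].
    assert LLMInputOutputAdapter.prepare_output("cohere", fake_response) == "Hello!"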
predict
return None
def predict(self, event: TEvent) ->Any: return None
null
from_llm
""" Create a QAGenerationChain from a language model. Args: llm: a language model prompt: a prompt template **kwargs: additional arguments Returns: a QAGenerationChain class """ _prompt = prompt or PROMPT_SELECTOR.get_prompt(llm) chain = LLMChain(llm=llm, prompt=_prompt) return cls(llm_chain=chain, **kwargs)
@classmethod def from_llm(cls, llm: BaseLanguageModel, prompt: Optional[ BasePromptTemplate]=None, **kwargs: Any) ->QAGenerationChain: """ Create a QAGenerationChain from a language model. Args: llm: a language model prompt: a prompt template **kwargs: additional arguments Returns: a QAGenerationChain class """ _prompt = prompt or PROMPT_SELECTOR.get_prompt(llm) chain = LLMChain(llm=llm, prompt=_prompt) return cls(llm_chain=chain, **kwargs)
Create a QAGenerationChain from a language model. Args: llm: a language model prompt: a prompt template **kwargs: additional arguments Returns: a QAGenerationChain class
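A short usage sketch, assuming an OpenAI chat model is available (any `BaseLanguageModel` works; the model choice is an assumption):

    from langchain.chains import QAGenerationChain
    from langchain_openai import ChatOpenAI

    chain = QAGenerationChain.from_llm(ChatOpenAI(temperature=0))
    qa_pairs = chain.run(
        "LangChain is a framework for building applications with language models."
    )
    # a list of generated question/answer pairs (one entry per text chunk)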
_load_prompt
"""Load the prompt template from config.""" config = _load_template('template', config) config = _load_output_parser(config) template_format = config.get('template_format', 'f-string') if template_format == 'jinja2': raise ValueError( f"Loading templates with '{template_format}' format is no longer supported since it can lead to arbitrary code execution. Please migrate to using the 'f-string' template format, which does not suffer from this issue." ) return PromptTemplate(**config)
def _load_prompt(config: dict) ->PromptTemplate: """Load the prompt template from config.""" config = _load_template('template', config) config = _load_output_parser(config) template_format = config.get('template_format', 'f-string') if template_format == 'jinja2': raise ValueError( f"Loading templates with '{template_format}' format is no longer supported since it can lead to arbitrary code execution. Please migrate to using the 'f-string' template format, which does not suffer from this issue." ) return PromptTemplate(**config)
Load the prompt template from config.
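A small sketch of a config dict this loader would accept; note that `template_format: "jinja2"` would raise the ValueError shown above (the keys mirror PromptTemplate's fields):

    config = {
        "template": "Tell me a {adjective} joke about {topic}.",
        "input_variables": ["adjective", "topic"],
        "template_format": "f-string",
    }
    prompt = _load_prompt(config)  # -> PromptTemplate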
get_args
parser = argparse.ArgumentParser() parser.add_argument('--docs_dir', type=str, default=_DOCS_DIR, help= 'Directory where generated markdown files are stored') return parser.parse_args()
def get_args(): parser = argparse.ArgumentParser() parser.add_argument('--docs_dir', type=str, default=_DOCS_DIR, help= 'Directory where generated markdown files are stored') return parser.parse_args()
null
request
headers = {'Authorization': f'Bearer {self.api_token}'} response = requests.request(method=method, url=url, headers=headers, json= request) if not response.ok: raise ValueError(f'HTTP {response.status_code} error: {response.text}') return response.json()
def request(self, method: str, url: str, request: Any) ->Any: headers = {'Authorization': f'Bearer {self.api_token}'} response = requests.request(method=method, url=url, headers=headers, json=request) if not response.ok: raise ValueError(f'HTTP {response.status_code} error: {response.text}') return response.json()
null
_paths_strict
if not self.paths: raise ValueError('No paths found in spec') return self.paths
@property def _paths_strict(self) ->Paths: if not self.paths: raise ValueError('No paths found in spec') return self.paths
null
add_texts
"""Run more texts through the embeddings and add to the vectorstore. Args: texts: Iterable of strings to add to the vectorstore. metadatas: Optional list of metadatas associated with the texts. ids: Optional list of unique IDs. Returns: List of ids from adding the texts into the vectorstore. """ if not isinstance(self.docstore, AddableMixin): raise ValueError( f'If trying to add texts, the underlying docstore should support adding items, which {self.docstore} does not' ) embeddings = self.embedding.embed_documents(list(texts)) documents = [] for i, text in enumerate(texts): metadata = metadatas[i] if metadatas else {} documents.append(Document(page_content=text, metadata=metadata)) last_id = int(self.ids[-1]) + 1 if ids is None: ids = np.array([str(last_id + id) for id, _ in enumerate(texts)]) self.index.add(np.array(ids), np.array(embeddings)) self.docstore.add(dict(zip(ids, documents))) self.ids.extend(ids) return ids.tolist()
def add_texts(self, texts: Iterable[str], metadatas: Optional[List[Dict]]= None, ids: Optional[np.ndarray]=None, **kwargs: Any) ->List[str]: """Run more texts through the embeddings and add to the vectorstore. Args: texts: Iterable of strings to add to the vectorstore. metadatas: Optional list of metadatas associated with the texts. ids: Optional list of unique IDs. Returns: List of ids from adding the texts into the vectorstore. """ if not isinstance(self.docstore, AddableMixin): raise ValueError( f'If trying to add texts, the underlying docstore should support adding items, which {self.docstore} does not' ) embeddings = self.embedding.embed_documents(list(texts)) documents = [] for i, text in enumerate(texts): metadata = metadatas[i] if metadatas else {} documents.append(Document(page_content=text, metadata=metadata)) last_id = int(self.ids[-1]) + 1 if ids is None: ids = np.array([str(last_id + id) for id, _ in enumerate(texts)]) self.index.add(np.array(ids), np.array(embeddings)) self.docstore.add(dict(zip(ids, documents))) self.ids.extend(ids) return ids.tolist()
Run more texts through the embeddings and add to the vectorstore. Args: texts: Iterable of strings to add to the vectorstore. metadatas: Optional list of metadatas associated with the texts. ids: Optional list of unique IDs. Returns: List of ids from adding the texts into the vectorstore.
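A small runnable sketch of the id-assignment rule above: when no ids are passed, new string ids continue from the last existing id.

import numpy as np

existing_ids = ["100", "101"]            # stand-in for self.ids
texts = ["alpha", "beta", "gamma"]
last_id = int(existing_ids[-1]) + 1
ids = np.array([str(last_id + i) for i, _ in enumerate(texts)])
print(ids.tolist())                      # ['102', '103', '104']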
on_chain_end
"""Do nothing when chain ends.""" pass
def on_chain_end(self, outputs: Dict[str, Any], **kwargs: Any) ->None: """Do nothing when chain ends.""" pass
Do nothing when chain ends.
version_callback
if show_version: typer.echo(f'langchain-cli {__version__}') raise typer.Exit()
def version_callback(show_version: bool) ->None: if show_version: typer.echo(f'langchain-cli {__version__}') raise typer.Exit()
null
_import_json_tool_JsonGetValueTool
from langchain_community.tools.json.tool import JsonGetValueTool return JsonGetValueTool
def _import_json_tool_JsonGetValueTool() ->Any: from langchain_community.tools.json.tool import JsonGetValueTool return JsonGetValueTool
null
output_keys
"""Expect output key. :meta private: """ return [self.output_key]
@property def output_keys(self) ->List[str]: """Expect output key. :meta private: """ return [self.output_key]
Expect output key. :meta private:
_import_office365_events_search
from langchain_community.tools.office365.events_search import O365SearchEvents return O365SearchEvents
def _import_office365_events_search() ->Any: from langchain_community.tools.office365.events_search import O365SearchEvents return O365SearchEvents
null
to_sql_model
"""Convert a BaseMessage instance to a SQLAlchemy model.""" raise NotImplementedError
@abstractmethod def to_sql_model(self, message: BaseMessage, session_id: str) ->Any: """Convert a BaseMessage instance to a SQLAlchemy model.""" raise NotImplementedError
Convert a BaseMessage instance to a SQLAlchemy model.
_get_paths
"""Fetch all relative paths in the navbar.""" return [urlparse(loc.text).path for loc in soup.find_all('loc')]
def _get_paths(self, soup: Any) ->List[str]: """Fetch all relative paths in the navbar.""" return [urlparse(loc.text).path for loc in soup.find_all('loc')]
Fetch all relative paths in the navbar.
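A self-contained sketch of the <loc> extraction above against a toy sitemap; it only needs beautifulsoup4 and the stdlib HTML parser, and the URLs are made up.

from urllib.parse import urlparse
from bs4 import BeautifulSoup

sitemap = """
<urlset>
  <url><loc>https://example.com/docs/intro</loc></url>
  <url><loc>https://example.com/docs/api/reference</loc></url>
</urlset>
"""
soup = BeautifulSoup(sitemap, "html.parser")
paths = [urlparse(loc.text).path for loc in soup.find_all("loc")]
print(paths)  # ['/docs/intro', '/docs/api/reference']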
get_lc_namespace
"""Get the namespace of the langchain object.""" return ['langchain', 'llms', 'vertexai']
@classmethod def get_lc_namespace(cls) ->List[str]: """Get the namespace of the langchain object.""" return ['langchain', 'llms', 'vertexai']
Get the namespace of the langchain object.
_index_name
hashed_index = _hash(llm_string) return f'cache:{hashed_index}'
def _index_name(self, llm_string: str) ->str: hashed_index = _hash(llm_string) return f'cache:{hashed_index}'
null
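A sketch of the cache-key scheme above; the md5-based _hash helper below is a hypothetical stand-in for the module-level helper the method references, and the llm string is made up.

import hashlib

def _hash(s: str) -> str:
    # hypothetical stand-in; the real helper may hash differently
    return hashlib.md5(s.encode()).hexdigest()

llm_string = "openai|gpt-3.5-turbo|temperature=0"
print(f"cache:{_hash(llm_string)}")   # e.g. cache:3a7c9...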
_parse_chat_history_gemini
from vertexai.preview.generative_models import Content, Image, Part def _convert_to_prompt(part: Union[str, Dict]) ->Part: if isinstance(part, str): return Part.from_text(part) if not isinstance(part, Dict): raise ValueError( f"Message's content is expected to be a dict, got {type(part)}!") if part['type'] == 'text': return Part.from_text(part['text']) elif part['type'] == 'image_url': path = part['image_url']['url'] if path.startswith('gs://'): image = load_image_from_gcs(path=path, project=project) elif path.startswith('data:image/'): try: encoded = re.search('data:image/\\w{2,4};base64,(.*)', path ).group(1) except AttributeError: raise ValueError( 'Invalid image uri. It should be in the format data:image/<image_type>;base64,<base64_encoded_image>.' ) image = Image.from_bytes(base64.b64decode(encoded)) elif _is_url(path): response = requests.get(path) response.raise_for_status() image = Image.from_bytes(response.content) else: image = Image.load_from_file(path) else: raise ValueError('Only text and image_url types are supported!') return Part.from_image(image) vertex_messages = [] for i, message in enumerate(history): if i == 0 and isinstance(message, SystemMessage): raise ValueError('SystemMessages are not yet supported!') elif isinstance(message, AIMessage): role = 'model' elif isinstance(message, HumanMessage): role = 'user' else: raise ValueError( f'Unexpected message with type {type(message)} at the position {i}.' ) raw_content = message.content if isinstance(raw_content, str): raw_content = [raw_content] parts = [_convert_to_prompt(part) for part in raw_content] vertex_message = Content(role=role, parts=parts) vertex_messages.append(vertex_message) return vertex_messages
def _parse_chat_history_gemini(history: List[BaseMessage], project: Optional[str]) ->List['Content']: from vertexai.preview.generative_models import Content, Image, Part def _convert_to_prompt(part: Union[str, Dict]) ->Part: if isinstance(part, str): return Part.from_text(part) if not isinstance(part, Dict): raise ValueError( f"Message's content is expected to be a dict, got {type(part)}!" ) if part['type'] == 'text': return Part.from_text(part['text']) elif part['type'] == 'image_url': path = part['image_url']['url'] if path.startswith('gs://'): image = load_image_from_gcs(path=path, project=project) elif path.startswith('data:image/'): try: encoded = re.search('data:image/\\w{2,4};base64,(.*)', path ).group(1) except AttributeError: raise ValueError( 'Invalid image uri. It should be in the format data:image/<image_type>;base64,<base64_encoded_image>.' ) image = Image.from_bytes(base64.b64decode(encoded)) elif _is_url(path): response = requests.get(path) response.raise_for_status() image = Image.from_bytes(response.content) else: image = Image.load_from_file(path) else: raise ValueError('Only text and image_url types are supported!') return Part.from_image(image) vertex_messages = [] for i, message in enumerate(history): if i == 0 and isinstance(message, SystemMessage): raise ValueError('SystemMessages are not yet supported!') elif isinstance(message, AIMessage): role = 'model' elif isinstance(message, HumanMessage): role = 'user' else: raise ValueError( f'Unexpected message with type {type(message)} at the position {i}.' ) raw_content = message.content if isinstance(raw_content, str): raw_content = [raw_content] parts = [_convert_to_prompt(part) for part in raw_content] vertex_message = Content(role=role, parts=parts) vertex_messages.append(vertex_message) return vertex_messages
null
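The message shapes this parser accepts, sketched below; building the inputs only needs langchain-core, while the actual conversion requires the vertexai SDK, so the final call is left as a comment. The bucket path and project id are made up.

from langchain_core.messages import AIMessage, HumanMessage

history = [
    HumanMessage(content=[
        {"type": "text", "text": "What is in this picture?"},
        {"type": "image_url", "image_url": {"url": "gs://my-bucket/cat.png"}},  # hypothetical path
    ]),
    AIMessage(content="It looks like a cat."),
]
# _parse_chat_history_gemini(history, project="my-gcp-project") would map these
# to vertexai Content objects with roles 'user' and 'model'.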
format_messages
"""Format kwargs into a list of messages. Args: **kwargs: keyword arguments to use for filling in templates in messages. Returns: A list of formatted messages with all template variables filled in. """ examples = self._get_examples(**kwargs) examples = [{k: e[k] for k in self.example_prompt.input_variables} for e in examples] messages = [message for example in examples for message in self. example_prompt.format_messages(**example)] return messages
def format_messages(self, **kwargs: Any) ->List[BaseMessage]: """Format kwargs into a list of messages. Args: **kwargs: keyword arguments to use for filling in templates in messages. Returns: A list of formatted messages with all template variables filled in. """ examples = self._get_examples(**kwargs) examples = [{k: e[k] for k in self.example_prompt.input_variables} for e in examples] messages = [message for example in examples for message in self. example_prompt.format_messages(**example)] return messages
Format kwargs into a list of messages. Args: **kwargs: keyword arguments to use for filling in templates in messages. Returns: A list of formatted messages with all template variables filled in.
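A hedged usage sketch; the few-shot chat prompt class and import paths are assumptions about the surrounding API and may differ between versions.

from langchain_core.prompts import ChatPromptTemplate, FewShotChatMessagePromptTemplate

example_prompt = ChatPromptTemplate.from_messages(
    [("human", "{input}"), ("ai", "{output}")]
)
few_shot = FewShotChatMessagePromptTemplate(
    example_prompt=example_prompt,
    examples=[{"input": "2+2", "output": "4"}, {"input": "2+3", "output": "5"}],
)
print(few_shot.format_messages())
# alternating human/ai example messages with the templates filled in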
similarity_search_with_relevance_scores
"""Perform a similarity search with Rockset Args: query (str): Text to look up documents similar to. distance_func (DistanceFunction): how to compute distance between two vectors in Rockset. k (int, optional): Top K neighbors to retrieve. Defaults to 4. where_str (Optional[str], optional): Metadata filters supplied as a SQL `where` condition string. Defaults to None. eg. "price<=70.0 AND brand='Nintendo'" NOTE: Please do not let end-user to fill this and always be aware of SQL injection. Returns: List[Tuple[Document, float]]: List of documents with their relevance score """ return self.similarity_search_by_vector_with_relevance_scores(self. _embeddings.embed_query(query), k, distance_func, where_str, **kwargs)
def similarity_search_with_relevance_scores(self, query: str, k: int=4,
    distance_func: DistanceFunction=DistanceFunction.COSINE_SIM, where_str:
    Optional[str]=None, **kwargs: Any) ->List[Tuple[Document, float]]:
    """Perform a similarity search with Rockset.

    Args:
        query (str): Text to look up documents similar to.
        distance_func (DistanceFunction): how to compute
            distance between two vectors in Rockset.
        k (int, optional): Top K neighbors to retrieve. Defaults to 4.
        where_str (Optional[str], optional): Metadata filters supplied as a
            SQL `where` condition string. Defaults to None.
            e.g. "price<=70.0 AND brand='Nintendo'"
        NOTE: Please do not let the end user fill this in directly, and always
            be aware of SQL injection.

    Returns:
        List[Tuple[Document, float]]: List of documents with their relevance
            scores.
    """
    return self.similarity_search_by_vector_with_relevance_scores(self.
        _embeddings.embed_query(query), k, distance_func, where_str, **kwargs)
Perform a similarity search with Rockset.

    Args:
        query (str): Text to look up documents similar to.
        distance_func (DistanceFunction): how to compute
            distance between two vectors in Rockset.
        k (int, optional): Top K neighbors to retrieve. Defaults to 4.
        where_str (Optional[str], optional): Metadata filters supplied as a
            SQL `where` condition string. Defaults to None.
            e.g. "price<=70.0 AND brand='Nintendo'"
        NOTE: Please do not let the end user fill this in directly, and always
            be aware of SQL injection.

    Returns:
        List[Tuple[Document, float]]: List of documents with their relevance
            scores.
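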
assert_query
expected_query = {'query': {'script_score': {'query': {'bool': {'filter': [ {'term': {'metadata.page': 0}}]}}, 'script': {'source': "cosineSimilarity(params.query_vector, 'vector') + 1.0", 'params': { 'query_vector': [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0]}}}}} assert query_body == expected_query return query_body
def assert_query(query_body: dict, query: str) ->dict: expected_query = {'query': {'script_score': {'query': {'bool': { 'filter': [{'term': {'metadata.page': 0}}]}}, 'script': {'source': "cosineSimilarity(params.query_vector, 'vector') + 1.0", 'params': {'query_vector': [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0]}}}} } assert query_body == expected_query return query_body
null
load_evaluator
"""Load the requested evaluation chain specified by a string. Parameters ---------- evaluator : EvaluatorType The type of evaluator to load. llm : BaseLanguageModel, optional The language model to use for evaluation, by default None **kwargs : Any Additional keyword arguments to pass to the evaluator. Returns ------- Chain The loaded evaluation chain. Examples -------- >>> from langchain.evaluation import load_evaluator, EvaluatorType >>> evaluator = load_evaluator(EvaluatorType.QA) """ if evaluator not in _EVALUATOR_MAP: raise ValueError( f"""Unknown evaluator type: {evaluator} Valid types are: {list(_EVALUATOR_MAP.keys())}""" ) evaluator_cls = _EVALUATOR_MAP[evaluator] if issubclass(evaluator_cls, LLMEvalChain): try: llm = llm or ChatOpenAI(model='gpt-4', model_kwargs={'seed': 42}, temperature=0) except Exception as e: raise ValueError( f"Evaluation with the {evaluator_cls} requires a language model to function. Failed to create the default 'gpt-4' model. Please manually provide an evaluation LLM or check your openai credentials." ) from e return evaluator_cls.from_llm(llm=llm, **kwargs) else: return evaluator_cls(**kwargs)
def load_evaluator(evaluator: EvaluatorType, *, llm: Optional[ BaseLanguageModel]=None, **kwargs: Any) ->Union[Chain, StringEvaluator]: """Load the requested evaluation chain specified by a string. Parameters ---------- evaluator : EvaluatorType The type of evaluator to load. llm : BaseLanguageModel, optional The language model to use for evaluation, by default None **kwargs : Any Additional keyword arguments to pass to the evaluator. Returns ------- Chain The loaded evaluation chain. Examples -------- >>> from langchain.evaluation import load_evaluator, EvaluatorType >>> evaluator = load_evaluator(EvaluatorType.QA) """ if evaluator not in _EVALUATOR_MAP: raise ValueError( f"""Unknown evaluator type: {evaluator} Valid types are: {list(_EVALUATOR_MAP.keys())}""" ) evaluator_cls = _EVALUATOR_MAP[evaluator] if issubclass(evaluator_cls, LLMEvalChain): try: llm = llm or ChatOpenAI(model='gpt-4', model_kwargs={'seed': 42 }, temperature=0) except Exception as e: raise ValueError( f"Evaluation with the {evaluator_cls} requires a language model to function. Failed to create the default 'gpt-4' model. Please manually provide an evaluation LLM or check your openai credentials." ) from e return evaluator_cls.from_llm(llm=llm, **kwargs) else: return evaluator_cls(**kwargs)
Load the requested evaluation chain specified by a string. Parameters ---------- evaluator : EvaluatorType The type of evaluator to load. llm : BaseLanguageModel, optional The language model to use for evaluation, by default None **kwargs : Any Additional keyword arguments to pass to the evaluator. Returns ------- Chain The loaded evaluation chain. Examples -------- >>> from langchain.evaluation import load_evaluator, EvaluatorType >>> evaluator = load_evaluator(EvaluatorType.QA)
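Extending the docstring's example with a hedged call; the criteria evaluator name and kwargs are assumptions, and the default judge model requires OpenAI credentials.

from langchain.evaluation import EvaluatorType, load_evaluator

evaluator = load_evaluator(EvaluatorType.CRITERIA, criteria="conciseness")
result = evaluator.evaluate_strings(
    prediction="Paris is the capital of France.",
    input="What is the capital of France?",
)
print(result)  # e.g. {"score": 1, "value": "Y", "reasoning": "..."}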
strip_python_markdown_tags
pat = re.compile('```python\\n(.*)```', re.DOTALL) code = pat.match(text) if code: return code.group(1) else: return text
def strip_python_markdown_tags(text: str) ->str: pat = re.compile('```python\\n(.*)```', re.DOTALL) code = pat.match(text) if code: return code.group(1) else: return text
null
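A quick check of the regex above; note that pat.match anchors at the start of the string, so the fenced block must begin the text for stripping to apply.

import re

pat = re.compile("```python\\n(.*)```", re.DOTALL)
text = "```python\nprint('hi')\n```"
m = pat.match(text)
print(m.group(1) if m else text)   # "print('hi')\n"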
test_llm_rails_add_documents
"""Test end to end construction and search.""" docsearch: LLMRails = LLMRails() texts1 = ['large language model', 'information retrieval', 'question answering' ] docsearch.add_texts(texts1) output1 = docsearch.similarity_search('large language model', k=1) print(output1) assert len(output1) == 1 assert output1[0].page_content == 'large language model' output2 = docsearch.similarity_search_with_score('large language model', k=1) assert len(output2) == 1 assert output2[0][0].page_content == 'large language model' assert output2[0][1] > 0
def test_llm_rails_add_documents() ->None: """Test end to end construction and search.""" docsearch: LLMRails = LLMRails() texts1 = ['large language model', 'information retrieval', 'question answering'] docsearch.add_texts(texts1) output1 = docsearch.similarity_search('large language model', k=1) print(output1) assert len(output1) == 1 assert output1[0].page_content == 'large language model' output2 = docsearch.similarity_search_with_score('large language model', k=1) assert len(output2) == 1 assert output2[0][0].page_content == 'large language model' assert output2[0][1] > 0
Test end to end construction and search.
test_mdelete
"""Test that deletion works as expected.""" store = RedisStore(client=redis_client, ttl=None) keys = ['key1', 'key2'] redis_client.mset({'key1': b'value1', 'key2': b'value2'}) store.mdelete(keys) result = redis_client.mget(keys) assert result == [None, None]
def test_mdelete(redis_client: Redis) ->None: """Test that deletion works as expected.""" store = RedisStore(client=redis_client, ttl=None) keys = ['key1', 'key2'] redis_client.mset({'key1': b'value1', 'key2': b'value2'}) store.mdelete(keys) result = redis_client.mget(keys) assert result == [None, None]
Test that deletion works as expected.
set_interface
if not values.get('interface'): values['interface'] = authenticate(network=values.get('network', 'testnet') ) return values
@root_validator(pre=True) def set_interface(cls, values: dict) ->dict: if not values.get('interface'): values['interface'] = authenticate(network=values.get('network', 'testnet')) return values
null
get_operations
"""Return a list of operations.""" return self.operations
def get_operations(self) ->List[dict]: """Return a list of operations.""" return self.operations
Return a list of operations.
_parse_messages
results = [] for message in messages: message_id = message['id'] message_data = self.api_resource.users().messages().get(userId='me', format='raw', id=message_id).execute() raw_message = base64.urlsafe_b64decode(message_data['raw']) email_msg = email.message_from_bytes(raw_message) subject = email_msg['Subject'] sender = email_msg['From'] message_body = '' if email_msg.is_multipart(): for part in email_msg.walk(): ctype = part.get_content_type() cdispo = str(part.get('Content-Disposition')) if ctype == 'text/plain' and 'attachment' not in cdispo: message_body = part.get_payload(decode=True).decode('utf-8') break else: message_body = email_msg.get_payload(decode=True).decode('utf-8') body = clean_email_body(message_body) results.append({'id': message['id'], 'threadId': message_data[ 'threadId'], 'snippet': message_data['snippet'], 'body': body, 'subject': subject, 'sender': sender}) return results
def _parse_messages(self, messages: List[Dict[str, Any]]) ->List[Dict[str, Any] ]: results = [] for message in messages: message_id = message['id'] message_data = self.api_resource.users().messages().get(userId='me', format='raw', id=message_id).execute() raw_message = base64.urlsafe_b64decode(message_data['raw']) email_msg = email.message_from_bytes(raw_message) subject = email_msg['Subject'] sender = email_msg['From'] message_body = '' if email_msg.is_multipart(): for part in email_msg.walk(): ctype = part.get_content_type() cdispo = str(part.get('Content-Disposition')) if ctype == 'text/plain' and 'attachment' not in cdispo: message_body = part.get_payload(decode=True).decode('utf-8' ) break else: message_body = email_msg.get_payload(decode=True).decode('utf-8') body = clean_email_body(message_body) results.append({'id': message['id'], 'threadId': message_data[ 'threadId'], 'snippet': message_data['snippet'], 'body': body, 'subject': subject, 'sender': sender}) return results
null
_Raise
self.fill('raise') if not t.exc: assert not t.cause return self.write(' ') self.dispatch(t.exc) if t.cause: self.write(' from ') self.dispatch(t.cause)
def _Raise(self, t): self.fill('raise') if not t.exc: assert not t.cause return self.write(' ') self.dispatch(t.exc) if t.cause: self.write(' from ') self.dispatch(t.cause)
null
test_appx_search_with_boolean_and_lucene_filter_throws_error
"""Test Approximate Search with Boolean and Lucene Filter throws Error.""" boolean_filter_val = {'bool': {'must': [{'term': {'text': 'baz'}}]}} lucene_filter_val = {'bool': {'must': [{'term': {'text': 'bar'}}]}} docsearch = OpenSearchVectorSearch.from_texts(texts, FakeEmbeddings(), opensearch_url=DEFAULT_OPENSEARCH_URL, engine='lucene') with pytest.raises(ValueError): docsearch.similarity_search('foo', k=3, boolean_filter= boolean_filter_val, lucene_filter=lucene_filter_val)
def test_appx_search_with_boolean_and_lucene_filter_throws_error() ->None: """Test Approximate Search with Boolean and Lucene Filter throws Error.""" boolean_filter_val = {'bool': {'must': [{'term': {'text': 'baz'}}]}} lucene_filter_val = {'bool': {'must': [{'term': {'text': 'bar'}}]}} docsearch = OpenSearchVectorSearch.from_texts(texts, FakeEmbeddings(), opensearch_url=DEFAULT_OPENSEARCH_URL, engine='lucene') with pytest.raises(ValueError): docsearch.similarity_search('foo', k=3, boolean_filter= boolean_filter_val, lucene_filter=lucene_filter_val)
Test Approximate Search with Boolean and Lucene Filter throws Error.
_type
return 'output_fixing'
@property def _type(self) ->str: return 'output_fixing'
null
get_name
name = (name or self.name or f"RunnableAssign<{','.join(self.mapper.steps.keys())}>") return super().get_name(suffix, name=name)
def get_name(self, suffix: Optional[str]=None, *, name: Optional[str]=None ) ->str: name = (name or self.name or f"RunnableAssign<{','.join(self.mapper.steps.keys())}>") return super().get_name(suffix, name=name)
null
_extract_scheme_and_domain
"""Extract the scheme + domain from a given URL. Args: url (str): The input URL. Returns: return a 2-tuple of scheme and domain """ parsed_uri = urlparse(url) return parsed_uri.scheme, parsed_uri.netloc
def _extract_scheme_and_domain(url: str) ->Tuple[str, str]: """Extract the scheme + domain from a given URL. Args: url (str): The input URL. Returns: return a 2-tuple of scheme and domain """ parsed_uri = urlparse(url) return parsed_uri.scheme, parsed_uri.netloc
Extract the scheme + domain from a given URL. Args: url (str): The input URL. Returns: return a 2-tuple of scheme and domain
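A one-line demonstration of the helper's behaviour using only the stdlib; the example URL is made up.

from urllib.parse import urlparse

parsed = urlparse("https://api.example.com/v1/users?id=7")
print((parsed.scheme, parsed.netloc))   # ('https', 'api.example.com')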
setup_class
if not os.getenv('YDC_API_KEY'): raise ValueError('YDC_API_KEY environment variable is not set')
@classmethod def setup_class(cls) ->None: if not os.getenv('YDC_API_KEY'): raise ValueError('YDC_API_KEY environment variable is not set')
null
_on_llm_error
"""Process the LLM Run upon error.""" self._process_end_trace(run)
def _on_llm_error(self, run: 'Run') ->None: """Process the LLM Run upon error.""" self._process_end_trace(run)
Process the LLM Run upon error.
load_comments
"""Load comments from a HN post.""" comments = soup_info.select("tr[class='athing comtr']") title = soup_info.select_one("tr[id='pagespace']").get('title') return [Document(page_content=comment.text.strip(), metadata={'source': self.web_path, 'title': title}) for comment in comments]
def load_comments(self, soup_info: Any) ->List[Document]: """Load comments from a HN post.""" comments = soup_info.select("tr[class='athing comtr']") title = soup_info.select_one("tr[id='pagespace']").get('title') return [Document(page_content=comment.text.strip(), metadata={'source': self.web_path, 'title': title}) for comment in comments]
Load comments from a HN post.
test_integration_initialization
"""Test chat model initialization.""" ChatGoogleGenerativeAI(model='gemini-nano', google_api_key='...', top_k=2, top_p=1, temperature=0.7, n=2) ChatGoogleGenerativeAI(model='gemini-nano', google_api_key='...', top_k=2, top_p=1, temperature=0.7, candidate_count=2)
def test_integration_initialization() ->None: """Test chat model initialization.""" ChatGoogleGenerativeAI(model='gemini-nano', google_api_key='...', top_k =2, top_p=1, temperature=0.7, n=2) ChatGoogleGenerativeAI(model='gemini-nano', google_api_key='...', top_k =2, top_p=1, temperature=0.7, candidate_count=2)
Test chat model initialization.