Dataset columns:
method_name: string (length 1 to 78)
method_body: string (length 3 to 9.66k)
full_code: string (length 31 to 10.7k)
docstring: string (length 4 to 4.74k)
evaluate
"""Evaluate question answering examples and predictions.""" inputs = [{'query': example[question_key], 'answer': example[answer_key], 'result': predictions[i][prediction_key]} for i, example in enumerate( examples)] return self.apply(inputs, callbacks=callbacks)
def evaluate(self, examples: Sequence[dict], predictions: Sequence[dict], question_key: str='query', answer_key: str='answer', prediction_key: str='result', *, callbacks: Callbacks=None) ->List[dict]: """Evaluate question answering examples and predictions.""" inputs = [{'query': example[question_key], 'answer': example[answer_key], 'result': predictions[i][prediction_key]} for i, example in enumerate(examples)] return self.apply(inputs, callbacks=callbacks)
Evaluate question answering examples and predictions.
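As a hedged illustration of the record pairing this evaluate method performs, here is a minimal standalone sketch; the example and prediction values are made up, and only the key-mapping step from the snippet above is reproduced (self.apply is not called).

```python
# Standalone sketch of the input pairing done by evaluate();
# the records below are hypothetical.
examples = [{'query': 'What is 2 + 2?', 'answer': '4'}]
predictions = [{'result': 'The answer is 4.'}]

inputs = [
    {
        'query': example['query'],
        'answer': example['answer'],
        'result': predictions[i]['result'],
    }
    for i, example in enumerate(examples)
]
print(inputs)
# [{'query': 'What is 2 + 2?', 'answer': '4', 'result': 'The answer is 4.'}]
```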
test_streamlit_callback_agent
import streamlit as st from langchain.agents import AgentType, initialize_agent, load_tools streamlit_callback = StreamlitCallbackHandler(st.container()) llm = OpenAI(temperature=0) tools = load_tools(['serpapi', 'llm-math'], llm=llm) agent = initialize_agent(tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True) agent.run("Who is Olivia Wilde's boyfriend? What is his current age raised to the 0.23 power?", callbacks=[streamlit_callback])
@pytest.mark.requires('streamlit') def test_streamlit_callback_agent() ->None: import streamlit as st from langchain.agents import AgentType, initialize_agent, load_tools streamlit_callback = StreamlitCallbackHandler(st.container()) llm = OpenAI(temperature=0) tools = load_tools(['serpapi', 'llm-math'], llm=llm) agent = initialize_agent(tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True) agent.run("Who is Olivia Wilde's boyfriend? What is his current age raised to the 0.23 power?", callbacks=[streamlit_callback])
null
yield_keys
"""Get an iterator over keys that match the given prefix. Args: prefix (str, optional): The prefix to match. Defaults to None. Returns: Iterator[str]: An iterator over keys that match the given prefix. """ if prefix is None: yield from self.store.keys() else: for key in self.store.keys(): if key.startswith(prefix): yield key
def yield_keys(self, prefix: Optional[str]=None) ->Iterator[str]: """Get an iterator over keys that match the given prefix. Args: prefix (str, optional): The prefix to match. Defaults to None. Returns: Iterator[str]: An iterator over keys that match the given prefix. """ if prefix is None: yield from self.store.keys() else: for key in self.store.keys(): if key.startswith(prefix): yield key
Get an iterator over keys that match the given prefix. Args: prefix (str, optional): The prefix to match. Defaults to None. Returns: Iterator[str]: An iterator over keys that match the given prefix.
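A minimal in-memory sketch of the same prefix-filtering behavior; the InMemoryStore class below is an illustrative stand-in with a plain dict as its backing store, not the class this method actually belongs to.

```python
from typing import Dict, Iterator, Optional


class InMemoryStore:
    """Illustrative stand-in with a plain dict as the backing store."""

    def __init__(self, store: Dict[str, str]) -> None:
        self.store = store

    def yield_keys(self, prefix: Optional[str] = None) -> Iterator[str]:
        # Same logic as the snippet above: no prefix means every key.
        if prefix is None:
            yield from self.store.keys()
        else:
            for key in self.store.keys():
                if key.startswith(prefix):
                    yield key


store = InMemoryStore({'user/1': 'a', 'user/2': 'b', 'doc/1': 'c'})
print(list(store.yield_keys(prefix='user/')))  # ['user/1', 'user/2']
```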
test_stream
"""Test that stream works.""" chat = ChatZhipuAI(streaming=True) callback_handler = FakeCallbackHandler() callback_manager = CallbackManager([callback_handler]) response = chat(messages=[HumanMessage(content='Hello.'), AIMessage(content ='Hello!'), HumanMessage(content='Who are you?')], stream=True, callbacks=callback_manager) assert callback_handler.llm_streams > 0 assert isinstance(response.content, str)
def test_stream() ->None: """Test that stream works.""" chat = ChatZhipuAI(streaming=True) callback_handler = FakeCallbackHandler() callback_manager = CallbackManager([callback_handler]) response = chat(messages=[HumanMessage(content='Hello.'), AIMessage(content='Hello!'), HumanMessage(content='Who are you?')], stream=True, callbacks=callback_manager) assert callback_handler.llm_streams > 0 assert isinstance(response.content, str)
Test that stream works.
sitemap_metadata_one
return {**meta, 'mykey': 'Super Important Metadata'}
def sitemap_metadata_one(meta: dict, _content: None) ->dict: return {**meta, 'mykey': 'Super Important Metadata'}
null
validate_environment
google_api_key = get_from_dict_or_env(values, 'google_api_key', 'GOOGLE_API_KEY') if isinstance(google_api_key, SecretStr): google_api_key = google_api_key.get_secret_value() genai.configure(api_key=google_api_key) if values.get('temperature') is not None and not 0 <= values['temperature'] <= 1: raise ValueError('temperature must be in the range [0.0, 1.0]') if values.get('top_p') is not None and not 0 <= values['top_p'] <= 1: raise ValueError('top_p must be in the range [0.0, 1.0]') if values.get('top_k') is not None and values['top_k'] <= 0: raise ValueError('top_k must be positive') model = values['model'] values['client'] = genai.GenerativeModel(model_name=model) return values
@root_validator() def validate_environment(cls, values: Dict) ->Dict: google_api_key = get_from_dict_or_env(values, 'google_api_key', 'GOOGLE_API_KEY') if isinstance(google_api_key, SecretStr): google_api_key = google_api_key.get_secret_value() genai.configure(api_key=google_api_key) if values.get('temperature') is not None and not 0 <= values['temperature'] <= 1: raise ValueError('temperature must be in the range [0.0, 1.0]') if values.get('top_p') is not None and not 0 <= values['top_p'] <= 1: raise ValueError('top_p must be in the range [0.0, 1.0]') if values.get('top_k') is not None and values['top_k'] <= 0: raise ValueError('top_k must be positive') model = values['model'] values['client'] = genai.GenerativeModel(model_name=model) return values
null
test_run_error
responses.add(responses.POST, api_client.outline_instance_url + api_client.outline_search_endpoint, json=OUTLINE_ERROR_RESPONSE, status=401) try: api_client.run('Testing') except Exception as e: assert 'Outline API returned an error:' in str(e)
@responses.activate def test_run_error(api_client: OutlineAPIWrapper) ->None: responses.add(responses.POST, api_client.outline_instance_url + api_client.outline_search_endpoint, json=OUTLINE_ERROR_RESPONSE, status=401) try: api_client.run('Testing') except Exception as e: assert 'Outline API returned an error:' in str(e)
null
validate_environment
"""Validates the environment.""" try: from google.cloud import discoveryengine_v1beta except ImportError as exc: raise ImportError( 'google.cloud.discoveryengine is not installed.Please install it with pip install google-cloud-discoveryengine>=0.11.0' ) from exc try: from google.api_core.exceptions import InvalidArgument except ImportError as exc: raise ImportError( 'google.api_core.exceptions is not installed. Please install it with pip install google-api-core' ) from exc values['project_id'] = get_from_dict_or_env(values, 'project_id', 'PROJECT_ID') try: search_engine_id = get_from_dict_or_env(values, 'search_engine_id', 'SEARCH_ENGINE_ID') if search_engine_id: import warnings warnings.warn( 'The `search_engine_id` parameter is deprecated. Use `data_store_id` instead.' , DeprecationWarning) values['data_store_id'] = search_engine_id except: pass values['data_store_id'] = get_from_dict_or_env(values, 'data_store_id', 'DATA_STORE_ID') return values
@root_validator(pre=True) def validate_environment(cls, values: Dict) ->Dict: """Validates the environment.""" try: from google.cloud import discoveryengine_v1beta except ImportError as exc: raise ImportError( 'google.cloud.discoveryengine is not installed.Please install it with pip install google-cloud-discoveryengine>=0.11.0' ) from exc try: from google.api_core.exceptions import InvalidArgument except ImportError as exc: raise ImportError( 'google.api_core.exceptions is not installed. Please install it with pip install google-api-core' ) from exc values['project_id'] = get_from_dict_or_env(values, 'project_id', 'PROJECT_ID') try: search_engine_id = get_from_dict_or_env(values, 'search_engine_id', 'SEARCH_ENGINE_ID') if search_engine_id: import warnings warnings.warn( 'The `search_engine_id` parameter is deprecated. Use `data_store_id` instead.' , DeprecationWarning) values['data_store_id'] = search_engine_id except: pass values['data_store_id'] = get_from_dict_or_env(values, 'data_store_id', 'DATA_STORE_ID') return values
Validates the environment.
test_invoke_stream_passthrough_assign_trace
def idchain_sync(__input: dict) ->bool: return False chain = RunnablePassthrough.assign(urls=idchain_sync) tracer = FakeTracer() chain.invoke({'example': [1, 2, 3]}, dict(callbacks=[tracer])) assert tracer.runs[0].name == 'RunnableAssign<urls>' assert tracer.runs[0].child_runs[0].name == 'RunnableParallel<urls>' tracer = FakeTracer() for item in chain.stream({'example': [1, 2, 3]}, dict(callbacks=[tracer])): pass assert tracer.runs[0].name == 'RunnableAssign<urls>' assert tracer.runs[0].child_runs[0].name == 'RunnableParallel<urls>'
def test_invoke_stream_passthrough_assign_trace() ->None: def idchain_sync(__input: dict) ->bool: return False chain = RunnablePassthrough.assign(urls=idchain_sync) tracer = FakeTracer() chain.invoke({'example': [1, 2, 3]}, dict(callbacks=[tracer])) assert tracer.runs[0].name == 'RunnableAssign<urls>' assert tracer.runs[0].child_runs[0].name == 'RunnableParallel<urls>' tracer = FakeTracer() for item in chain.stream({'example': [1, 2, 3]}, dict(callbacks=[tracer])): pass assert tracer.runs[0].name == 'RunnableAssign<urls>' assert tracer.runs[0].child_runs[0].name == 'RunnableParallel<urls>'
null
raise_value_error
"""Raise a value error.""" raise ValueError(f'x is {x}')
def raise_value_error(x: str) ->Any: """Raise a value error.""" raise ValueError(f'x is {x}')
Raise a value error.
__init__
if inspect.iscoroutinefunction(func): afunc = func func = None super().__init__(func=func, afunc=afunc, input_type=input_type, **kwargs)
def __init__(self, func: Optional[Union[Union[Callable[[Other], None], Callable[[Other, RunnableConfig], None]], Union[Callable[[Other], Awaitable[None]], Callable[[Other, RunnableConfig], Awaitable[None]]]]] =None, afunc: Optional[Union[Callable[[Other], Awaitable[None]], Callable[[Other, RunnableConfig], Awaitable[None]]]]=None, *, input_type: Optional[Type[Other]]=None, **kwargs: Any) ->None: if inspect.iscoroutinefunction(func): afunc = func func = None super().__init__(func=func, afunc=afunc, input_type=input_type, **kwargs)
null
get_resized_images
""" Resize images from base64-encoded strings. :param docs: A list of base64-encoded image to be resized. :return: Dict containing a list of resized base64-encoded strings. """ b64_images = [] for doc in docs: if isinstance(doc, Document): doc = doc.page_content b64_images.append(doc) return {'images': b64_images}
def get_resized_images(docs): """ Resize images from base64-encoded strings. :param docs: A list of base64-encoded image to be resized. :return: Dict containing a list of resized base64-encoded strings. """ b64_images = [] for doc in docs: if isinstance(doc, Document): doc = doc.page_content b64_images.append(doc) return {'images': b64_images}
Resize images from base64-encoded strings. :param docs: A list of base64-encoded image to be resized. :return: Dict containing a list of resized base64-encoded strings.
load
"""Load documents.""" try: import pandas as pd except ImportError: raise ImportError( 'pandas is needed for Notebook Loader, please install with `pip install pandas`' ) p = Path(self.file_path) with open(p, encoding='utf8') as f: d = json.load(f) data = pd.json_normalize(d['cells']) filtered_data = data[['cell_type', 'source', 'outputs']] if self.remove_newline: filtered_data = filtered_data.applymap(remove_newlines) text = filtered_data.apply(lambda x: concatenate_cells(x, self. include_outputs, self.max_output_length, self.traceback), axis=1).str.cat( sep=' ') metadata = {'source': str(p)} return [Document(page_content=text, metadata=metadata)]
def load(self) ->List[Document]: """Load documents.""" try: import pandas as pd except ImportError: raise ImportError('pandas is needed for Notebook Loader, please install with `pip install pandas`') p = Path(self.file_path) with open(p, encoding='utf8') as f: d = json.load(f) data = pd.json_normalize(d['cells']) filtered_data = data[['cell_type', 'source', 'outputs']] if self.remove_newline: filtered_data = filtered_data.applymap(remove_newlines) text = filtered_data.apply(lambda x: concatenate_cells(x, self.include_outputs, self.max_output_length, self.traceback), axis=1).str.cat(sep=' ') metadata = {'source': str(p)} return [Document(page_content=text, metadata=metadata)]
Load documents.
_add_child_run
"""Add child run to a chain run or tool run.""" parent_run.child_runs.append(child_run)
@staticmethod def _add_child_run(parent_run: Run, child_run: Run) ->None: """Add child run to a chain run or tool run.""" parent_run.child_runs.append(child_run)
Add child run to a chain run or tool run.
_run
"""Use the tool.""" return str(self.api_wrapper.run(query))
def _run(self, query: str, run_manager: Optional[CallbackManagerForToolRun] =None) ->str: """Use the tool.""" return str(self.api_wrapper.run(query))
Use the tool.
load
"""Load file""" data = self._get_figma_file() text = stringify_dict(data) metadata = {'source': self._construct_figma_api_url()} return [Document(page_content=text, metadata=metadata)]
def load(self) ->List[Document]: """Load file""" data = self._get_figma_file() text = stringify_dict(data) metadata = {'source': self._construct_figma_api_url()} return [Document(page_content=text, metadata=metadata)]
Load file
_llm_type
"""Return type of chat model.""" return 'everlyai-chat'
@property def _llm_type(self) ->str: """Return type of chat model.""" return 'everlyai-chat'
Return type of chat model.
_transform_chat
return response['choices'][0]['message']['content']
def _transform_chat(response: Dict[str, Any]) ->str: return response['choices'][0]['message']['content']
null
embed_documents
embeddings: List[List[float]] = [] for text in texts: embeddings.append([len(text), len(text) + 1]) return embeddings
def embed_documents(self, texts: List[str]) ->List[List[float]]: embeddings: List[List[float]] = [] for text in texts: embeddings.append([len(text), len(text) + 1]) return embeddings
null
test_anthropic_model_param
llm = Anthropic(model='foo') assert llm.model == 'foo'
@pytest.mark.requires('anthropic') def test_anthropic_model_param() ->None: llm = Anthropic(model='foo') assert llm.model == 'foo'
null
get_context_and_action_embeddings
context_emb = base.embed(event.based_on, self.model) if event.based_on else None to_select_from_var_name, to_select_from = next(iter(event.to_select_from.items()), (None, None)) action_embs = (base.embed(to_select_from, self.model, to_select_from_var_name) if event.to_select_from else None) if to_select_from else None if not context_emb or not action_embs: raise ValueError('Context and to_select_from must be provided in the inputs dictionary') return context_emb, action_embs
def get_context_and_action_embeddings(self, event: PickBestEvent) ->tuple: context_emb = base.embed(event.based_on, self.model) if event.based_on else None to_select_from_var_name, to_select_from = next(iter(event.to_select_from.items()), (None, None)) action_embs = (base.embed(to_select_from, self.model, to_select_from_var_name) if event.to_select_from else None) if to_select_from else None if not context_emb or not action_embs: raise ValueError('Context and to_select_from must be provided in the inputs dictionary') return context_emb, action_embs
null
_import_elevenlabs
try: import elevenlabs except ImportError as e: raise ImportError( 'Cannot import elevenlabs, please install `pip install elevenlabs`.' ) from e return elevenlabs
def _import_elevenlabs() ->Any: try: import elevenlabs except ImportError as e: raise ImportError( 'Cannot import elevenlabs, please install `pip install elevenlabs`.' ) from e return elevenlabs
null
_invocation_params
params = self._default_params if self.stop is not None and stop is not None: raise ValueError('`stop` found in both the input and default params.') elif self.stop is not None: params['stop_sequences'] = self.stop else: params['stop_sequences'] = stop return {**params, **kwargs}
def _invocation_params(self, stop: Optional[List[str]], **kwargs: Any) ->dict: params = self._default_params if self.stop is not None and stop is not None: raise ValueError('`stop` found in both the input and default params.') elif self.stop is not None: params['stop_sequences'] = self.stop else: params['stop_sequences'] = stop return {**params, **kwargs}
null
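To make the stop-sequence rule in _invocation_params above explicit, here is a small self-contained sketch of the same merge logic; the function name and arguments are illustrative only, not part of the original class.

```python
from typing import Any, List, Optional


def merge_stop_params(default_stop: Optional[List[str]],
                      stop: Optional[List[str]], **kwargs: Any) -> dict:
    # Mirrors _invocation_params: a stop list may come from the model's
    # defaults or from the call, but never both at once.
    params: dict = {}
    if default_stop is not None and stop is not None:
        raise ValueError('`stop` found in both the input and default params.')
    elif default_stop is not None:
        params['stop_sequences'] = default_stop
    else:
        params['stop_sequences'] = stop
    return {**params, **kwargs}


print(merge_stop_params(None, ['\nObservation:'], temperature=0.2))
# {'stop_sequences': ['\nObservation:'], 'temperature': 0.2}
```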
get_runtime_environment
"""Get information about the LangChain runtime environment.""" from langchain_core import __version__ return {'library_version': __version__, 'library': 'langchain-core', 'platform': platform.platform(), 'runtime': 'python', 'runtime_version': platform.python_version()}
@lru_cache(maxsize=1) def get_runtime_environment() ->dict: """Get information about the LangChain runtime environment.""" from langchain_core import __version__ return {'library_version': __version__, 'library': 'langchain-core', 'platform': platform.platform(), 'runtime': 'python', 'runtime_version': platform.python_version()}
Get information about the LangChain runtime environment.
test_sklearn
"""Test end to end construction and search.""" texts = ['foo', 'bar', 'baz'] docsearch = SKLearnVectorStore.from_texts(texts, FakeEmbeddings()) output = docsearch.similarity_search('foo', k=1) assert len(output) == 1 assert output[0].page_content == 'foo'
@pytest.mark.requires('numpy', 'sklearn') def test_sklearn() ->None: """Test end to end construction and search.""" texts = ['foo', 'bar', 'baz'] docsearch = SKLearnVectorStore.from_texts(texts, FakeEmbeddings()) output = docsearch.similarity_search('foo', k=1) assert len(output) == 1 assert output[0].page_content == 'foo'
Test end to end construction and search.
test_faiss_similarity_search_with_relevance_scores
"""Test the similarity search with normalized similarities.""" texts = ['foo', 'bar', 'baz'] docsearch = FAISS.from_texts(texts, FakeEmbeddings(), relevance_score_fn=lambda score: 1.0 - score / math.sqrt(2)) outputs = docsearch.similarity_search_with_relevance_scores('foo', k=1) output, score = outputs[0] assert output == Document(page_content='foo') assert score == 1.0
@pytest.mark.requires('faiss') def test_faiss_similarity_search_with_relevance_scores() ->None: """Test the similarity search with normalized similarities.""" texts = ['foo', 'bar', 'baz'] docsearch = FAISS.from_texts(texts, FakeEmbeddings(), relevance_score_fn=lambda score: 1.0 - score / math.sqrt(2)) outputs = docsearch.similarity_search_with_relevance_scores('foo', k=1) output, score = outputs[0] assert output == Document(page_content='foo') assert score == 1.0
Test the similarity search with normalized similarities.
_load_sheet_from_id
"""Load a sheet and all tabs from an ID.""" from googleapiclient.discovery import build creds = self._load_credentials() sheets_service = build('sheets', 'v4', credentials=creds) spreadsheet = sheets_service.spreadsheets().get(spreadsheetId=id).execute() sheets = spreadsheet.get('sheets', []) documents = [] for sheet in sheets: sheet_name = sheet['properties']['title'] result = sheets_service.spreadsheets().values().get(spreadsheetId=id, range=sheet_name).execute() values = result.get('values', []) if not values: continue header = values[0] for i, row in enumerate(values[1:], start=1): metadata = {'source': f"https://docs.google.com/spreadsheets/d/{id}/edit?gid={sheet['properties']['sheetId']}" , 'title': f"{spreadsheet['properties']['title']} - {sheet_name}", 'row': i} content = [] for j, v in enumerate(row): title = header[j].strip() if len(header) > j else '' content.append(f'{title}: {v.strip()}') page_content = '\n'.join(content) documents.append(Document(page_content=page_content, metadata=metadata) ) return documents
def _load_sheet_from_id(self, id: str) ->List[Document]: """Load a sheet and all tabs from an ID.""" from googleapiclient.discovery import build creds = self._load_credentials() sheets_service = build('sheets', 'v4', credentials=creds) spreadsheet = sheets_service.spreadsheets().get(spreadsheetId=id).execute() sheets = spreadsheet.get('sheets', []) documents = [] for sheet in sheets: sheet_name = sheet['properties']['title'] result = sheets_service.spreadsheets().values().get(spreadsheetId=id, range=sheet_name).execute() values = result.get('values', []) if not values: continue header = values[0] for i, row in enumerate(values[1:], start=1): metadata = {'source': f"https://docs.google.com/spreadsheets/d/{id}/edit?gid={sheet['properties']['sheetId']}", 'title': f"{spreadsheet['properties']['title']} - {sheet_name}", 'row': i} content = [] for j, v in enumerate(row): title = header[j].strip() if len(header) > j else '' content.append(f'{title}: {v.strip()}') page_content = '\n'.join(content) documents.append(Document(page_content=page_content, metadata=metadata)) return documents
Load a sheet and all tabs from an ID.
_call
_run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager() input_text = f'{inputs[self.input_key]}\nSQLQuery:' _run_manager.on_text(input_text, verbose=self.verbose) table_names_to_use = inputs.get('table_names_to_use') table_info = self.database.get_table_info(table_names=table_names_to_use) llm_inputs = {'input': input_text, 'top_k': str(self.top_k), 'dialect': self.database.dialect, 'table_info': table_info, 'stop': ['\nSQLResult:']} intermediate_steps: List = [] try: intermediate_steps.append(llm_inputs) llm_out = self.llm_chain.predict(callbacks=_run_manager.get_child(), **llm_inputs) sql_cmd = self.sql_cmd_parser.parse(llm_out) if self.return_sql: return {self.output_key: sql_cmd} if not self.use_query_checker: _run_manager.on_text(llm_out, color='green', verbose=self.verbose) intermediate_steps.append(llm_out) intermediate_steps.append({'sql_cmd': llm_out}) result = get_result_from_sqldb(self.database, sql_cmd) intermediate_steps.append(str(result)) else: query_checker_prompt = self.query_checker_prompt or PromptTemplate(template=QUERY_CHECKER, input_variables=['query', 'dialect']) query_checker_chain = LLMChain(llm=self.llm_chain.llm, prompt=query_checker_prompt, output_parser=self.llm_chain.output_parser) query_checker_inputs = {'query': llm_out, 'dialect': self.database.dialect} checked_llm_out = query_checker_chain.predict(callbacks=_run_manager.get_child(), **query_checker_inputs) checked_sql_command = self.sql_cmd_parser.parse(checked_llm_out) intermediate_steps.append(checked_llm_out) _run_manager.on_text(checked_llm_out, color='green', verbose=self.verbose) intermediate_steps.append({'sql_cmd': checked_llm_out}) result = get_result_from_sqldb(self.database, checked_sql_command) intermediate_steps.append(str(result)) llm_out = checked_llm_out sql_cmd = checked_sql_command _run_manager.on_text('\nSQLResult: ', verbose=self.verbose) _run_manager.on_text(str(result), color='yellow', verbose=self.verbose) final_result: Union[str, Sequence[Dict[str, Any]]] if self.return_direct: final_result = result else: _run_manager.on_text('\nAnswer:', verbose=self.verbose) input_text += f'{llm_out}\nSQLResult: {result}\nAnswer:' llm_inputs['input'] = input_text intermediate_steps.append(llm_inputs) final_result = self.llm_chain.predict(callbacks=_run_manager.get_child(), **llm_inputs).strip() intermediate_steps.append(final_result) _run_manager.on_text(final_result, color='green', verbose=self.verbose) chain_result: Dict[str, Any] = {self.output_key: final_result} if self.return_intermediate_steps: chain_result[INTERMEDIATE_STEPS_KEY] = intermediate_steps return chain_result except Exception as exc: exc.intermediate_steps = intermediate_steps raise exc
def _call(self, inputs: Dict[str, Any], run_manager: Optional[ CallbackManagerForChainRun]=None) ->Dict[str, Any]: _run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager() input_text = f'{inputs[self.input_key]}\nSQLQuery:' _run_manager.on_text(input_text, verbose=self.verbose) table_names_to_use = inputs.get('table_names_to_use') table_info = self.database.get_table_info(table_names=table_names_to_use) llm_inputs = {'input': input_text, 'top_k': str(self.top_k), 'dialect': self.database.dialect, 'table_info': table_info, 'stop': [ '\nSQLResult:']} intermediate_steps: List = [] try: intermediate_steps.append(llm_inputs) llm_out = self.llm_chain.predict(callbacks=_run_manager.get_child(), **llm_inputs) sql_cmd = self.sql_cmd_parser.parse(llm_out) if self.return_sql: return {self.output_key: sql_cmd} if not self.use_query_checker: _run_manager.on_text(llm_out, color='green', verbose=self.verbose) intermediate_steps.append(llm_out) intermediate_steps.append({'sql_cmd': llm_out}) result = get_result_from_sqldb(self.database, sql_cmd) intermediate_steps.append(str(result)) else: query_checker_prompt = self.query_checker_prompt or PromptTemplate( template=QUERY_CHECKER, input_variables=['query', 'dialect']) query_checker_chain = LLMChain(llm=self.llm_chain.llm, prompt= query_checker_prompt, output_parser=self.llm_chain. output_parser) query_checker_inputs = {'query': llm_out, 'dialect': self. database.dialect} checked_llm_out = query_checker_chain.predict(callbacks= _run_manager.get_child(), **query_checker_inputs) checked_sql_command = self.sql_cmd_parser.parse(checked_llm_out) intermediate_steps.append(checked_llm_out) _run_manager.on_text(checked_llm_out, color='green', verbose= self.verbose) intermediate_steps.append({'sql_cmd': checked_llm_out}) result = get_result_from_sqldb(self.database, checked_sql_command) intermediate_steps.append(str(result)) llm_out = checked_llm_out sql_cmd = checked_sql_command _run_manager.on_text('\nSQLResult: ', verbose=self.verbose) _run_manager.on_text(str(result), color='yellow', verbose=self.verbose) final_result: Union[str, Sequence[Dict[str, Any]]] if self.return_direct: final_result = result else: _run_manager.on_text('\nAnswer:', verbose=self.verbose) input_text += f'{llm_out}\nSQLResult: {result}\nAnswer:' llm_inputs['input'] = input_text intermediate_steps.append(llm_inputs) final_result = self.llm_chain.predict(callbacks=_run_manager. get_child(), **llm_inputs).strip() intermediate_steps.append(final_result) _run_manager.on_text(final_result, color='green', verbose=self. verbose) chain_result: Dict[str, Any] = {self.output_key: final_result} if self.return_intermediate_steps: chain_result[INTERMEDIATE_STEPS_KEY] = intermediate_steps return chain_result except Exception as exc: exc.intermediate_steps = intermediate_steps raise exc
null
_import_dashvector
from langchain_community.vectorstores.dashvector import DashVector return DashVector
def _import_dashvector() ->Any: from langchain_community.vectorstores.dashvector import DashVector return DashVector
null
_document_details_for_docset_id
"""Gets all document details for the given docset ID""" url = f'{self.api}/docsets/{docset_id}/documents' all_documents = [] while url: response = requests.get(url, headers={'Authorization': f'Bearer {self.access_token}'}) if response.ok: data = response.json() all_documents.extend(data['documents']) url = data.get('next', None) else: raise Exception( f'Failed to download {url} (status: {response.status_code})') return all_documents
def _document_details_for_docset_id(self, docset_id: str) ->List[Dict]: """Gets all document details for the given docset ID""" url = f'{self.api}/docsets/{docset_id}/documents' all_documents = [] while url: response = requests.get(url, headers={'Authorization': f'Bearer {self.access_token}'}) if response.ok: data = response.json() all_documents.extend(data['documents']) url = data.get('next', None) else: raise Exception( f'Failed to download {url} (status: {response.status_code})') return all_documents
Gets all document details for the given docset ID
test_with_metadatas_with_scores
"""Test end to end construction and scored search.""" texts = ['hello bagel', 'hello langchain'] metadatas = [{'page': str(i)} for i in range(len(texts))] txt_search = Bagel.from_texts(cluster_name='testing', texts=texts, metadatas=metadatas) output = txt_search.similarity_search_with_score('hello bagel', k=1) assert output == [(Document(page_content='hello bagel', metadata={'page': '0'}), 0.0)] txt_search.delete_cluster()
def test_with_metadatas_with_scores() ->None: """Test end to end construction and scored search.""" texts = ['hello bagel', 'hello langchain'] metadatas = [{'page': str(i)} for i in range(len(texts))] txt_search = Bagel.from_texts(cluster_name='testing', texts=texts, metadatas=metadatas) output = txt_search.similarity_search_with_score('hello bagel', k=1) assert output == [(Document(page_content='hello bagel', metadata={ 'page': '0'}), 0.0)] txt_search.delete_cluster()
Test end to end construction and scored search.
validate_environment
"""Validate that python package exists in environment.""" try: from vllm import LLM as VLLModel except ImportError: raise ImportError( 'Could not import vllm python package. Please install it with `pip install vllm`.' ) values['client'] = VLLModel(model=values['model'], tensor_parallel_size= values['tensor_parallel_size'], trust_remote_code=values[ 'trust_remote_code'], dtype=values['dtype'], download_dir=values[ 'download_dir'], **values['vllm_kwargs']) return values
@root_validator() def validate_environment(cls, values: Dict) ->Dict: """Validate that python package exists in environment.""" try: from vllm import LLM as VLLModel except ImportError: raise ImportError('Could not import vllm python package. Please install it with `pip install vllm`.') values['client'] = VLLModel(model=values['model'], tensor_parallel_size=values['tensor_parallel_size'], trust_remote_code=values['trust_remote_code'], dtype=values['dtype'], download_dir=values['download_dir'], **values['vllm_kwargs']) return values
Validate that python package exists in environment.
max_marginal_relevance_search
embeddings = self._embedding.embed_query(query) docs = self.max_marginal_relevance_search_by_vector(embeddings, k=k, fetch_k=fetch_k, lambda_mult=lambda_mult) return docs
def max_marginal_relevance_search(self, query: str, k: int=4, fetch_k: int=20, lambda_mult: float=0.5, **kwargs: Any) ->List[Document]: embeddings = self._embedding.embed_query(query) docs = self.max_marginal_relevance_search_by_vector(embeddings, k=k, fetch_k=fetch_k, lambda_mult=lambda_mult) return docs
null
test_loadnotebookwithimage_notehasplaintextonlywithresourcesremoved
documents = EverNoteLoader(self.example_notebook_path('sample_notebook_with_media.enex'), False).load() note = documents[0] assert note.page_content == """When you pick this mug up with your thumb on top and middle finger through the loop, your ring finger slides into the mug under the loop where it is too hot to touch and burns you. If you try and pick it up with your thumb and index finger you can’t hold the mug."""
def test_loadnotebookwithimage_notehasplaintextonlywithresourcesremoved(self) ->None: documents = EverNoteLoader(self.example_notebook_path('sample_notebook_with_media.enex'), False).load() note = documents[0] assert note.page_content == """When you pick this mug up with your thumb on top and middle finger through the loop, your ring finger slides into the mug under the loop where it is too hot to touch and burns you. If you try and pick it up with your thumb and index finger you can’t hold the mug."""
null
test_redis_from_documents
"""Test from_documents constructor.""" docs = [Document(page_content=t, metadata={'a': 'b'}) for t in texts] docsearch = Redis.from_documents(docs, FakeEmbeddings(), redis_url= TEST_REDIS_URL) output = docsearch.similarity_search('foo', k=1, return_metadata=True) assert 'a' in output[0].metadata.keys() assert 'b' in output[0].metadata.values() assert drop(docsearch.index_name)
def test_redis_from_documents(texts: List[str]) ->None: """Test from_documents constructor.""" docs = [Document(page_content=t, metadata={'a': 'b'}) for t in texts] docsearch = Redis.from_documents(docs, FakeEmbeddings(), redis_url= TEST_REDIS_URL) output = docsearch.similarity_search('foo', k=1, return_metadata=True) assert 'a' in output[0].metadata.keys() assert 'b' in output[0].metadata.values() assert drop(docsearch.index_name)
Test from_documents constructor.
test_delete_fail_no_ids
index = mock_index(DIRECT_ACCESS_INDEX) vectorsearch = default_databricks_vector_search(index) with pytest.raises(ValueError) as ex: vectorsearch.delete() assert 'ids must be provided.' in str(ex.value)
@pytest.mark.requires('databricks', 'databricks.vector_search') def test_delete_fail_no_ids() ->None: index = mock_index(DIRECT_ACCESS_INDEX) vectorsearch = default_databricks_vector_search(index) with pytest.raises(ValueError) as ex: vectorsearch.delete() assert 'ids must be provided.' in str(ex.value)
null
_call
_run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager() response = self.chain.run(**inputs, callbacks=_run_manager.get_child( 'original')) initial_response = response input_prompt = self.chain.prompt.format(**inputs) _run_manager.on_text(text='Initial response: ' + response + '\n\n', verbose =self.verbose, color='yellow') fallacy_critiques_and_revisions = [] for logical_fallacy in self.logical_fallacies: fallacy_raw_critique = self.fallacy_critique_chain.run(input_prompt= input_prompt, output_from_model=response, fallacy_critique_request= logical_fallacy.fallacy_critique_request, callbacks=_run_manager. get_child('fallacy_critique')) fallacy_critique = self._parse_critique(output_string=fallacy_raw_critique ).strip() if 'no fallacy critique needed' in fallacy_critique.lower(): fallacy_critiques_and_revisions.append((fallacy_critique, '')) continue fallacy_revision = self.fallacy_revision_chain.run(input_prompt= input_prompt, output_from_model=response, fallacy_critique_request= logical_fallacy.fallacy_critique_request, fallacy_critique= fallacy_critique, revision_request=logical_fallacy. fallacy_revision_request, callbacks=_run_manager.get_child( 'fallacy_revision')).strip() response = fallacy_revision fallacy_critiques_and_revisions.append((fallacy_critique, fallacy_revision) ) _run_manager.on_text(text=f'Applying {logical_fallacy.name}...' + '\n\n', verbose=self.verbose, color='green') _run_manager.on_text(text='Logical Fallacy: ' + fallacy_critique + '\n\n', verbose=self.verbose, color='blue') _run_manager.on_text(text='Updated response: ' + fallacy_revision + '\n\n', verbose=self.verbose, color='yellow') final_output: Dict[str, Any] = {'output': response} if self.return_intermediate_steps: final_output['initial_output'] = initial_response final_output['fallacy_critiques_and_revisions' ] = fallacy_critiques_and_revisions return final_output
def _call(self, inputs: Dict[str, Any], run_manager: Optional[ CallbackManagerForChainRun]=None) ->Dict[str, Any]: _run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager() response = self.chain.run(**inputs, callbacks=_run_manager.get_child( 'original')) initial_response = response input_prompt = self.chain.prompt.format(**inputs) _run_manager.on_text(text='Initial response: ' + response + '\n\n', verbose=self.verbose, color='yellow') fallacy_critiques_and_revisions = [] for logical_fallacy in self.logical_fallacies: fallacy_raw_critique = self.fallacy_critique_chain.run(input_prompt =input_prompt, output_from_model=response, fallacy_critique_request=logical_fallacy. fallacy_critique_request, callbacks=_run_manager.get_child( 'fallacy_critique')) fallacy_critique = self._parse_critique(output_string= fallacy_raw_critique).strip() if 'no fallacy critique needed' in fallacy_critique.lower(): fallacy_critiques_and_revisions.append((fallacy_critique, '')) continue fallacy_revision = self.fallacy_revision_chain.run(input_prompt= input_prompt, output_from_model=response, fallacy_critique_request=logical_fallacy. fallacy_critique_request, fallacy_critique=fallacy_critique, revision_request=logical_fallacy.fallacy_revision_request, callbacks=_run_manager.get_child('fallacy_revision')).strip() response = fallacy_revision fallacy_critiques_and_revisions.append((fallacy_critique, fallacy_revision)) _run_manager.on_text(text=f'Applying {logical_fallacy.name}...' + '\n\n', verbose=self.verbose, color='green') _run_manager.on_text(text='Logical Fallacy: ' + fallacy_critique + '\n\n', verbose=self.verbose, color='blue') _run_manager.on_text(text='Updated response: ' + fallacy_revision + '\n\n', verbose=self.verbose, color='yellow') final_output: Dict[str, Any] = {'output': response} if self.return_intermediate_steps: final_output['initial_output'] = initial_response final_output['fallacy_critiques_and_revisions' ] = fallacy_critiques_and_revisions return final_output
null
_fetch_valid_connection_docs
if self.ignore_load_errors: try: return self.session.get(url, **self.requests_kwargs) except Exception as e: warnings.warn(str(e)) return None return self.session.get(url, **self.requests_kwargs)
def _fetch_valid_connection_docs(self, url: str) ->Any: if self.ignore_load_errors: try: return self.session.get(url, **self.requests_kwargs) except Exception as e: warnings.warn(str(e)) return None return self.session.get(url, **self.requests_kwargs)
null
test_timescalevector_with_filter_no_match
"""Test end to end construction and search.""" texts = ['foo', 'bar', 'baz'] metadatas = [{'page': str(i)} for i in range(len(texts))] docsearch = TimescaleVector.from_texts(texts=texts, collection_name= 'test_collection_filter', embedding=FakeEmbeddingsWithAdaDimension(), metadatas=metadatas, service_url=SERVICE_URL, pre_delete_collection=True) output = docsearch.similarity_search_with_score('foo', k=1, filter={'page': '5'}) assert output == []
def test_timescalevector_with_filter_no_match() ->None: """Test end to end construction and search.""" texts = ['foo', 'bar', 'baz'] metadatas = [{'page': str(i)} for i in range(len(texts))] docsearch = TimescaleVector.from_texts(texts=texts, collection_name= 'test_collection_filter', embedding=FakeEmbeddingsWithAdaDimension( ), metadatas=metadatas, service_url=SERVICE_URL, pre_delete_collection=True) output = docsearch.similarity_search_with_score('foo', k=1, filter={ 'page': '5'}) assert output == []
Test end to end construction and search.
_run
"""Use the tool. Add run_manager: Optional[CallbackManagerForToolRun] = None to child implementations to enable tracing, """
@abstractmethod def _run(self, *args: Any, **kwargs: Any) ->Any: """Use the tool. Add run_manager: Optional[CallbackManagerForToolRun] = None to child implementations to enable tracing, """
Use the tool. Add run_manager: Optional[CallbackManagerForToolRun] = None to child implementations to enable tracing,
_run
"""Get the schema for a specific table.""" return ', '.join(self.db.get_usable_table_names())
def _run(self, tool_input: str='', run_manager: Optional[ CallbackManagerForToolRun]=None) ->str: """Get the schema for a specific table.""" return ', '.join(self.db.get_usable_table_names())
Get the schema for a specific table.
_model_is_anthropic
return self._get_provider() == 'anthropic'
@property def _model_is_anthropic(self) ->bool: return self._get_provider() == 'anthropic'
null
load_prompt
"""Unified method for loading a prompt from LangChainHub or local fs.""" if (hub_result := try_load_from_hub(path, _load_prompt_from_file, 'prompts', {'py', 'json', 'yaml'})): return hub_result else: return _load_prompt_from_file(path)
def load_prompt(path: Union[str, Path]) ->BasePromptTemplate: """Unified method for loading a prompt from LangChainHub or local fs.""" if (hub_result := try_load_from_hub(path, _load_prompt_from_file, 'prompts', {'py', 'json', 'yaml'})): return hub_result else: return _load_prompt_from_file(path)
Unified method for loading a prompt from LangChainHub or local fs.
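As a hedged usage sketch, load_prompt can be pointed at a local file; the JSON layout below follows the standard prompt serialization format, but the file path and template text are made up.

```python
# Hedged usage sketch: write a prompt spec to a temporary JSON file and
# load it back. Assumes the langchain package is installed.
import json
import pathlib
import tempfile

from langchain.prompts import load_prompt

spec = {
    '_type': 'prompt',
    'input_variables': ['topic'],
    'template': 'Write one sentence about {topic}.',
}
path = pathlib.Path(tempfile.mkdtemp()) / 'prompt.json'
path.write_text(json.dumps(spec))

prompt = load_prompt(path)
print(prompt.format(topic='tides'))
```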
check_dependency
for dep_id in task.dep: if dep_id == -1: continue dep_task = self.id_task_map[dep_id] if dep_task.failed() or dep_task.pending(): return False return True
def check_dependency(self, task: Task) ->bool: for dep_id in task.dep: if dep_id == -1: continue dep_task = self.id_task_map[dep_id] if dep_task.failed() or dep_task.pending(): return False return True
null
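A small self-contained sketch of the rule check_dependency encodes: a task is runnable only when every dependency has finished, and a dependency id of -1 means "no dependency". The Task class and task map here are hypothetical stand-ins with failed/pending flags.

```python
class Task:
    """Hypothetical stand-in exposing the dep list and status checks."""

    def __init__(self, dep, failed=False, pending=False):
        self.dep = dep
        self._failed = failed
        self._pending = pending

    def failed(self) -> bool:
        return self._failed

    def pending(self) -> bool:
        return self._pending


# dep id -1 is the "no dependency" sentinel from the snippet above.
id_task_map = {1: Task(dep=[-1]), 2: Task(dep=[1], pending=True)}


def check_dependency(task: Task) -> bool:
    for dep_id in task.dep:
        if dep_id == -1:
            continue
        dep_task = id_task_map[dep_id]
        if dep_task.failed() or dep_task.pending():
            return False
    return True


print(check_dependency(Task(dep=[1])))  # True: task 1 finished cleanly
print(check_dependency(Task(dep=[2])))  # False: task 2 is still pending
```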
new
""" Create a new LangServe application. """ has_packages = package is not None and len(package) > 0 if noninteractive: if name is None: raise typer.BadParameter( 'name is required when --non-interactive is set') name_str = name pip_bool = bool(pip) else: name_str = name if name else typer.prompt( 'What folder would you like to create?') if not has_packages: package = [] package_prompt = ( 'What package would you like to add? (leave blank to skip)') while True: package_str = typer.prompt(package_prompt, default='', show_default=False) if not package_str: break package.append(package_str) package_prompt = ( f'{len(package)} added. Any more packages (leave blank to end)?' ) has_packages = len(package) > 0 pip_bool = False if pip is None and has_packages: pip_bool = typer.confirm( 'Would you like to install these templates into your environment with pip?' , default=False) project_template_dir = Path(__file__).parents[1] / 'project_template' destination_dir = Path.cwd() / name_str if name_str != '.' else Path.cwd() app_name = name_str if name_str != '.' else Path.cwd().name shutil.copytree(project_template_dir, destination_dir, dirs_exist_ok=name == '.') readme = destination_dir / 'README.md' readme_contents = readme.read_text() readme.write_text(readme_contents.replace('__app_name__', app_name)) pyproject = destination_dir / 'pyproject.toml' pyproject_contents = pyproject.read_text() pyproject.write_text(pyproject_contents.replace('__app_name__', app_name)) if has_packages: add(package, project_dir=destination_dir, pip=pip_bool)
@app_cli.command() def new(name: Annotated[Optional[str], typer.Argument(help= 'The name of the folder to create')]=None, *, package: Annotated[ Optional[List[str]], typer.Option(help= 'Packages to seed the project with')]=None, pip: Annotated[Optional[ bool], typer.Option('--pip/--no-pip', help= 'Pip install the template(s) as editable dependencies', is_flag=True)]= None, noninteractive: Annotated[bool, typer.Option( '--non-interactive/--interactive', help="Don't prompt for any input", is_flag=True)]=False): """ Create a new LangServe application. """ has_packages = package is not None and len(package) > 0 if noninteractive: if name is None: raise typer.BadParameter( 'name is required when --non-interactive is set') name_str = name pip_bool = bool(pip) else: name_str = name if name else typer.prompt( 'What folder would you like to create?') if not has_packages: package = [] package_prompt = ( 'What package would you like to add? (leave blank to skip)') while True: package_str = typer.prompt(package_prompt, default='', show_default=False) if not package_str: break package.append(package_str) package_prompt = ( f'{len(package)} added. Any more packages (leave blank to end)?' ) has_packages = len(package) > 0 pip_bool = False if pip is None and has_packages: pip_bool = typer.confirm( 'Would you like to install these templates into your environment with pip?' , default=False) project_template_dir = Path(__file__).parents[1] / 'project_template' destination_dir = Path.cwd() / name_str if name_str != '.' else Path.cwd() app_name = name_str if name_str != '.' else Path.cwd().name shutil.copytree(project_template_dir, destination_dir, dirs_exist_ok= name == '.') readme = destination_dir / 'README.md' readme_contents = readme.read_text() readme.write_text(readme_contents.replace('__app_name__', app_name)) pyproject = destination_dir / 'pyproject.toml' pyproject_contents = pyproject.read_text() pyproject.write_text(pyproject_contents.replace('__app_name__', app_name)) if has_packages: add(package, project_dir=destination_dir, pip=pip_bool)
Create a new LangServe application.
_default_params
"""Default parameters for the model.""" raise NotImplementedError
@property def _default_params(self) ->Dict[str, Any]: """Default parameters for the model.""" raise NotImplementedError
Default parameters for the model.
_generate
completion = '' if self.streaming: for chunk in self._stream(messages, stop, run_manager, **kwargs): completion += chunk.text else: provider = self._get_provider() prompt = ChatPromptAdapter.convert_messages_to_prompt(provider=provider, messages=messages) params: Dict[str, Any] = {**kwargs} if stop: params['stop_sequences'] = stop completion = self._prepare_input_and_invoke(prompt=prompt, stop=stop, run_manager=run_manager, **params) message = AIMessage(content=completion) return ChatResult(generations=[ChatGeneration(message=message)])
def _generate(self, messages: List[BaseMessage], stop: Optional[List[str]]= None, run_manager: Optional[CallbackManagerForLLMRun]=None, **kwargs: Any ) ->ChatResult: completion = '' if self.streaming: for chunk in self._stream(messages, stop, run_manager, **kwargs): completion += chunk.text else: provider = self._get_provider() prompt = ChatPromptAdapter.convert_messages_to_prompt(provider= provider, messages=messages) params: Dict[str, Any] = {**kwargs} if stop: params['stop_sequences'] = stop completion = self._prepare_input_and_invoke(prompt=prompt, stop= stop, run_manager=run_manager, **params) message = AIMessage(content=completion) return ChatResult(generations=[ChatGeneration(message=message)])
null
lc_serializable
return True
@property def lc_serializable(self) ->bool: return True
null
__init__
"""Create a new MutableExpander. Parameters ---------- parent_container The `st.container` that the expander will be created inside. The expander transparently deletes and recreates its underlying `st.expander` instance when its label changes, and it uses `parent_container` to ensure it recreates this underlying expander in the same location onscreen. label The expander's initial label. expanded The expander's initial `expanded` value. """ self._label = label self._expanded = expanded self._parent_cursor = parent_container.empty() self._container = self._parent_cursor.expander(label, expanded) self._child_records: List[ChildRecord] = []
def __init__(self, parent_container: DeltaGenerator, label: str, expanded: bool ): """Create a new MutableExpander. Parameters ---------- parent_container The `st.container` that the expander will be created inside. The expander transparently deletes and recreates its underlying `st.expander` instance when its label changes, and it uses `parent_container` to ensure it recreates this underlying expander in the same location onscreen. label The expander's initial label. expanded The expander's initial `expanded` value. """ self._label = label self._expanded = expanded self._parent_cursor = parent_container.empty() self._container = self._parent_cursor.expander(label, expanded) self._child_records: List[ChildRecord] = []
Create a new MutableExpander. Parameters ---------- parent_container The `st.container` that the expander will be created inside. The expander transparently deletes and recreates its underlying `st.expander` instance when its label changes, and it uses `parent_container` to ensure it recreates this underlying expander in the same location onscreen. label The expander's initial label. expanded The expander's initial `expanded` value.