Source code for langchain.callbacks.arize_callback
(source: https://api.python.langchain.com/en/stable/_modules/langchain/callbacks/arize_callback.html)

from datetime import datetime
from typing import Any, Dict, List, Optional, Union

from langchain.callbacks.base import BaseCallbackHandler
from langchain.callbacks.utils import import_pandas
from langchain.schema import AgentAction, AgentFinish, LLMResult


class ArizeCallbackHandler(BaseCallbackHandler):
    """Callback Handler that logs to Arize."""

    def __init__(
        self,
        model_id: Optional[str] = None,
        model_version: Optional[str] = None,
        SPACE_KEY: Optional[str] = None,
        API_KEY: Optional[str] = None,
    ) -> None:
        """Initialize callback handler."""
        super().__init__()
        self.model_id = model_id
        self.model_version = model_version
        self.space_key = SPACE_KEY
        self.api_key = API_KEY
        self.prompt_records: List[str] = []
        self.response_records: List[str] = []
        self.prediction_ids: List[str] = []
        self.pred_timestamps: List[int] = []
        self.response_embeddings: List[float] = []
        self.prompt_embeddings: List[float] = []
        self.prompt_tokens = 0
        self.completion_tokens = 0
        self.total_tokens = 0
        self.step = 0

        from arize.pandas.embeddings import EmbeddingGenerator, UseCases
        from arize.pandas.logger import Client

        self.generator = EmbeddingGenerator.from_use_case(
            use_case=UseCases.NLP.SEQUENCE_CLASSIFICATION,
            model_name="distilbert-base-uncased",
            tokenizer_max_length=512,
            batch_size=256,
        )
        self.arize_client = Client(space_key=SPACE_KEY, api_key=API_KEY)
        if SPACE_KEY == "SPACE_KEY" or API_KEY == "API_KEY":
            raise ValueError("❌ CHANGE SPACE AND API KEYS")
        else:
            print("✅ Arize client setup done! Now you can start using Arize!")

    def on_llm_start(
        self, serialized: Dict[str, Any], prompts: List[str], **kwargs: Any
    ) -> None:
        for prompt in prompts:
            self.prompt_records.append(prompt.replace("\n", ""))

    def on_llm_new_token(self, token: str, **kwargs: Any) -> None:
        """Do nothing."""
        pass

    def on_llm_end(self, response: LLMResult, **kwargs: Any) -> None:
        pd = import_pandas()
        from arize.utils.types import (
            EmbeddingColumnNames,
            Environments,
            ModelTypes,
            Schema,
        )

        # Safe check if 'llm_output' and 'token_usage' exist
        if response.llm_output and "token_usage" in response.llm_output:
            self.prompt_tokens = response.llm_output["token_usage"].get(
                "prompt_tokens", 0
            )
            self.total_tokens = response.llm_output["token_usage"].get(
                "total_tokens", 0
            )
            self.completion_tokens = response.llm_output["token_usage"].get(
                "completion_tokens", 0
            )
        else:
            self.prompt_tokens = (
                self.total_tokens
            ) = self.completion_tokens = 0  # assign default value
        for generations in response.generations:
            for generation in generations:
                prompt = self.prompt_records[self.step]
                self.step = self.step + 1
                prompt_embedding = pd.Series(
                    self.generator.generate_embeddings(
                        text_col=pd.Series(prompt.replace("\n", " "))
                    ).reset_index(drop=True)
                )

                # Assigning text to response_text instead of response
                response_text = generation.text.replace("\n", " ")
                response_embedding = pd.Series(
                    self.generator.generate_embeddings(
                        text_col=pd.Series(generation.text.replace("\n", " "))
                    ).reset_index(drop=True)
                )
                pred_timestamp = datetime.now().timestamp()

                # Define the columns and data
                columns = [
                    "prediction_ts",
                    "response",
                    "prompt",
                    "response_vector",
                    "prompt_vector",
                    "prompt_token",
                    "completion_token",
                    "total_token",
                ]
                data = [
                    [
                        pred_timestamp,
                        response_text,
                        prompt,
                        response_embedding[0],
                        prompt_embedding[0],
                        self.prompt_tokens,
                        self.total_tokens,
                        self.completion_tokens,
                    ]
                ]

                # Create the DataFrame
                df = pd.DataFrame(data, columns=columns)

                # Declare prompt and response columns
                prompt_columns = EmbeddingColumnNames(
                    vector_column_name="prompt_vector", data_column_name="prompt"
                )
                response_columns = EmbeddingColumnNames(
                    vector_column_name="response_vector", data_column_name="response"
                )
                schema = Schema(
                    timestamp_column_name="prediction_ts",
                    tag_column_names=[
                        "prompt_token",
                        "completion_token",
                        "total_token",
                    ],
"completion_token", "total_token", ], prompt_column_names=prompt_columns, response_column_names=response_columns, ) response_from_arize = self.arize_client.log( dataframe=df, schema=schema, model_id=self.model_id, model_version=self.model_version, model_type=ModelTypes.GENERATIVE_LLM, environment=Environments.PRODUCTION, ) if response_from_arize.status_code == 200: print("✅ Successfully logged data to Arize!") else: print(f'❌ Logging failed "{response_from_arize.text}"') [docs] def on_llm_error( self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any ) -> None: """Do nothing.""" pass [docs] def on_chain_start( self, serialized: Dict[str, Any], inputs: Dict[str, Any], **kwargs: Any ) -> None: pass [docs] def on_chain_end(self, outputs: Dict[str, Any], **kwargs: Any) -> None: """Do nothing.""" pass [docs] def on_chain_error( self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any ) -> None: """Do nothing.""" pass [docs] def on_tool_start( self, serialized: Dict[str, Any], input_str: str, **kwargs: Any, ) -> None: pass [docs] def on_agent_action(self, action: AgentAction, **kwargs: Any) -> Any: """Do nothing.""" pass [docs] def on_tool_end( self,
    def on_tool_end(
        self,
        output: str,
        observation_prefix: Optional[str] = None,
        llm_prefix: Optional[str] = None,
        **kwargs: Any,
    ) -> None:
        pass

    def on_tool_error(
        self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any
    ) -> None:
        pass

    def on_text(self, text: str, **kwargs: Any) -> None:
        pass

    def on_agent_finish(self, finish: AgentFinish, **kwargs: Any) -> None:
        pass
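A minimal usage sketch, not part of the upstream module: it assumes the `arize` and `openai` packages are installed and that real Arize credentials are supplied; the model id, keys, and prompt below are placeholders.

# Hypothetical usage sketch — model id, keys, and prompt are placeholders.
from langchain.callbacks.arize_callback import ArizeCallbackHandler
from langchain.llms import OpenAI

arize_callback = ArizeCallbackHandler(
    model_id="llm-demo",
    model_version="1.0",
    SPACE_KEY="<your-space-key>",  # real Arize space key required
    API_KEY="<your-api-key>",      # real Arize API key required
)

# Each completion triggers on_llm_end, which embeds the prompt/response
# pair and logs it to Arize.
llm = OpenAI(temperature=0, callbacks=[arize_callback])
llm("Why do callback handlers exist?")
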
Source code for langchain.callbacks.clearml_callback
(source: https://api.python.langchain.com/en/stable/_modules/langchain/callbacks/clearml_callback.html)

import tempfile
from copy import deepcopy
from pathlib import Path
from typing import Any, Dict, List, Optional, Sequence, Union

from langchain.callbacks.base import BaseCallbackHandler
from langchain.callbacks.utils import (
    BaseMetadataCallbackHandler,
    flatten_dict,
    hash_string,
    import_pandas,
    import_spacy,
    import_textstat,
    load_json,
)
from langchain.schema import AgentAction, AgentFinish, LLMResult


def import_clearml() -> Any:
    """Import the clearml python package and raise an error if it is not installed."""
    try:
        import clearml  # noqa: F401
    except ImportError:
        raise ImportError(
            "To use the clearml callback manager you need to have the `clearml` python "
            "package installed. Please install it with `pip install clearml`"
        )
    return clearml


class ClearMLCallbackHandler(BaseMetadataCallbackHandler, BaseCallbackHandler):
    """Callback Handler that logs to ClearML.

    Parameters:
        task_type (str): The type of clearml task such as "inference", "testing" or "qc"
        project_name (str): The clearml project name
        tags (list): Tags to add to the task
        task_name (str): Name of the clearml task
        visualize (bool): Whether to visualize the run.
        complexity_metrics (bool): Whether to log complexity metrics
        stream_logs (bool): Whether to stream callback actions to ClearML

    This handler will utilize the associated callback method and formats
    the input of each callback function with metadata regarding the state of LLM run,
    and adds the response to the list of records for both the {method}_records and
    action. It then logs the response to the ClearML console.
    """

    def __init__(
        self,
        task_type: Optional[str] = "inference",
        project_name: Optional[str] = "langchain_callback_demo",
        tags: Optional[Sequence] = None,
        task_name: Optional[str] = None,
        visualize: bool = False,
        complexity_metrics: bool = False,
        stream_logs: bool = False,
    ) -> None:
        """Initialize callback handler."""
        clearml = import_clearml()
        spacy = import_spacy()
        super().__init__()

        self.task_type = task_type
        self.project_name = project_name
        self.tags = tags
        self.task_name = task_name
        self.visualize = visualize
        self.complexity_metrics = complexity_metrics
        self.stream_logs = stream_logs

        self.temp_dir = tempfile.TemporaryDirectory()

        # Check if ClearML task already exists (e.g. in pipeline)
        if clearml.Task.current_task():
            self.task = clearml.Task.current_task()
        else:
            self.task = clearml.Task.init(  # type: ignore
                task_type=self.task_type,
                project_name=self.project_name,
                tags=self.tags,
                task_name=self.task_name,
                output_uri=True,
            )
        self.logger = self.task.get_logger()
        warning = (
            "The clearml callback is currently in beta and is subject to change "
            "based on updates to `langchain`. Please report any issues to "
            "https://github.com/allegroai/clearml/issues with the tag `langchain`."
        )
        self.logger.report_text(warning, level=30, print_console=True)

        self.callback_columns: list = []
        self.action_records: list = []
        self.complexity_metrics = complexity_metrics
        self.visualize = visualize
        self.nlp = spacy.load("en_core_web_sm")

    def _init_resp(self) -> Dict:
        return {k: None for k in self.callback_columns}

    def on_llm_start(
        self, serialized: Dict[str, Any], prompts: List[str], **kwargs: Any
    ) -> None:
        """Run when LLM starts."""
        self.step += 1
        self.llm_starts += 1
        self.starts += 1

        resp = self._init_resp()
        resp.update({"action": "on_llm_start"})
        resp.update(flatten_dict(serialized))
        resp.update(self.get_custom_callback_meta())

        for prompt in prompts:
            prompt_resp = deepcopy(resp)
            prompt_resp["prompts"] = prompt

            self.on_llm_start_records.append(prompt_resp)
            self.action_records.append(prompt_resp)

            if self.stream_logs:
                self.logger.report_text(prompt_resp)

    def on_llm_new_token(self, token: str, **kwargs: Any) -> None:
        """Run when LLM generates a new token."""
        self.step += 1
        self.llm_streams += 1

        resp = self._init_resp()
        resp.update({"action": "on_llm_new_token", "token": token})
        resp.update(self.get_custom_callback_meta())

        self.on_llm_token_records.append(resp)
        self.action_records.append(resp)

        if self.stream_logs:
            self.logger.report_text(resp)
    def on_llm_end(self, response: LLMResult, **kwargs: Any) -> None:
        """Run when LLM ends running."""
        self.step += 1
        self.llm_ends += 1
        self.ends += 1

        resp = self._init_resp()
        resp.update({"action": "on_llm_end"})
        resp.update(flatten_dict(response.llm_output or {}))
        resp.update(self.get_custom_callback_meta())

        for generations in response.generations:
            for generation in generations:
                generation_resp = deepcopy(resp)
                generation_resp.update(flatten_dict(generation.dict()))
                generation_resp.update(self.analyze_text(generation.text))

                self.on_llm_end_records.append(generation_resp)
                self.action_records.append(generation_resp)

                if self.stream_logs:
                    self.logger.report_text(generation_resp)

    def on_llm_error(
        self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any
    ) -> None:
        """Run when LLM errors."""
        self.step += 1
        self.errors += 1

    def on_chain_start(
        self, serialized: Dict[str, Any], inputs: Dict[str, Any], **kwargs: Any
    ) -> None:
        """Run when chain starts running."""
        self.step += 1
        self.chain_starts += 1
        self.starts += 1

        resp = self._init_resp()
        resp.update({"action": "on_chain_start"})
        resp.update(flatten_dict(serialized))
        resp.update(self.get_custom_callback_meta())

        chain_input = inputs["input"]
chain_input = inputs["input"] if isinstance(chain_input, str): input_resp = deepcopy(resp) input_resp["input"] = chain_input self.on_chain_start_records.append(input_resp) self.action_records.append(input_resp) if self.stream_logs: self.logger.report_text(input_resp) elif isinstance(chain_input, list): for inp in chain_input: input_resp = deepcopy(resp) input_resp.update(inp) self.on_chain_start_records.append(input_resp) self.action_records.append(input_resp) if self.stream_logs: self.logger.report_text(input_resp) else: raise ValueError("Unexpected data format provided!") [docs] def on_chain_end(self, outputs: Dict[str, Any], **kwargs: Any) -> None: """Run when chain ends running.""" self.step += 1 self.chain_ends += 1 self.ends += 1 resp = self._init_resp() resp.update({"action": "on_chain_end", "outputs": outputs["output"]}) resp.update(self.get_custom_callback_meta()) self.on_chain_end_records.append(resp) self.action_records.append(resp) if self.stream_logs: self.logger.report_text(resp) [docs] def on_chain_error( self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any ) -> None: """Run when chain errors.""" self.step += 1 self.errors += 1 [docs] def on_tool_start( self, serialized: Dict[str, Any], input_str: str, **kwargs: Any ) -> None: """Run when tool starts running.""" self.step += 1 self.tool_starts += 1
        self.starts += 1

        resp = self._init_resp()
        resp.update({"action": "on_tool_start", "input_str": input_str})
        resp.update(flatten_dict(serialized))
        resp.update(self.get_custom_callback_meta())

        self.on_tool_start_records.append(resp)
        self.action_records.append(resp)

        if self.stream_logs:
            self.logger.report_text(resp)

    def on_tool_end(self, output: str, **kwargs: Any) -> None:
        """Run when tool ends running."""
        self.step += 1
        self.tool_ends += 1
        self.ends += 1

        resp = self._init_resp()
        resp.update({"action": "on_tool_end", "output": output})
        resp.update(self.get_custom_callback_meta())

        self.on_tool_end_records.append(resp)
        self.action_records.append(resp)

        if self.stream_logs:
            self.logger.report_text(resp)

    def on_tool_error(
        self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any
    ) -> None:
        """Run when tool errors."""
        self.step += 1
        self.errors += 1

    def on_text(self, text: str, **kwargs: Any) -> None:
        """Run when agent is ending."""
        self.step += 1
        self.text_ctr += 1

        resp = self._init_resp()
        resp.update({"action": "on_text", "text": text})
        resp.update(self.get_custom_callback_meta())

        self.on_text_records.append(resp)
        self.action_records.append(resp)

        if self.stream_logs:
            self.logger.report_text(resp)
    def on_agent_finish(self, finish: AgentFinish, **kwargs: Any) -> None:
        """Run when agent ends running."""
        self.step += 1
        self.agent_ends += 1
        self.ends += 1

        resp = self._init_resp()
        resp.update(
            {
                "action": "on_agent_finish",
                "output": finish.return_values["output"],
                "log": finish.log,
            }
        )
        resp.update(self.get_custom_callback_meta())

        self.on_agent_finish_records.append(resp)
        self.action_records.append(resp)

        if self.stream_logs:
            self.logger.report_text(resp)

    def on_agent_action(self, action: AgentAction, **kwargs: Any) -> Any:
        """Run on agent action."""
        self.step += 1
        self.tool_starts += 1
        self.starts += 1

        resp = self._init_resp()
        resp.update(
            {
                "action": "on_agent_action",
                "tool": action.tool,
                "tool_input": action.tool_input,
                "log": action.log,
            }
        )
        resp.update(self.get_custom_callback_meta())

        self.on_agent_action_records.append(resp)
        self.action_records.append(resp)

        if self.stream_logs:
            self.logger.report_text(resp)

    def analyze_text(self, text: str) -> dict:
        """Analyze text using textstat and spacy.

        Parameters:
            text (str): The text to analyze.

        Returns:
            (dict): A dictionary containing the complexity metrics.
        """
        resp = {}
        textstat = import_textstat()
""" resp = {} textstat = import_textstat() spacy = import_spacy() if self.complexity_metrics: text_complexity_metrics = { "flesch_reading_ease": textstat.flesch_reading_ease(text), "flesch_kincaid_grade": textstat.flesch_kincaid_grade(text), "smog_index": textstat.smog_index(text), "coleman_liau_index": textstat.coleman_liau_index(text), "automated_readability_index": textstat.automated_readability_index( text ), "dale_chall_readability_score": textstat.dale_chall_readability_score( text ), "difficult_words": textstat.difficult_words(text), "linsear_write_formula": textstat.linsear_write_formula(text), "gunning_fog": textstat.gunning_fog(text), "text_standard": textstat.text_standard(text), "fernandez_huerta": textstat.fernandez_huerta(text), "szigriszt_pazos": textstat.szigriszt_pazos(text), "gutierrez_polini": textstat.gutierrez_polini(text), "crawford": textstat.crawford(text), "gulpease_index": textstat.gulpease_index(text), "osman": textstat.osman(text), } resp.update(text_complexity_metrics) if self.visualize and self.nlp and self.temp_dir.name is not None: doc = self.nlp(text) dep_out = spacy.displacy.render( # type: ignore
            dep_out = spacy.displacy.render(  # type: ignore
                doc, style="dep", jupyter=False, page=True
            )
            dep_output_path = Path(
                self.temp_dir.name, hash_string(f"dep-{text}") + ".html"
            )
            dep_output_path.open("w", encoding="utf-8").write(dep_out)

            ent_out = spacy.displacy.render(  # type: ignore
                doc, style="ent", jupyter=False, page=True
            )
            ent_output_path = Path(
                self.temp_dir.name, hash_string(f"ent-{text}") + ".html"
            )
            ent_output_path.open("w", encoding="utf-8").write(ent_out)

            self.logger.report_media(
                "Dependencies Plot", text, local_path=dep_output_path
            )
            self.logger.report_media("Entities Plot", text, local_path=ent_output_path)
        return resp

    def _create_session_analysis_df(self) -> Any:
        """Create a dataframe with all the information from the session."""
        pd = import_pandas()
        on_llm_start_records_df = pd.DataFrame(self.on_llm_start_records)
        on_llm_end_records_df = pd.DataFrame(self.on_llm_end_records)

        llm_input_prompts_df = (
            on_llm_start_records_df[["step", "prompts", "name"]]
            .dropna(axis=1)
            .rename({"step": "prompt_step"}, axis=1)
        )
        complexity_metrics_columns = []
        visualizations_columns: List = []

        if self.complexity_metrics:
            complexity_metrics_columns = [
                "flesch_reading_ease",
                "flesch_kincaid_grade",
"flesch_kincaid_grade", "smog_index", "coleman_liau_index", "automated_readability_index", "dale_chall_readability_score", "difficult_words", "linsear_write_formula", "gunning_fog", "text_standard", "fernandez_huerta", "szigriszt_pazos", "gutierrez_polini", "crawford", "gulpease_index", "osman", ] llm_outputs_df = ( on_llm_end_records_df[ [ "step", "text", "token_usage_total_tokens", "token_usage_prompt_tokens", "token_usage_completion_tokens", ] + complexity_metrics_columns + visualizations_columns ] .dropna(axis=1) .rename({"step": "output_step", "text": "output"}, axis=1) ) session_analysis_df = pd.concat([llm_input_prompts_df, llm_outputs_df], axis=1) # session_analysis_df["chat_html"] = session_analysis_df[ # ["prompts", "output"] # ].apply( # lambda row: construct_html_from_prompt_and_generation( # row["prompts"], row["output"] # ), # axis=1, # ) return session_analysis_df [docs] def flush_tracker( self, name: Optional[str] = None, langchain_asset: Any = None, finish: bool = False, ) -> None:
        """Flush the tracker and set up the session.

        Everything after this will be a new table.

        Args:
            name: Name of the performed session so far so it is identifiable
            langchain_asset: The langchain asset to save.
            finish: Whether to finish the run.

        Returns:
            None
        """
        pd = import_pandas()
        clearml = import_clearml()

        # Log the action records
        self.logger.report_table(
            "Action Records", name, table_plot=pd.DataFrame(self.action_records)
        )

        # Session analysis
        session_analysis_df = self._create_session_analysis_df()
        self.logger.report_table(
            "Session Analysis", name, table_plot=session_analysis_df
        )

        if self.stream_logs:
            self.logger.report_text(
                {
                    "action_records": pd.DataFrame(self.action_records),
                    "session_analysis": session_analysis_df,
                }
            )

        if langchain_asset:
            langchain_asset_path = Path(self.temp_dir.name, "model.json")
            try:
                langchain_asset.save(langchain_asset_path)
                # Create output model and connect it to the task
                output_model = clearml.OutputModel(
                    task=self.task, config_text=load_json(langchain_asset_path)
                )
                output_model.update_weights(
                    weights_filename=str(langchain_asset_path),
                    auto_delete_file=False,
                    target_filename=name,
                )
            except ValueError:
                langchain_asset.save_agent(langchain_asset_path)
                output_model = clearml.OutputModel(
                    task=self.task, config_text=load_json(langchain_asset_path)
                )
                output_model.update_weights(
                    weights_filename=str(langchain_asset_path),
                    auto_delete_file=False,
                    target_filename=name,
                )
            except NotImplementedError as e:
                print("Could not save model.")
                print(repr(e))
                pass

        # Cleanup after adding everything to ClearML
        self.task.flush(wait_for_uploads=True)
        self.temp_dir.cleanup()
        self.temp_dir = tempfile.TemporaryDirectory()
        self.reset_callback_meta()

        if finish:
            self.task.close()
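A short usage sketch for the handler above (not part of the module): it assumes `clearml`, `spacy`, `textstat`, and `openai` are installed and a ClearML workspace is configured; the prompts are illustrative.

from langchain.callbacks.clearml_callback import ClearMLCallbackHandler
from langchain.llms import OpenAI

clearml_callback = ClearMLCallbackHandler(
    task_type="inference",
    project_name="langchain_callback_demo",
    task_name="llm",
    tags=["test"],
    complexity_metrics=True,  # log textstat metrics per generation
    stream_logs=True,
)

llm = OpenAI(temperature=0, callbacks=[clearml_callback])
llm.generate(["Tell me a joke", "Tell me a poem"])

# Logs the action records and session-analysis tables to ClearML,
# then resets the session so later calls start a new table.
clearml_callback.flush_tracker(langchain_asset=llm, name="simple_llm")
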
Source code for langchain.callbacks.mlflow_callback
(source: https://api.python.langchain.com/en/stable/_modules/langchain/callbacks/mlflow_callback.html)

import random
import string
import tempfile
import traceback
from copy import deepcopy
from pathlib import Path
from typing import Any, Dict, List, Optional, Union

from langchain.callbacks.base import BaseCallbackHandler
from langchain.callbacks.utils import (
    BaseMetadataCallbackHandler,
    flatten_dict,
    hash_string,
    import_pandas,
    import_spacy,
    import_textstat,
)
from langchain.schema import AgentAction, AgentFinish, LLMResult
from langchain.utils import get_from_dict_or_env


def import_mlflow() -> Any:
    """Import the mlflow python package and raise an error if it is not installed."""
    try:
        import mlflow
    except ImportError:
        raise ImportError(
            "To use the mlflow callback manager you need to have the `mlflow` python "
            "package installed. Please install it with `pip install mlflow>=2.3.0`"
        )
    return mlflow


def analyze_text(
    text: str,
    nlp: Any = None,
) -> dict:
    """Analyze text using textstat and spacy.

    Parameters:
        text (str): The text to analyze.
        nlp (spacy.lang): The spacy language model to use for visualization.

    Returns:
        (dict): A dictionary containing the complexity metrics and visualization
            files serialized to HTML string.
    """
    resp: Dict[str, Any] = {}
    textstat = import_textstat()
    spacy = import_spacy()
    text_complexity_metrics = {
        "flesch_reading_ease": textstat.flesch_reading_ease(text),
"flesch_reading_ease": textstat.flesch_reading_ease(text), "flesch_kincaid_grade": textstat.flesch_kincaid_grade(text), "smog_index": textstat.smog_index(text), "coleman_liau_index": textstat.coleman_liau_index(text), "automated_readability_index": textstat.automated_readability_index(text), "dale_chall_readability_score": textstat.dale_chall_readability_score(text), "difficult_words": textstat.difficult_words(text), "linsear_write_formula": textstat.linsear_write_formula(text), "gunning_fog": textstat.gunning_fog(text), # "text_standard": textstat.text_standard(text), "fernandez_huerta": textstat.fernandez_huerta(text), "szigriszt_pazos": textstat.szigriszt_pazos(text), "gutierrez_polini": textstat.gutierrez_polini(text), "crawford": textstat.crawford(text), "gulpease_index": textstat.gulpease_index(text), "osman": textstat.osman(text), } resp.update({"text_complexity_metrics": text_complexity_metrics}) resp.update(text_complexity_metrics) if nlp is not None: doc = nlp(text) dep_out = spacy.displacy.render( # type: ignore doc, style="dep", jupyter=False, page=True ) ent_out = spacy.displacy.render( # type: ignore doc, style="ent", jupyter=False, page=True )
doc, style="ent", jupyter=False, page=True ) text_visualizations = { "dependency_tree": dep_out, "entities": ent_out, } resp.update(text_visualizations) return resp def construct_html_from_prompt_and_generation(prompt: str, generation: str) -> Any: """Construct an html element from a prompt and a generation. Parameters: prompt (str): The prompt. generation (str): The generation. Returns: (str): The html string.""" formatted_prompt = prompt.replace("\n", "<br>") formatted_generation = generation.replace("\n", "<br>") return f""" <p style="color:black;">{formatted_prompt}:</p> <blockquote> <p style="color:green;"> {formatted_generation} </p> </blockquote> """ class MlflowLogger: """Callback Handler that logs metrics and artifacts to mlflow server. Parameters: name (str): Name of the run. experiment (str): Name of the experiment. tags (dict): Tags to be attached for the run. tracking_uri (str): MLflow tracking server uri. This handler implements the helper functions to initialize, log metrics and artifacts to the mlflow server. """ def __init__(self, **kwargs: Any): self.mlflow = import_mlflow() tracking_uri = get_from_dict_or_env( kwargs, "tracking_uri", "MLFLOW_TRACKING_URI", "" ) self.mlflow.set_tracking_uri(tracking_uri) # User can set other env variables described here
        # User can set other env variables described here
        # > https://www.mlflow.org/docs/latest/tracking.html#logging-to-a-tracking-server

        experiment_name = get_from_dict_or_env(
            kwargs, "experiment_name", "MLFLOW_EXPERIMENT_NAME"
        )
        self.mlf_exp = self.mlflow.get_experiment_by_name(experiment_name)
        if self.mlf_exp is not None:
            self.mlf_expid = self.mlf_exp.experiment_id
        else:
            self.mlf_expid = self.mlflow.create_experiment(experiment_name)

        self.start_run(kwargs["run_name"], kwargs["run_tags"])

    def start_run(self, name: str, tags: Dict[str, str]) -> None:
        """To start a new run, auto generates the random suffix for name"""
        if name.endswith("-%"):
            rname = "".join(random.choices(string.ascii_uppercase + string.digits, k=7))
            name = name.replace("%", rname)
        self.run = self.mlflow.MlflowClient().create_run(
            self.mlf_expid, run_name=name, tags=tags
        )

    def finish_run(self) -> None:
        """To finish the run."""
        with self.mlflow.start_run(
            run_id=self.run.info.run_id, experiment_id=self.mlf_expid
        ):
            self.mlflow.end_run()

    def metric(self, key: str, value: float) -> None:
        """To log metric to mlflow server."""
        with self.mlflow.start_run(
            run_id=self.run.info.run_id, experiment_id=self.mlf_expid
        ):
            self.mlflow.log_metric(key, value)
    def metrics(
        self, data: Union[Dict[str, float], Dict[str, int]], step: Optional[int] = 0
    ) -> None:
        """To log all metrics in the input dict."""
        with self.mlflow.start_run(
            run_id=self.run.info.run_id, experiment_id=self.mlf_expid
        ):
            self.mlflow.log_metrics(data)

    def jsonf(self, data: Dict[str, Any], filename: str) -> None:
        """To log the input data as json file artifact."""
        with self.mlflow.start_run(
            run_id=self.run.info.run_id, experiment_id=self.mlf_expid
        ):
            self.mlflow.log_dict(data, f"{filename}.json")

    def table(self, name: str, dataframe) -> None:  # type: ignore
        """To log the input pandas dataframe as a html table"""
        self.html(dataframe.to_html(), f"table_{name}")

    def html(self, html: str, filename: str) -> None:
        """To log the input html string as html file artifact."""
        with self.mlflow.start_run(
            run_id=self.run.info.run_id, experiment_id=self.mlf_expid
        ):
            self.mlflow.log_text(html, f"{filename}.html")

    def text(self, text: str, filename: str) -> None:
        """To log the input text as text file artifact."""
        with self.mlflow.start_run(
            run_id=self.run.info.run_id, experiment_id=self.mlf_expid
        ):
            self.mlflow.log_text(text, f"{filename}.txt")
    def artifact(self, path: str) -> None:
        """To upload the file from given path as artifact."""
        with self.mlflow.start_run(
            run_id=self.run.info.run_id, experiment_id=self.mlf_expid
        ):
            self.mlflow.log_artifact(path)

    def langchain_artifact(self, chain: Any) -> None:
        with self.mlflow.start_run(
            run_id=self.run.info.run_id, experiment_id=self.mlf_expid
        ):
            self.mlflow.langchain.log_model(chain, "langchain-model")


class MlflowCallbackHandler(BaseMetadataCallbackHandler, BaseCallbackHandler):
    """Callback Handler that logs metrics and artifacts to mlflow server.

    Parameters:
        name (str): Name of the run.
        experiment (str): Name of the experiment.
        tags (dict): Tags to be attached for the run.
        tracking_uri (str): MLflow tracking server uri.

    This handler will utilize the associated callback method called and formats
    the input of each callback function with metadata regarding the state of LLM run,
    and adds the response to the list of records for both the {method}_records and
    action. It then logs the response to mlflow server.
    """

    def __init__(
        self,
        name: Optional[str] = "langchainrun-%",
        experiment: Optional[str] = "langchain",
        tags: Optional[Dict] = {},
        tracking_uri: Optional[str] = None,
    ) -> None:
        """Initialize callback handler."""
        import_pandas()
        import_textstat()
        import_mlflow()
        spacy = import_spacy()
        super().__init__()

        self.name = name
        self.experiment = experiment
        self.tags = tags
        self.tracking_uri = tracking_uri

        self.temp_dir = tempfile.TemporaryDirectory()

        self.mlflg = MlflowLogger(
            tracking_uri=self.tracking_uri,
            experiment_name=self.experiment,
            run_name=self.name,
            run_tags=self.tags,
        )

        self.action_records: list = []
        self.nlp = spacy.load("en_core_web_sm")

        self.metrics = {
            "step": 0,
            "starts": 0,
            "ends": 0,
            "errors": 0,
            "text_ctr": 0,
            "chain_starts": 0,
            "chain_ends": 0,
            "llm_starts": 0,
            "llm_ends": 0,
            "llm_streams": 0,
            "tool_starts": 0,
            "tool_ends": 0,
            "agent_ends": 0,
        }

        self.records: Dict[str, Any] = {
            "on_llm_start_records": [],
            "on_llm_token_records": [],
            "on_llm_end_records": [],
            "on_chain_start_records": [],
            "on_chain_end_records": [],
            "on_tool_start_records": [],
            "on_tool_end_records": [],
            "on_text_records": [],
            "on_agent_finish_records": [],
            "on_agent_action_records": [],
            "action_records": [],
        }

    def _reset(self) -> None:
        for k, v in self.metrics.items():
            self.metrics[k] = 0
        for k, v in self.records.items():
            self.records[k] = []
    def on_llm_start(
        self, serialized: Dict[str, Any], prompts: List[str], **kwargs: Any
    ) -> None:
        """Run when LLM starts."""
        self.metrics["step"] += 1
        self.metrics["llm_starts"] += 1
        self.metrics["starts"] += 1

        llm_starts = self.metrics["llm_starts"]

        resp: Dict[str, Any] = {}
        resp.update({"action": "on_llm_start"})
        resp.update(flatten_dict(serialized))
        resp.update(self.metrics)

        self.mlflg.metrics(self.metrics, step=self.metrics["step"])

        for idx, prompt in enumerate(prompts):
            prompt_resp = deepcopy(resp)
            prompt_resp["prompt"] = prompt
            self.records["on_llm_start_records"].append(prompt_resp)
            self.records["action_records"].append(prompt_resp)
            self.mlflg.jsonf(prompt_resp, f"llm_start_{llm_starts}_prompt_{idx}")

    def on_llm_new_token(self, token: str, **kwargs: Any) -> None:
        """Run when LLM generates a new token."""
        self.metrics["step"] += 1
        self.metrics["llm_streams"] += 1

        llm_streams = self.metrics["llm_streams"]

        resp: Dict[str, Any] = {}
        resp.update({"action": "on_llm_new_token", "token": token})
        resp.update(self.metrics)

        self.mlflg.metrics(self.metrics, step=self.metrics["step"])

        self.records["on_llm_token_records"].append(resp)
self.records["on_llm_token_records"].append(resp) self.records["action_records"].append(resp) self.mlflg.jsonf(resp, f"llm_new_tokens_{llm_streams}") [docs] def on_llm_end(self, response: LLMResult, **kwargs: Any) -> None: """Run when LLM ends running.""" self.metrics["step"] += 1 self.metrics["llm_ends"] += 1 self.metrics["ends"] += 1 llm_ends = self.metrics["llm_ends"] resp: Dict[str, Any] = {} resp.update({"action": "on_llm_end"}) resp.update(flatten_dict(response.llm_output or {})) resp.update(self.metrics) self.mlflg.metrics(self.metrics, step=self.metrics["step"]) for generations in response.generations: for idx, generation in enumerate(generations): generation_resp = deepcopy(resp) generation_resp.update(flatten_dict(generation.dict())) generation_resp.update( analyze_text( generation.text, nlp=self.nlp, ) ) complexity_metrics: Dict[str, float] = generation_resp.pop("text_complexity_metrics") # type: ignore # noqa: E501 self.mlflg.metrics( complexity_metrics, step=self.metrics["step"], ) self.records["on_llm_end_records"].append(generation_resp) self.records["action_records"].append(generation_resp) self.mlflg.jsonf(resp, f"llm_end_{llm_ends}_generation_{idx}") dependency_tree = generation_resp["dependency_tree"] entities = generation_resp["entities"]
                dependency_tree = generation_resp["dependency_tree"]
                entities = generation_resp["entities"]
                self.mlflg.html(dependency_tree, "dep-" + hash_string(generation.text))
                self.mlflg.html(entities, "ent-" + hash_string(generation.text))

    def on_llm_error(
        self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any
    ) -> None:
        """Run when LLM errors."""
        self.metrics["step"] += 1
        self.metrics["errors"] += 1

    def on_chain_start(
        self, serialized: Dict[str, Any], inputs: Dict[str, Any], **kwargs: Any
    ) -> None:
        """Run when chain starts running."""
        self.metrics["step"] += 1
        self.metrics["chain_starts"] += 1
        self.metrics["starts"] += 1

        chain_starts = self.metrics["chain_starts"]

        resp: Dict[str, Any] = {}
        resp.update({"action": "on_chain_start"})
        resp.update(flatten_dict(serialized))
        resp.update(self.metrics)

        self.mlflg.metrics(self.metrics, step=self.metrics["step"])

        chain_input = ",".join([f"{k}={v}" for k, v in inputs.items()])
        input_resp = deepcopy(resp)
        input_resp["inputs"] = chain_input
        self.records["on_chain_start_records"].append(input_resp)
        self.records["action_records"].append(input_resp)
        self.mlflg.jsonf(input_resp, f"chain_start_{chain_starts}")

    def on_chain_end(self, outputs: Dict[str, Any], **kwargs: Any) -> None:
        """Run when chain ends running."""
        self.metrics["step"] += 1
"""Run when chain ends running.""" self.metrics["step"] += 1 self.metrics["chain_ends"] += 1 self.metrics["ends"] += 1 chain_ends = self.metrics["chain_ends"] resp: Dict[str, Any] = {} chain_output = ",".join([f"{k}={v}" for k, v in outputs.items()]) resp.update({"action": "on_chain_end", "outputs": chain_output}) resp.update(self.metrics) self.mlflg.metrics(self.metrics, step=self.metrics["step"]) self.records["on_chain_end_records"].append(resp) self.records["action_records"].append(resp) self.mlflg.jsonf(resp, f"chain_end_{chain_ends}") [docs] def on_chain_error( self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any ) -> None: """Run when chain errors.""" self.metrics["step"] += 1 self.metrics["errors"] += 1 [docs] def on_tool_start( self, serialized: Dict[str, Any], input_str: str, **kwargs: Any ) -> None: """Run when tool starts running.""" self.metrics["step"] += 1 self.metrics["tool_starts"] += 1 self.metrics["starts"] += 1 tool_starts = self.metrics["tool_starts"] resp: Dict[str, Any] = {} resp.update({"action": "on_tool_start", "input_str": input_str}) resp.update(flatten_dict(serialized)) resp.update(self.metrics) self.mlflg.metrics(self.metrics, step=self.metrics["step"]) self.records["on_tool_start_records"].append(resp)
self.records["on_tool_start_records"].append(resp) self.records["action_records"].append(resp) self.mlflg.jsonf(resp, f"tool_start_{tool_starts}") [docs] def on_tool_end(self, output: str, **kwargs: Any) -> None: """Run when tool ends running.""" self.metrics["step"] += 1 self.metrics["tool_ends"] += 1 self.metrics["ends"] += 1 tool_ends = self.metrics["tool_ends"] resp: Dict[str, Any] = {} resp.update({"action": "on_tool_end", "output": output}) resp.update(self.metrics) self.mlflg.metrics(self.metrics, step=self.metrics["step"]) self.records["on_tool_end_records"].append(resp) self.records["action_records"].append(resp) self.mlflg.jsonf(resp, f"tool_end_{tool_ends}") [docs] def on_tool_error( self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any ) -> None: """Run when tool errors.""" self.metrics["step"] += 1 self.metrics["errors"] += 1 [docs] def on_text(self, text: str, **kwargs: Any) -> None: """ Run when agent is ending. """ self.metrics["step"] += 1 self.metrics["text_ctr"] += 1 text_ctr = self.metrics["text_ctr"] resp: Dict[str, Any] = {} resp.update({"action": "on_text", "text": text}) resp.update(self.metrics) self.mlflg.metrics(self.metrics, step=self.metrics["step"]) self.records["on_text_records"].append(resp)
self.records["on_text_records"].append(resp) self.records["action_records"].append(resp) self.mlflg.jsonf(resp, f"on_text_{text_ctr}") [docs] def on_agent_finish(self, finish: AgentFinish, **kwargs: Any) -> None: """Run when agent ends running.""" self.metrics["step"] += 1 self.metrics["agent_ends"] += 1 self.metrics["ends"] += 1 agent_ends = self.metrics["agent_ends"] resp: Dict[str, Any] = {} resp.update( { "action": "on_agent_finish", "output": finish.return_values["output"], "log": finish.log, } ) resp.update(self.metrics) self.mlflg.metrics(self.metrics, step=self.metrics["step"]) self.records["on_agent_finish_records"].append(resp) self.records["action_records"].append(resp) self.mlflg.jsonf(resp, f"agent_finish_{agent_ends}") [docs] def on_agent_action(self, action: AgentAction, **kwargs: Any) -> Any: """Run on agent action.""" self.metrics["step"] += 1 self.metrics["tool_starts"] += 1 self.metrics["starts"] += 1 tool_starts = self.metrics["tool_starts"] resp: Dict[str, Any] = {} resp.update( { "action": "on_agent_action", "tool": action.tool, "tool_input": action.tool_input, "log": action.log, } ) resp.update(self.metrics) self.mlflg.metrics(self.metrics, step=self.metrics["step"])
        self.mlflg.metrics(self.metrics, step=self.metrics["step"])

        self.records["on_agent_action_records"].append(resp)
        self.records["action_records"].append(resp)
        self.mlflg.jsonf(resp, f"agent_action_{tool_starts}")

    def _create_session_analysis_df(self) -> Any:
        """Create a dataframe with all the information from the session."""
        pd = import_pandas()
        on_llm_start_records_df = pd.DataFrame(self.records["on_llm_start_records"])
        on_llm_end_records_df = pd.DataFrame(self.records["on_llm_end_records"])

        llm_input_prompts_df = (
            on_llm_start_records_df[["step", "prompt", "name"]]
            .dropna(axis=1)
            .rename({"step": "prompt_step"}, axis=1)
        )
        complexity_metrics_columns = []
        visualizations_columns = []

        complexity_metrics_columns = [
            "flesch_reading_ease",
            "flesch_kincaid_grade",
            "smog_index",
            "coleman_liau_index",
            "automated_readability_index",
            "dale_chall_readability_score",
            "difficult_words",
            "linsear_write_formula",
            "gunning_fog",
            # "text_standard",
            "fernandez_huerta",
            "szigriszt_pazos",
            "gutierrez_polini",
            "crawford",
            "gulpease_index",
            "osman",
        ]

        visualizations_columns = ["dependency_tree", "entities"]

        llm_outputs_df = (
            on_llm_end_records_df[
                [
                    "step",
                    "text",
[ "step", "text", "token_usage_total_tokens", "token_usage_prompt_tokens", "token_usage_completion_tokens", ] + complexity_metrics_columns + visualizations_columns ] .dropna(axis=1) .rename({"step": "output_step", "text": "output"}, axis=1) ) session_analysis_df = pd.concat([llm_input_prompts_df, llm_outputs_df], axis=1) session_analysis_df["chat_html"] = session_analysis_df[ ["prompt", "output"] ].apply( lambda row: construct_html_from_prompt_and_generation( row["prompt"], row["output"] ), axis=1, ) return session_analysis_df [docs] def flush_tracker(self, langchain_asset: Any = None, finish: bool = False) -> None: pd = import_pandas() self.mlflg.table("action_records", pd.DataFrame(self.records["action_records"])) session_analysis_df = self._create_session_analysis_df() chat_html = session_analysis_df.pop("chat_html") chat_html = chat_html.replace("\n", "", regex=True) self.mlflg.table("session_analysis", pd.DataFrame(session_analysis_df)) self.mlflg.html("".join(chat_html.tolist()), "chat_html") if langchain_asset: # To avoid circular import error # mlflow only supports LLMChain asset if "langchain.chains.llm.LLMChain" in str(type(langchain_asset)): self.mlflg.langchain_artifact(langchain_asset) else: langchain_asset_path = str(Path(self.temp_dir.name, "model.json")) try:
                    langchain_asset.save(langchain_asset_path)
                    self.mlflg.artifact(langchain_asset_path)
                except ValueError:
                    try:
                        langchain_asset.save_agent(langchain_asset_path)
                        self.mlflg.artifact(langchain_asset_path)
                    except AttributeError:
                        print("Could not save model.")
                        traceback.print_exc()
                        pass
                    except NotImplementedError:
                        print("Could not save model.")
                        traceback.print_exc()
                        pass
                except NotImplementedError:
                    print("Could not save model.")
                    traceback.print_exc()
                    pass
        if finish:
            self.mlflg.finish_run()
            self._reset()
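A hedged usage sketch, not part of the module: it assumes `mlflow>=2.3.0`, `spacy`, `textstat`, and `openai` are installed and that MLFLOW_TRACKING_URI points at a reachable tracking server (or is left empty for local file-based tracking).

from langchain.callbacks.mlflow_callback import MlflowCallbackHandler
from langchain.llms import OpenAI

# Defaults create or reuse an experiment named "langchain" and a run named
# "langchainrun-" plus a random suffix (see MlflowLogger.start_run above).
mlflow_callback = MlflowCallbackHandler()

llm = OpenAI(temperature=0, callbacks=[mlflow_callback])
llm.generate(["Tell me a joke"])

# Logs the action-record and session-analysis tables as artifacts,
# finishes the MLflow run, and resets the handler's counters.
mlflow_callback.flush_tracker(langchain_asset=llm, finish=True)
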
Source code for langchain.callbacks.infino_callback
(source: https://api.python.langchain.com/en/stable/_modules/langchain/callbacks/infino_callback.html)

import time
from typing import Any, Dict, List, Optional, Union

from langchain.callbacks.base import BaseCallbackHandler
from langchain.schema import AgentAction, AgentFinish, LLMResult


def import_infino() -> Any:
    try:
        from infinopy import InfinoClient
    except ImportError:
        raise ImportError(
            "To use the Infino callbacks manager you need to have the"
            " `infinopy` python package installed."
            "Please install it with `pip install infinopy`"
        )
    return InfinoClient()


class InfinoCallbackHandler(BaseCallbackHandler):
    """Callback Handler that logs to Infino."""

    def __init__(
        self,
        model_id: Optional[str] = None,
        model_version: Optional[str] = None,
        verbose: bool = False,
    ) -> None:
        # Set Infino client
        self.client = import_infino()
        self.model_id = model_id
        self.model_version = model_version
        self.verbose = verbose

    def _send_to_infino(
        self,
        key: str,
        value: Any,
        is_ts: bool = True,
    ) -> None:
        """Send the key-value to Infino.

        Parameters:
            key (str): the key to send to Infino.
            value (Any): the value to send to Infino.
            is_ts (bool): if True, the value is part of a time series, else it
                is sent as a log message.
        """
        payload = {
            "date": int(time.time()),
            key: value,
            "labels": {
                "model_id": self.model_id,
"labels": { "model_id": self.model_id, "model_version": self.model_version, }, } if self.verbose: print(f"Tracking {key} with Infino: {payload}") # Append to Infino time series only if is_ts is True, otherwise # append to Infino log. if is_ts: self.client.append_ts(payload) else: self.client.append_log(payload) [docs] def on_llm_start( self, serialized: Dict[str, Any], prompts: List[str], **kwargs: Any, ) -> None: """Log the prompts to Infino, and set start time and error flag.""" for prompt in prompts: self._send_to_infino("prompt", prompt, is_ts=False) # Set the error flag to indicate no error (this will get overridden # in on_llm_error if an error occurs). self.error = 0 # Set the start time (so that we can calculate the request # duration in on_llm_end). self.start_time = time.time() [docs] def on_llm_new_token(self, token: str, **kwargs: Any) -> None: """Do nothing when a new token is generated.""" pass [docs] def on_llm_end(self, response: LLMResult, **kwargs: Any) -> None: """Log the latency, error, token usage, and response to Infino.""" # Calculate and track the request latency. self.end_time = time.time() duration = self.end_time - self.start_time self._send_to_infino("latency", duration) # Track success or error flag.
        # Track success or error flag.
        self._send_to_infino("error", self.error)

        # Track token usage.
        if (response.llm_output is not None) and isinstance(response.llm_output, Dict):
            token_usage = response.llm_output["token_usage"]
            if token_usage is not None:
                prompt_tokens = token_usage["prompt_tokens"]
                total_tokens = token_usage["total_tokens"]
                completion_tokens = token_usage["completion_tokens"]
                self._send_to_infino("prompt_tokens", prompt_tokens)
                self._send_to_infino("total_tokens", total_tokens)
                self._send_to_infino("completion_tokens", completion_tokens)

        # Track prompt response.
        for generations in response.generations:
            for generation in generations:
                self._send_to_infino("prompt_response", generation.text, is_ts=False)

    def on_llm_error(
        self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any
    ) -> None:
        """Set the error flag."""
        self.error = 1

    def on_chain_start(
        self, serialized: Dict[str, Any], inputs: Dict[str, Any], **kwargs: Any
    ) -> None:
        """Do nothing when LLM chain starts."""
        pass

    def on_chain_end(self, outputs: Dict[str, Any], **kwargs: Any) -> None:
        """Do nothing when LLM chain ends."""
        pass

    def on_chain_error(
        self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any
    ) -> None:
        """Need to log the error."""
        pass
    def on_tool_start(
        self,
        serialized: Dict[str, Any],
        input_str: str,
        **kwargs: Any,
    ) -> None:
        """Do nothing when tool starts."""
        pass

    def on_agent_action(self, action: AgentAction, **kwargs: Any) -> Any:
        """Do nothing when agent takes a specific action."""
        pass

    def on_tool_end(
        self,
        output: str,
        observation_prefix: Optional[str] = None,
        llm_prefix: Optional[str] = None,
        **kwargs: Any,
    ) -> None:
        """Do nothing when tool ends."""
        pass

    def on_tool_error(
        self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any
    ) -> None:
        """Do nothing when tool outputs an error."""
        pass

    def on_text(self, text: str, **kwargs: Any) -> None:
        """Do nothing."""
        pass

    def on_agent_finish(self, finish: AgentFinish, **kwargs: Any) -> None:
        """Do nothing."""
        pass
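A brief usage sketch, not part of the module: it assumes `infinopy` and `openai` are installed and that an Infino server is running and reachable by the client; the model id and prompt are placeholders.

from langchain.callbacks.infino_callback import InfinoCallbackHandler
from langchain.llms import OpenAI

handler = InfinoCallbackHandler(
    model_id="test-model",  # stored as a label on every Infino record
    model_version="0.1",
    verbose=True,
)

llm = OpenAI(temperature=0.1, callbacks=[handler])
# The prompt, latency, token counts, and response are logged to Infino.
llm("Tell me a joke")
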
Source code for langchain.callbacks.comet_ml_callback
(source: https://api.python.langchain.com/en/stable/_modules/langchain/callbacks/comet_ml_callback.html)

import tempfile
from copy import deepcopy
from pathlib import Path
from typing import Any, Callable, Dict, List, Optional, Sequence, Union

import langchain
from langchain.callbacks.base import BaseCallbackHandler
from langchain.callbacks.utils import (
    BaseMetadataCallbackHandler,
    flatten_dict,
    import_pandas,
    import_spacy,
    import_textstat,
)
from langchain.schema import AgentAction, AgentFinish, Generation, LLMResult

LANGCHAIN_MODEL_NAME = "langchain-model"


def import_comet_ml() -> Any:
    try:
        import comet_ml  # noqa: F401
    except ImportError:
        raise ImportError(
            "To use the comet_ml callback manager you need to have the "
            "`comet_ml` python package installed. Please install it with"
            " `pip install comet_ml`"
        )
    return comet_ml


def _get_experiment(
    workspace: Optional[str] = None, project_name: Optional[str] = None
) -> Any:
    comet_ml = import_comet_ml()

    experiment = comet_ml.Experiment(  # type: ignore
        workspace=workspace,
        project_name=project_name,
    )

    return experiment


def _fetch_text_complexity_metrics(text: str) -> dict:
    textstat = import_textstat()
    text_complexity_metrics = {
        "flesch_reading_ease": textstat.flesch_reading_ease(text),
        "flesch_kincaid_grade": textstat.flesch_kincaid_grade(text),
        "smog_index": textstat.smog_index(text),
        "coleman_liau_index": textstat.coleman_liau_index(text),
"automated_readability_index": textstat.automated_readability_index(text), "dale_chall_readability_score": textstat.dale_chall_readability_score(text), "difficult_words": textstat.difficult_words(text), "linsear_write_formula": textstat.linsear_write_formula(text), "gunning_fog": textstat.gunning_fog(text), "text_standard": textstat.text_standard(text), "fernandez_huerta": textstat.fernandez_huerta(text), "szigriszt_pazos": textstat.szigriszt_pazos(text), "gutierrez_polini": textstat.gutierrez_polini(text), "crawford": textstat.crawford(text), "gulpease_index": textstat.gulpease_index(text), "osman": textstat.osman(text), } return text_complexity_metrics def _summarize_metrics_for_generated_outputs(metrics: Sequence) -> dict: pd = import_pandas() metrics_df = pd.DataFrame(metrics) metrics_summary = metrics_df.describe() return metrics_summary.to_dict() [docs]class CometCallbackHandler(BaseMetadataCallbackHandler, BaseCallbackHandler): """Callback Handler that logs to Comet. Parameters: job_type (str): The type of comet_ml task such as "inference", "testing" or "qc" project_name (str): The comet_ml project name tags (list): Tags to add to the task task_name (str): Name of the comet_ml task visualize (bool): Whether to visualize the run. complexity_metrics (bool): Whether to log complexity metrics stream_logs (bool): Whether to stream callback actions to Comet
    This handler will utilize the associated callback method and formats
    the input of each callback function with metadata regarding the state of LLM run,
    and adds the response to the list of records for both the {method}_records and
    action. It then logs the response to Comet.
    """

    def __init__(
        self,
        task_type: Optional[str] = "inference",
        workspace: Optional[str] = None,
        project_name: Optional[str] = None,
        tags: Optional[Sequence] = None,
        name: Optional[str] = None,
        visualizations: Optional[List[str]] = None,
        complexity_metrics: bool = False,
        custom_metrics: Optional[Callable] = None,
        stream_logs: bool = True,
    ) -> None:
        """Initialize callback handler."""
        self.comet_ml = import_comet_ml()
        super().__init__()

        self.task_type = task_type
        self.workspace = workspace
        self.project_name = project_name
        self.tags = tags
        self.visualizations = visualizations
        self.complexity_metrics = complexity_metrics
        self.custom_metrics = custom_metrics
        self.stream_logs = stream_logs
        self.temp_dir = tempfile.TemporaryDirectory()

        self.experiment = _get_experiment(workspace, project_name)
        self.experiment.log_other("Created from", "langchain")
        if tags:
            self.experiment.add_tags(tags)
        self.name = name
        if self.name:
            self.experiment.set_name(self.name)
        warning = (
            "The comet_ml callback is currently in beta and is subject to change "
            "based on updates to `langchain`. Please report any issues to "
"based on updates to `langchain`. Please report any issues to " "https://github.com/comet-ml/issue-tracking/issues with the tag " "`langchain`." ) self.comet_ml.LOGGER.warning(warning) self.callback_columns: list = [] self.action_records: list = [] self.complexity_metrics = complexity_metrics if self.visualizations: spacy = import_spacy() self.nlp = spacy.load("en_core_web_sm") else: self.nlp = None def _init_resp(self) -> Dict: return {k: None for k in self.callback_columns} [docs] def on_llm_start( self, serialized: Dict[str, Any], prompts: List[str], **kwargs: Any ) -> None: """Run when LLM starts.""" self.step += 1 self.llm_starts += 1 self.starts += 1 metadata = self._init_resp() metadata.update({"action": "on_llm_start"}) metadata.update(flatten_dict(serialized)) metadata.update(self.get_custom_callback_meta()) for prompt in prompts: prompt_resp = deepcopy(metadata) prompt_resp["prompts"] = prompt self.on_llm_start_records.append(prompt_resp) self.action_records.append(prompt_resp) if self.stream_logs: self._log_stream(prompt, metadata, self.step) [docs] def on_llm_new_token(self, token: str, **kwargs: Any) -> None: """Run when LLM generates a new token.""" self.step += 1 self.llm_streams += 1 resp = self._init_resp()
        self.llm_streams += 1

        resp = self._init_resp()
        resp.update({"action": "on_llm_new_token", "token": token})
        resp.update(self.get_custom_callback_meta())

        self.action_records.append(resp)

    def on_llm_end(self, response: LLMResult, **kwargs: Any) -> None:
        """Run when LLM ends running."""
        self.step += 1
        self.llm_ends += 1
        self.ends += 1

        metadata = self._init_resp()
        metadata.update({"action": "on_llm_end"})
        metadata.update(flatten_dict(response.llm_output or {}))
        metadata.update(self.get_custom_callback_meta())

        output_complexity_metrics = []
        output_custom_metrics = []

        for prompt_idx, generations in enumerate(response.generations):
            for gen_idx, generation in enumerate(generations):
                text = generation.text

                generation_resp = deepcopy(metadata)
                generation_resp.update(flatten_dict(generation.dict()))

                complexity_metrics = self._get_complexity_metrics(text)
                if complexity_metrics:
                    output_complexity_metrics.append(complexity_metrics)
                    generation_resp.update(complexity_metrics)

                custom_metrics = self._get_custom_metrics(
                    generation, prompt_idx, gen_idx
                )
                if custom_metrics:
                    output_custom_metrics.append(custom_metrics)
                    generation_resp.update(custom_metrics)

                if self.stream_logs:
                    self._log_stream(text, metadata, self.step)

                self.action_records.append(generation_resp)
                self.on_llm_end_records.append(generation_resp)

        self._log_text_metrics(output_complexity_metrics, step=self.step)
        self._log_text_metrics(output_custom_metrics, step=self.step)
    def on_llm_error(
        self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any
    ) -> None:
        """Run when LLM errors."""
        self.step += 1
        self.errors += 1

    def on_chain_start(
        self, serialized: Dict[str, Any], inputs: Dict[str, Any], **kwargs: Any
    ) -> None:
        """Run when chain starts running."""
        self.step += 1
        self.chain_starts += 1
        self.starts += 1

        resp = self._init_resp()
        resp.update({"action": "on_chain_start"})
        resp.update(flatten_dict(serialized))
        resp.update(self.get_custom_callback_meta())

        for chain_input_key, chain_input_val in inputs.items():
            if isinstance(chain_input_val, str):
                input_resp = deepcopy(resp)
                if self.stream_logs:
                    self._log_stream(chain_input_val, resp, self.step)
                input_resp.update({chain_input_key: chain_input_val})
                self.action_records.append(input_resp)
            else:
                self.comet_ml.LOGGER.warning(
                    f"Unexpected data format provided! "
                    f"Input Value for {chain_input_key} will not be logged"
                )

    def on_chain_end(self, outputs: Dict[str, Any], **kwargs: Any) -> None:
        """Run when chain ends running."""
        self.step += 1
        self.chain_ends += 1
        self.ends += 1

        resp = self._init_resp()
        resp.update({"action": "on_chain_end"})
        resp.update(self.get_custom_callback_meta())

        for chain_output_key, chain_output_val in outputs.items():
if isinstance(chain_output_val, str): output_resp = deepcopy(resp) if self.stream_logs: self._log_stream(chain_output_val, resp, self.step) output_resp.update({chain_output_key: chain_output_val}) self.action_records.append(output_resp) else: self.comet_ml.LOGGER.warning( f"Unexpected data format provided! " f"Output Value for {chain_output_key} will not be logged" ) [docs] def on_chain_error( self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any ) -> None: """Run when chain errors.""" self.step += 1 self.errors += 1 [docs] def on_tool_start( self, serialized: Dict[str, Any], input_str: str, **kwargs: Any ) -> None: """Run when tool starts running.""" self.step += 1 self.tool_starts += 1 self.starts += 1 resp = self._init_resp() resp.update({"action": "on_tool_start"}) resp.update(flatten_dict(serialized)) resp.update(self.get_custom_callback_meta()) if self.stream_logs: self._log_stream(input_str, resp, self.step) resp.update({"input_str": input_str}) self.action_records.append(resp) [docs] def on_tool_end(self, output: str, **kwargs: Any) -> None: """Run when tool ends running.""" self.step += 1 self.tool_ends += 1 self.ends += 1 resp = self._init_resp() resp.update({"action": "on_tool_end"}) resp.update(self.get_custom_callback_meta())
if self.stream_logs: self._log_stream(output, resp, self.step) resp.update({"output": output}) self.action_records.append(resp) [docs] def on_tool_error( self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any ) -> None: """Run when tool errors.""" self.step += 1 self.errors += 1 [docs] def on_text(self, text: str, **kwargs: Any) -> None: """Run on arbitrary text.""" self.step += 1 self.text_ctr += 1 resp = self._init_resp() resp.update({"action": "on_text"}) resp.update(self.get_custom_callback_meta()) if self.stream_logs: self._log_stream(text, resp, self.step) resp.update({"text": text}) self.action_records.append(resp) [docs] def on_agent_finish(self, finish: AgentFinish, **kwargs: Any) -> None: """Run when agent ends running.""" self.step += 1 self.agent_ends += 1 self.ends += 1 resp = self._init_resp() output = finish.return_values["output"] log = finish.log resp.update({"action": "on_agent_finish", "log": log}) resp.update(self.get_custom_callback_meta()) if self.stream_logs: self._log_stream(output, resp, self.step) resp.update({"output": output}) self.action_records.append(resp) [docs] def on_agent_action(self, action: AgentAction, **kwargs: Any) -> Any: """Run on agent action."""
"""Run on agent action.""" self.step += 1 self.tool_starts += 1 self.starts += 1 tool = action.tool tool_input = str(action.tool_input) log = action.log resp = self._init_resp() resp.update({"action": "on_agent_action", "log": log, "tool": tool}) resp.update(self.get_custom_callback_meta()) if self.stream_logs: self._log_stream(tool_input, resp, self.step) resp.update({"tool_input": tool_input}) self.action_records.append(resp) def _get_complexity_metrics(self, text: str) -> dict: """Compute text complexity metrics using textstat. Parameters: text (str): The text to analyze. Returns: (dict): A dictionary containing the complexity metrics. """ resp = {} if self.complexity_metrics: text_complexity_metrics = _fetch_text_complexity_metrics(text) resp.update(text_complexity_metrics) return resp def _get_custom_metrics( self, generation: Generation, prompt_idx: int, gen_idx: int ) -> dict: """Compute Custom Metrics for an LLM Generated Output Args: generation (LLMResult): Output generation from an LLM prompt_idx (int): List index of the input prompt gen_idx (int): List index of the generated output Returns: dict: A dictionary containing the custom metrics. """ resp = {} if self.custom_metrics: custom_metrics = self.custom_metrics(generation, prompt_idx, gen_idx) resp.update(custom_metrics) return resp [docs] def flush_tracker( self,
langchain_asset: Any = None, task_type: Optional[str] = "inference", workspace: Optional[str] = None, project_name: Optional[str] = "comet-langchain-demo", tags: Optional[Sequence] = None, name: Optional[str] = None, visualizations: Optional[List[str]] = None, complexity_metrics: bool = False, custom_metrics: Optional[Callable] = None, finish: bool = False, reset: bool = False, ) -> None: """Flush the tracker and set up the session. Everything after this will be a new table. Args: name: Name of the performed session so far, so it is identifiable langchain_asset: The langchain asset to save. finish: Whether to finish the run. Returns: None """ self._log_session(langchain_asset) if langchain_asset: try: self._log_model(langchain_asset) except Exception: self.comet_ml.LOGGER.error( "Failed to export agent or LLM to Comet", exc_info=True, extra={"show_traceback": True}, ) if finish: self.experiment.end() if reset: self._reset( task_type, workspace, project_name, tags, name, visualizations, complexity_metrics, custom_metrics, ) def _log_stream(self, prompt: str, metadata: dict, step: int) -> None:
self.experiment.log_text(prompt, metadata=metadata, step=step) def _log_model(self, langchain_asset: Any) -> None: model_parameters = self._get_llm_parameters(langchain_asset) self.experiment.log_parameters(model_parameters, prefix="model") langchain_asset_path = Path(self.temp_dir.name, "model.json") model_name = self.name if self.name else LANGCHAIN_MODEL_NAME try: if hasattr(langchain_asset, "save"): langchain_asset.save(langchain_asset_path) self.experiment.log_model(model_name, str(langchain_asset_path)) except (ValueError, AttributeError, NotImplementedError) as e: if hasattr(langchain_asset, "save_agent"): langchain_asset.save_agent(langchain_asset_path) self.experiment.log_model(model_name, str(langchain_asset_path)) else: self.comet_ml.LOGGER.error( f"{e}" " Could not save Langchain Asset " f"for {langchain_asset.__class__.__name__}" ) def _log_session(self, langchain_asset: Optional[Any] = None) -> None: try: llm_session_df = self._create_session_analysis_dataframe(langchain_asset) # Log the cleaned dataframe as a table self.experiment.log_table("langchain-llm-session.csv", llm_session_df) except Exception: self.comet_ml.LOGGER.warning( "Failed to log session data to Comet", exc_info=True, extra={"show_traceback": True}, ) try: metadata = {"langchain_version": str(langchain.__version__)} # Log the langchain low-level records as a JSON file directly
# Log the langchain low-level records as a JSON file directly self.experiment.log_asset_data( self.action_records, "langchain-action_records.json", metadata=metadata ) except Exception: self.comet_ml.LOGGER.warning( "Failed to log session data to Comet", exc_info=True, extra={"show_traceback": True}, ) try: self._log_visualizations(llm_session_df) except Exception: self.comet_ml.LOGGER.warning( "Failed to log visualizations to Comet", exc_info=True, extra={"show_traceback": True}, ) def _log_text_metrics(self, metrics: Sequence[dict], step: int) -> None: if not metrics: return metrics_summary = _summarize_metrics_for_generated_outputs(metrics) for key, value in metrics_summary.items(): self.experiment.log_metrics(value, prefix=key, step=step) def _log_visualizations(self, session_df: Any) -> None: if not (self.visualizations and self.nlp): return spacy = import_spacy() prompts = session_df["prompts"].tolist() outputs = session_df["text"].tolist() for idx, (prompt, output) in enumerate(zip(prompts, outputs)): doc = self.nlp(output) sentence_spans = list(doc.sents) for visualization in self.visualizations: try: html = spacy.displacy.render( sentence_spans, style=visualization, options={"compact": True}, jupyter=False, page=True, ) self.experiment.log_asset_data( html,
) self.experiment.log_asset_data( html, name=f"langchain-viz-{visualization}-{idx}.html", metadata={"prompt": prompt}, step=idx, ) except Exception as e: self.comet_ml.LOGGER.warning( e, exc_info=True, extra={"show_traceback": True} ) return def _reset( self, task_type: Optional[str] = None, workspace: Optional[str] = None, project_name: Optional[str] = None, tags: Optional[Sequence] = None, name: Optional[str] = None, visualizations: Optional[List[str]] = None, complexity_metrics: bool = False, custom_metrics: Optional[Callable] = None, ) -> None: _task_type = task_type if task_type else self.task_type _workspace = workspace if workspace else self.workspace _project_name = project_name if project_name else self.project_name _tags = tags if tags else self.tags _name = name if name else self.name _visualizations = visualizations if visualizations else self.visualizations _complexity_metrics = ( complexity_metrics if complexity_metrics else self.complexity_metrics ) _custom_metrics = custom_metrics if custom_metrics else self.custom_metrics self.__init__( # type: ignore task_type=_task_type, workspace=_workspace, project_name=_project_name, tags=_tags, name=_name, visualizations=_visualizations, complexity_metrics=_complexity_metrics, custom_metrics=_custom_metrics, ) self.reset_callback_meta() self.temp_dir = tempfile.TemporaryDirectory()
def _create_session_analysis_dataframe(self, langchain_asset: Any = None) -> Any: # Returns a pandas DataFrame; annotated as Any because pandas is imported lazily pd = import_pandas() llm_parameters = self._get_llm_parameters(langchain_asset) num_generations_per_prompt = llm_parameters.get("n", 1) llm_start_records_df = pd.DataFrame(self.on_llm_start_records) # Repeat each input row based on the number of outputs generated per prompt llm_start_records_df = llm_start_records_df.loc[ llm_start_records_df.index.repeat(num_generations_per_prompt) ].reset_index(drop=True) llm_end_records_df = pd.DataFrame(self.on_llm_end_records) llm_session_df = pd.merge( llm_start_records_df, llm_end_records_df, left_index=True, right_index=True, suffixes=["_llm_start", "_llm_end"], ) return llm_session_df def _get_llm_parameters(self, langchain_asset: Any = None) -> dict: if not langchain_asset: return {} try: if hasattr(langchain_asset, "agent"): llm_parameters = langchain_asset.agent.llm_chain.llm.dict() elif hasattr(langchain_asset, "llm_chain"): llm_parameters = langchain_asset.llm_chain.llm.dict() elif hasattr(langchain_asset, "llm"): llm_parameters = langchain_asset.llm.dict() else: llm_parameters = langchain_asset.dict() except Exception: return {} return llm_parameters
https://api.python.langchain.com/en/stable/_modules/langchain/callbacks/comet_ml_callback.html
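A minimal usage sketch of the handler above, for orientation only. Assumptions not taken from the source: a Comet account with COMET_API_KEY exported and an OpenAI API key; the constructor arguments mirror the fields used in `_reset` and the methods above (`project_name`, `tags`, `complexity_metrics`, `stream_logs`).

from langchain.callbacks import CometCallbackHandler
from langchain.llms import OpenAI

# Hypothetical configuration; argument names follow the attributes referenced
# in the source (self.stream_logs, self.complexity_metrics, self.tags, ...).
comet_callback = CometCallbackHandler(
    project_name="comet-langchain-demo",
    complexity_metrics=True,  # enables the textstat metrics in _get_complexity_metrics
    stream_logs=True,         # logs each prompt/generation via _log_stream
    tags=["llm-demo"],
)
llm = OpenAI(temperature=0.7, callbacks=[comet_callback])
llm.generate(["Tell me a joke", "Tell me a poem"])

# Logs the session table, saves the asset, and optionally ends the experiment.
comet_callback.flush_tracker(llm, finish=True)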
Source code for langchain.callbacks.streaming_stdout """Callback Handler streams to stdout on new llm token.""" import sys from typing import Any, Dict, List, Union from langchain.callbacks.base import BaseCallbackHandler from langchain.schema import AgentAction, AgentFinish, LLMResult [docs]class StreamingStdOutCallbackHandler(BaseCallbackHandler): """Callback handler for streaming. Only works with LLMs that support streaming.""" [docs] def on_llm_start( self, serialized: Dict[str, Any], prompts: List[str], **kwargs: Any ) -> None: """Run when LLM starts running.""" [docs] def on_llm_new_token(self, token: str, **kwargs: Any) -> None: """Run on new LLM token. Only available when streaming is enabled.""" sys.stdout.write(token) sys.stdout.flush() [docs] def on_llm_end(self, response: LLMResult, **kwargs: Any) -> None: """Run when LLM ends running.""" [docs] def on_llm_error( self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any ) -> None: """Run when LLM errors.""" [docs] def on_chain_start( self, serialized: Dict[str, Any], inputs: Dict[str, Any], **kwargs: Any ) -> None: """Run when chain starts running.""" [docs] def on_chain_end(self, outputs: Dict[str, Any], **kwargs: Any) -> None: """Run when chain ends running.""" [docs] def on_chain_error( self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any ) -> None: """Run when chain errors."""
) -> None: """Run when chain errors.""" [docs] def on_tool_start( self, serialized: Dict[str, Any], input_str: str, **kwargs: Any ) -> None: """Run when tool starts running.""" [docs] def on_agent_action(self, action: AgentAction, **kwargs: Any) -> Any: """Run on agent action.""" pass [docs] def on_tool_end(self, output: str, **kwargs: Any) -> None: """Run when tool ends running.""" [docs] def on_tool_error( self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any ) -> None: """Run when tool errors.""" [docs] def on_text(self, text: str, **kwargs: Any) -> None: """Run on arbitrary text.""" [docs] def on_agent_finish(self, finish: AgentFinish, **kwargs: Any) -> None: """Run on agent end."""
https://api.python.langchain.com/en/stable/_modules/langchain/callbacks/streaming_stdout.html
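A short sketch of the streaming handler in use. The only assumption beyond the source is an OpenAI API key in the environment; note that `streaming=True` is required, since the handler only acts in `on_llm_new_token`.

from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
from langchain.llms import OpenAI

llm = OpenAI(
    streaming=True,  # without streaming, on_llm_new_token is never called
    callbacks=[StreamingStdOutCallbackHandler()],
    temperature=0,
)
llm("Write a haiku about callbacks.")  # tokens are printed to stdout as they arrive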
Source code for langchain.callbacks.human from typing import Any, Callable, Dict, Optional from uuid import UUID from langchain.callbacks.base import BaseCallbackHandler def _default_approve(_input: str) -> bool: msg = ( "Do you approve of the following input? " "Anything except 'Y'/'Yes' (case-insensitive) will be treated as a no." ) msg += "\n\n" + _input + "\n" resp = input(msg) return resp.lower() in ("yes", "y") def _default_true(_: Dict[str, Any]) -> bool: return True class HumanRejectedException(Exception): """Exception to raise when a person manually reviews and rejects a value.""" [docs]class HumanApprovalCallbackHandler(BaseCallbackHandler): """Callback for manually validating values.""" raise_error: bool = True def __init__( self, approve: Callable[[Any], bool] = _default_approve, should_check: Callable[[Dict[str, Any]], bool] = _default_true, ): self._approve = approve self._should_check = should_check [docs] def on_tool_start( self, serialized: Dict[str, Any], input_str: str, *, run_id: UUID, parent_run_id: Optional[UUID] = None, **kwargs: Any, ) -> Any: if self._should_check(serialized) and not self._approve(input_str): raise HumanRejectedException( f"Inputs {input_str} to tool {serialized} were rejected." )
https://api.python.langchain.com/en/stable/_modules/langchain/callbacks/human.html
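A sketch of gating tool execution behind a manual check. The `only_terminal` predicate and the "terminal" tool name are illustrative assumptions; `should_check` receives the serialized tool dict, as in `on_tool_start` above.

from langchain.agents import AgentType, initialize_agent, load_tools
from langchain.callbacks.human import (
    HumanApprovalCallbackHandler,
    HumanRejectedException,
)
from langchain.llms import OpenAI

def only_terminal(serialized: dict) -> bool:
    # Ask for approval only before shell commands run.
    return serialized.get("name") == "terminal"

llm = OpenAI(temperature=0)
tools = load_tools(["terminal"])
agent = initialize_agent(tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION)

try:
    agent.run(
        "List the files in the current directory.",
        callbacks=[HumanApprovalCallbackHandler(should_check=only_terminal)],
    )
except HumanRejectedException:
    print("The reviewer rejected the tool input.")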
Source code for langchain.callbacks.manager from __future__ import annotations import asyncio import functools import logging import os import warnings from contextlib import asynccontextmanager, contextmanager from contextvars import ContextVar from typing import ( Any, AsyncGenerator, Dict, Generator, List, Optional, Type, TypeVar, Union, cast, ) from uuid import UUID, uuid4 import langchain from langchain.callbacks.base import ( BaseCallbackHandler, BaseCallbackManager, ChainManagerMixin, LLMManagerMixin, RunManagerMixin, ToolManagerMixin, ) from langchain.callbacks.openai_info import OpenAICallbackHandler from langchain.callbacks.stdout import StdOutCallbackHandler from langchain.callbacks.tracers.langchain import LangChainTracer from langchain.callbacks.tracers.langchain_v1 import LangChainTracerV1, TracerSessionV1 from langchain.callbacks.tracers.stdout import ConsoleCallbackHandler from langchain.callbacks.tracers.wandb import WandbTracer from langchain.schema import ( AgentAction, AgentFinish, BaseMessage, LLMResult, get_buffer_string, ) logger = logging.getLogger(__name__) Callbacks = Optional[Union[List[BaseCallbackHandler], BaseCallbackManager]] openai_callback_var: ContextVar[Optional[OpenAICallbackHandler]] = ContextVar( "openai_callback", default=None ) tracing_callback_var: ContextVar[ Optional[LangChainTracerV1] ] = ContextVar( # noqa: E501 "tracing_callback", default=None ) wandb_tracing_callback_var: ContextVar[ Optional[WandbTracer]
] = ContextVar( # noqa: E501 "tracing_wandb_callback", default=None ) tracing_v2_callback_var: ContextVar[ Optional[LangChainTracer] ] = ContextVar( # noqa: E501 "tracing_callback_v2", default=None ) def _get_debug() -> bool: return langchain.debug [docs]@contextmanager def get_openai_callback() -> Generator[OpenAICallbackHandler, None, None]: """Get the OpenAI callback handler in a context manager, which conveniently exposes token and cost information. Returns: OpenAICallbackHandler: The OpenAI callback handler. Example: >>> with get_openai_callback() as cb: ... # Use the OpenAI callback handler """ cb = OpenAICallbackHandler() openai_callback_var.set(cb) yield cb openai_callback_var.set(None) [docs]@contextmanager def tracing_enabled( session_name: str = "default", ) -> Generator[TracerSessionV1, None, None]: """Get the deprecated LangChainTracer in a context manager. Args: session_name (str, optional): The name of the session. Defaults to "default". Returns: TracerSessionV1: The LangChainTracer session. Example: >>> with tracing_enabled() as session: ... # Use the LangChainTracer session """ cb = LangChainTracerV1() session = cast(TracerSessionV1, cb.load_session(session_name))
tracing_callback_var.set(cb) yield session tracing_callback_var.set(None) [docs]@contextmanager def wandb_tracing_enabled( session_name: str = "default", ) -> Generator[None, None, None]: """Get the WandbTracer in a context manager. Args: session_name (str, optional): The name of the session. Defaults to "default". Returns: None Example: >>> with wandb_tracing_enabled() as session: ... # Use the WandbTracer session """ cb = WandbTracer() wandb_tracing_callback_var.set(cb) yield None wandb_tracing_callback_var.set(None) @contextmanager def tracing_v2_enabled( project_name: Optional[str] = None, *, example_id: Optional[Union[str, UUID]] = None, ) -> Generator[None, None, None]: """Instruct LangChain to log all runs in context to LangSmith. Args: project_name (str, optional): The name of the project. Defaults to "default". example_id (str or UUID, optional): The ID of the example. Defaults to None. Returns: None Example: >>> with tracing_v2_enabled(): ... # LangChain code will automatically be traced """ # Issue a warning that this is experimental warnings.warn( "The tracing v2 API is in development. " "This is not yet stable and may change in the future." ) if isinstance(example_id, str): example_id = UUID(example_id) cb = LangChainTracer(
example_id = UUID(example_id) cb = LangChainTracer( example_id=example_id, project_name=project_name, ) tracing_v2_callback_var.set(cb) yield tracing_v2_callback_var.set(None) @contextmanager def trace_as_chain_group( group_name: str, *, project_name: Optional[str] = None, example_id: Optional[Union[str, UUID]] = None, tags: Optional[List[str]] = None, ) -> Generator[CallbackManager, None, None]: """Get a callback manager for a chain group in a context manager. Useful for grouping different calls together as a single run even if they aren't composed in a single chain. Args: group_name (str): The name of the chain group. project_name (str, optional): The name of the project. Defaults to None. example_id (str or UUID, optional): The ID of the example. Defaults to None. tags (List[str], optional): The inheritable tags to apply to all runs. Defaults to None. Returns: CallbackManager: The callback manager for the chain group. Example: >>> with trace_as_chain_group("group_name") as manager: ... # Use the callback manager for the chain group ... llm.predict("Foo", callbacks=manager) """ cb = LangChainTracer( project_name=project_name, example_id=example_id, ) cm = CallbackManager.configure( inheritable_callbacks=[cb], inheritable_tags=tags, ) run_manager = cm.on_chain_start({"name": group_name}, {})
) run_manager = cm.on_chain_start({"name": group_name}, {}) yield run_manager.get_child() run_manager.on_chain_end({}) @asynccontextmanager async def atrace_as_chain_group( group_name: str, *, project_name: Optional[str] = None, example_id: Optional[Union[str, UUID]] = None, tags: Optional[List[str]] = None, ) -> AsyncGenerator[AsyncCallbackManager, None]: """Get an async callback manager for a chain group in a context manager. Useful for grouping different async calls together as a single run even if they aren't composed in a single chain. Args: group_name (str): The name of the chain group. project_name (str, optional): The name of the project. Defaults to None. example_id (str or UUID, optional): The ID of the example. Defaults to None. tags (List[str], optional): The inheritable tags to apply to all runs. Defaults to None. Returns: AsyncCallbackManager: The async callback manager for the chain group. Example: >>> async with atrace_as_chain_group("group_name") as manager: ... # Use the async callback manager for the chain group ... await llm.apredict("Foo", callbacks=manager) """ cb = LangChainTracer( project_name=project_name, example_id=example_id, ) cm = AsyncCallbackManager.configure( inheritable_callbacks=[cb], inheritable_tags=tags ) run_manager = await cm.on_chain_start({"name": group_name}, {}) try: yield run_manager.get_child() finally:
try: yield run_manager.get_child() finally: await run_manager.on_chain_end({}) def _handle_event( handlers: List[BaseCallbackHandler], event_name: str, ignore_condition_name: Optional[str], *args: Any, **kwargs: Any, ) -> None: """Generic event handler for CallbackManager.""" message_strings: Optional[List[str]] = None for handler in handlers: try: if ignore_condition_name is None or not getattr( handler, ignore_condition_name ): getattr(handler, event_name)(*args, **kwargs) except NotImplementedError as e: if event_name == "on_chat_model_start": if message_strings is None: message_strings = [get_buffer_string(m) for m in args[1]] _handle_event( [handler], "on_llm_start", "ignore_llm", args[0], message_strings, *args[2:], **kwargs, ) else: logger.warning( f"Error in {handler.__class__.__name__}.{event_name} callback: {e}" ) except Exception as e: logger.warning( f"Error in {handler.__class__.__name__}.{event_name} callback: {e}" ) if handler.raise_error: raise e async def _ahandle_event_for_handler( handler: BaseCallbackHandler, event_name: str, ignore_condition_name: Optional[str], *args: Any, **kwargs: Any, ) -> None: try: if ignore_condition_name is None or not getattr(handler, ignore_condition_name):
if ignore_condition_name is None or not getattr(handler, ignore_condition_name): event = getattr(handler, event_name) if asyncio.iscoroutinefunction(event): await event(*args, **kwargs) else: if handler.run_inline: event(*args, **kwargs) else: await asyncio.get_event_loop().run_in_executor( None, functools.partial(event, *args, **kwargs) ) except NotImplementedError as e: if event_name == "on_chat_model_start": message_strings = [get_buffer_string(m) for m in args[1]] await _ahandle_event_for_handler( handler, "on_llm_start", "ignore_llm", args[0], message_strings, *args[2:], **kwargs, ) else: logger.warning( f"Error in {handler.__class__.__name__}.{event_name} callback: {e}" ) except Exception as e: logger.warning( f"Error in {handler.__class__.__name__}.{event_name} callback: {e}" ) if handler.raise_error: raise e async def _ahandle_event( handlers: List[BaseCallbackHandler], event_name: str, ignore_condition_name: Optional[str], *args: Any, **kwargs: Any, ) -> None: """Generic event handler for AsyncCallbackManager.""" for handler in [h for h in handlers if h.run_inline]: await _ahandle_event_for_handler( handler, event_name, ignore_condition_name, *args, **kwargs ) await asyncio.gather( *(
) await asyncio.gather( *( _ahandle_event_for_handler( handler, event_name, ignore_condition_name, *args, **kwargs ) for handler in handlers if not handler.run_inline ) ) BRM = TypeVar("BRM", bound="BaseRunManager") class BaseRunManager(RunManagerMixin): """Base class for run manager (a bound callback manager).""" def __init__( self, *, run_id: UUID, handlers: List[BaseCallbackHandler], inheritable_handlers: List[BaseCallbackHandler], parent_run_id: Optional[UUID] = None, tags: List[str], inheritable_tags: List[str], ) -> None: """Initialize the run manager. Args: run_id (UUID): The ID of the run. handlers (List[BaseCallbackHandler]): The list of handlers. inheritable_handlers (List[BaseCallbackHandler]): The list of inheritable handlers. parent_run_id (UUID, optional): The ID of the parent run. Defaults to None. tags (List[str]): The list of tags. inheritable_tags (List[str]): The list of inheritable tags. """ self.run_id = run_id self.handlers = handlers self.inheritable_handlers = inheritable_handlers self.tags = tags self.inheritable_tags = inheritable_tags self.parent_run_id = parent_run_id @classmethod def get_noop_manager(cls: Type[BRM]) -> BRM: """Return a manager that doesn't perform any operations. Returns: BaseRunManager: The noop manager. """
Returns: BaseRunManager: The noop manager. """ return cls( run_id=uuid4(), handlers=[], inheritable_handlers=[], tags=[], inheritable_tags=[], ) class RunManager(BaseRunManager): """Sync Run Manager.""" def on_text( self, text: str, **kwargs: Any, ) -> Any: """Run when text is received. Args: text (str): The received text. Returns: Any: The result of the callback. """ _handle_event( self.handlers, "on_text", None, text, run_id=self.run_id, parent_run_id=self.parent_run_id, **kwargs, ) class AsyncRunManager(BaseRunManager): """Async Run Manager.""" async def on_text( self, text: str, **kwargs: Any, ) -> Any: """Run when text is received. Args: text (str): The received text. Returns: Any: The result of the callback. """ await _ahandle_event( self.handlers, "on_text", None, text, run_id=self.run_id, parent_run_id=self.parent_run_id, **kwargs, ) class CallbackManagerForLLMRun(RunManager, LLMManagerMixin): """Callback manager for LLM run.""" def on_llm_new_token( self, token: str, **kwargs: Any, ) -> None: """Run when LLM generates a new token.
) -> None: """Run when LLM generates a new token. Args: token (str): The new token. """ _handle_event( self.handlers, "on_llm_new_token", "ignore_llm", token=token, run_id=self.run_id, parent_run_id=self.parent_run_id, **kwargs, ) def on_llm_end(self, response: LLMResult, **kwargs: Any) -> None: """Run when LLM ends running. Args: response (LLMResult): The LLM result. """ _handle_event( self.handlers, "on_llm_end", "ignore_llm", response, run_id=self.run_id, parent_run_id=self.parent_run_id, **kwargs, ) def on_llm_error( self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any, ) -> None: """Run when LLM errors. Args: error (Exception or KeyboardInterrupt): The error. """ _handle_event( self.handlers, "on_llm_error", "ignore_llm", error, run_id=self.run_id, parent_run_id=self.parent_run_id, **kwargs, ) class AsyncCallbackManagerForLLMRun(AsyncRunManager, LLMManagerMixin): """Async callback manager for LLM run.""" async def on_llm_new_token( self, token: str, **kwargs: Any, ) -> None: """Run when LLM generates a new token. Args:
"""Run when LLM generates a new token. Args: token (str): The new token. """ await _ahandle_event( self.handlers, "on_llm_new_token", "ignore_llm", token, run_id=self.run_id, parent_run_id=self.parent_run_id, **kwargs, ) async def on_llm_end(self, response: LLMResult, **kwargs: Any) -> None: """Run when LLM ends running. Args: response (LLMResult): The LLM result. """ await _ahandle_event( self.handlers, "on_llm_end", "ignore_llm", response, run_id=self.run_id, parent_run_id=self.parent_run_id, **kwargs, ) async def on_llm_error( self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any, ) -> None: """Run when LLM errors. Args: error (Exception or KeyboardInterrupt): The error. """ await _ahandle_event( self.handlers, "on_llm_error", "ignore_llm", error, run_id=self.run_id, parent_run_id=self.parent_run_id, **kwargs, ) class CallbackManagerForChainRun(RunManager, ChainManagerMixin): """Callback manager for chain run.""" def get_child(self, tag: Optional[str] = None) -> CallbackManager: """Get a child callback manager. Args: tag (str, optional): The tag for the child callback manager. Defaults to None.
Defaults to None. Returns: CallbackManager: The child callback manager. """ manager = CallbackManager(handlers=[], parent_run_id=self.run_id) manager.set_handlers(self.inheritable_handlers) manager.add_tags(self.inheritable_tags) if tag is not None: manager.add_tags([tag], False) return manager def on_chain_end(self, outputs: Dict[str, Any], **kwargs: Any) -> None: """Run when chain ends running. Args: outputs (Dict[str, Any]): The outputs of the chain. """ _handle_event( self.handlers, "on_chain_end", "ignore_chain", outputs, run_id=self.run_id, parent_run_id=self.parent_run_id, **kwargs, ) def on_chain_error( self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any, ) -> None: """Run when chain errors. Args: error (Exception or KeyboardInterrupt): The error. """ _handle_event( self.handlers, "on_chain_error", "ignore_chain", error, run_id=self.run_id, parent_run_id=self.parent_run_id, **kwargs, ) def on_agent_action(self, action: AgentAction, **kwargs: Any) -> Any: """Run when agent action is received. Args: action (AgentAction): The agent action. Returns: Any: The result of the callback. """ _handle_event( self.handlers, "on_agent_action", "ignore_agent", action,
"on_agent_action", "ignore_agent", action, run_id=self.run_id, parent_run_id=self.parent_run_id, **kwargs, ) def on_agent_finish(self, finish: AgentFinish, **kwargs: Any) -> Any: """Run when agent finish is received. Args: finish (AgentFinish): The agent finish. Returns: Any: The result of the callback. """ _handle_event( self.handlers, "on_agent_finish", "ignore_agent", finish, run_id=self.run_id, parent_run_id=self.parent_run_id, **kwargs, ) class AsyncCallbackManagerForChainRun(AsyncRunManager, ChainManagerMixin): """Async callback manager for chain run.""" def get_child(self, tag: Optional[str] = None) -> AsyncCallbackManager: """Get a child callback manager. Args: tag (str, optional): The tag for the child callback manager. Defaults to None. Returns: AsyncCallbackManager: The child callback manager. """ manager = AsyncCallbackManager(handlers=[], parent_run_id=self.run_id) manager.set_handlers(self.inheritable_handlers) manager.add_tags(self.inheritable_tags) if tag is not None: manager.add_tags([tag], False) return manager async def on_chain_end(self, outputs: Dict[str, Any], **kwargs: Any) -> None: """Run when chain ends running. Args: outputs (Dict[str, Any]): The outputs of the chain. """ await _ahandle_event( self.handlers, "on_chain_end", "ignore_chain",
self.handlers, "on_chain_end", "ignore_chain", outputs, run_id=self.run_id, parent_run_id=self.parent_run_id, **kwargs, ) async def on_chain_error( self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any, ) -> None: """Run when chain errors. Args: error (Exception or KeyboardInterrupt): The error. """ await _ahandle_event( self.handlers, "on_chain_error", "ignore_chain", error, run_id=self.run_id, parent_run_id=self.parent_run_id, **kwargs, ) async def on_agent_action(self, action: AgentAction, **kwargs: Any) -> Any: """Run when agent action is received. Args: action (AgentAction): The agent action. Returns: Any: The result of the callback. """ await _ahandle_event( self.handlers, "on_agent_action", "ignore_agent", action, run_id=self.run_id, parent_run_id=self.parent_run_id, **kwargs, ) async def on_agent_finish(self, finish: AgentFinish, **kwargs: Any) -> Any: """Run when agent finish is received. Args: finish (AgentFinish): The agent finish. Returns: Any: The result of the callback. """ await _ahandle_event( self.handlers, "on_agent_finish", "ignore_agent", finish, run_id=self.run_id, parent_run_id=self.parent_run_id,
run_id=self.run_id, parent_run_id=self.parent_run_id, **kwargs, ) class CallbackManagerForToolRun(RunManager, ToolManagerMixin): """Callback manager for tool run.""" def get_child(self, tag: Optional[str] = None) -> CallbackManager: """Get a child callback manager. Args: tag (str, optional): The tag for the child callback manager. Defaults to None. Returns: CallbackManager: The child callback manager. """ manager = CallbackManager(handlers=[], parent_run_id=self.run_id) manager.set_handlers(self.inheritable_handlers) manager.add_tags(self.inheritable_tags) if tag is not None: manager.add_tags([tag], False) return manager def on_tool_end( self, output: str, **kwargs: Any, ) -> None: """Run when tool ends running. Args: output (str): The output of the tool. """ _handle_event( self.handlers, "on_tool_end", "ignore_agent", output, run_id=self.run_id, parent_run_id=self.parent_run_id, **kwargs, ) def on_tool_error( self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any, ) -> None: """Run when tool errors. Args: error (Exception or KeyboardInterrupt): The error. """ _handle_event( self.handlers, "on_tool_error", "ignore_agent", error, run_id=self.run_id, parent_run_id=self.parent_run_id,
run_id=self.run_id, parent_run_id=self.parent_run_id, **kwargs, ) class AsyncCallbackManagerForToolRun(AsyncRunManager, ToolManagerMixin): """Async callback manager for tool run.""" def get_child(self, tag: Optional[str] = None) -> AsyncCallbackManager: """Get a child callback manager. Args: tag (str, optional): The tag to add to the child callback manager. Defaults to None. Returns: AsyncCallbackManager: The child callback manager. """ manager = AsyncCallbackManager(handlers=[], parent_run_id=self.run_id) manager.set_handlers(self.inheritable_handlers) manager.add_tags(self.inheritable_tags) if tag is not None: manager.add_tags([tag], False) return manager async def on_tool_end(self, output: str, **kwargs: Any) -> None: """Run when tool ends running. Args: output (str): The output of the tool. """ await _ahandle_event( self.handlers, "on_tool_end", "ignore_agent", output, run_id=self.run_id, parent_run_id=self.parent_run_id, **kwargs, ) async def on_tool_error( self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any, ) -> None: """Run when tool errors. Args: error (Exception or KeyboardInterrupt): The error. """ await _ahandle_event( self.handlers, "on_tool_error", "ignore_agent", error, run_id=self.run_id, parent_run_id=self.parent_run_id,
run_id=self.run_id, parent_run_id=self.parent_run_id, **kwargs, ) class CallbackManager(BaseCallbackManager): """Callback manager that can be used to handle callbacks from langchain.""" def on_llm_start( self, serialized: Dict[str, Any], prompts: List[str], **kwargs: Any, ) -> List[CallbackManagerForLLMRun]: """Run when LLM starts running. Args: serialized (Dict[str, Any]): The serialized LLM. prompts (List[str]): The list of prompts. run_id (UUID, optional): The ID of the run. Defaults to None. Returns: List[CallbackManagerForLLMRun]: A callback manager for each prompt as an LLM run. """ managers = [] for prompt in prompts: run_id_ = uuid4() _handle_event( self.handlers, "on_llm_start", "ignore_llm", serialized, [prompt], run_id=run_id_, parent_run_id=self.parent_run_id, tags=self.tags, **kwargs, ) managers.append( CallbackManagerForLLMRun( run_id=run_id_, handlers=self.handlers, inheritable_handlers=self.inheritable_handlers, parent_run_id=self.parent_run_id, tags=self.tags, inheritable_tags=self.inheritable_tags, ) ) return managers def on_chat_model_start( self, serialized: Dict[str, Any], messages: List[List[BaseMessage]], **kwargs: Any,
messages: List[List[BaseMessage]], **kwargs: Any, ) -> List[CallbackManagerForLLMRun]: """Run when LLM starts running. Args: serialized (Dict[str, Any]): The serialized LLM. messages (List[List[BaseMessage]]): The list of messages. run_id (UUID, optional): The ID of the run. Defaults to None. Returns: List[CallbackManagerForLLMRun]: A callback manager for each list of messages as an LLM run. """ managers = [] for message_list in messages: run_id_ = uuid4() _handle_event( self.handlers, "on_chat_model_start", "ignore_chat_model", serialized, [message_list], run_id=run_id_, parent_run_id=self.parent_run_id, tags=self.tags, **kwargs, ) managers.append( CallbackManagerForLLMRun( run_id=run_id_, handlers=self.handlers, inheritable_handlers=self.inheritable_handlers, parent_run_id=self.parent_run_id, tags=self.tags, inheritable_tags=self.inheritable_tags, ) ) return managers def on_chain_start( self, serialized: Dict[str, Any], inputs: Dict[str, Any], run_id: Optional[UUID] = None, **kwargs: Any, ) -> CallbackManagerForChainRun: """Run when chain starts running. Args: serialized (Dict[str, Any]): The serialized chain. inputs (Dict[str, Any]): The inputs to the chain.
inputs (Dict[str, Any]): The inputs to the chain. run_id (UUID, optional): The ID of the run. Defaults to None. Returns: CallbackManagerForChainRun: The callback manager for the chain run. """ if run_id is None: run_id = uuid4() _handle_event( self.handlers, "on_chain_start", "ignore_chain", serialized, inputs, run_id=run_id, parent_run_id=self.parent_run_id, tags=self.tags, **kwargs, ) return CallbackManagerForChainRun( run_id=run_id, handlers=self.handlers, inheritable_handlers=self.inheritable_handlers, parent_run_id=self.parent_run_id, tags=self.tags, inheritable_tags=self.inheritable_tags, ) def on_tool_start( self, serialized: Dict[str, Any], input_str: str, run_id: Optional[UUID] = None, parent_run_id: Optional[UUID] = None, **kwargs: Any, ) -> CallbackManagerForToolRun: """Run when tool starts running. Args: serialized (Dict[str, Any]): The serialized tool. input_str (str): The input to the tool. run_id (UUID, optional): The ID of the run. Defaults to None. parent_run_id (UUID, optional): The ID of the parent run. Defaults to None. Returns: CallbackManagerForToolRun: The callback manager for the tool run. """ if run_id is None: run_id = uuid4() _handle_event( self.handlers,
run_id = uuid4() _handle_event( self.handlers, "on_tool_start", "ignore_agent", serialized, input_str, run_id=run_id, parent_run_id=self.parent_run_id, tags=self.tags, **kwargs, ) return CallbackManagerForToolRun( run_id=run_id, handlers=self.handlers, inheritable_handlers=self.inheritable_handlers, parent_run_id=self.parent_run_id, tags=self.tags, inheritable_tags=self.inheritable_tags, ) @classmethod def configure( cls, inheritable_callbacks: Callbacks = None, local_callbacks: Callbacks = None, verbose: bool = False, inheritable_tags: Optional[List[str]] = None, local_tags: Optional[List[str]] = None, ) -> CallbackManager: """Configure the callback manager. Args: inheritable_callbacks (Optional[Callbacks], optional): The inheritable callbacks. Defaults to None. local_callbacks (Optional[Callbacks], optional): The local callbacks. Defaults to None. verbose (bool, optional): Whether to enable verbose mode. Defaults to False. inheritable_tags (Optional[List[str]], optional): The inheritable tags. Defaults to None. local_tags (Optional[List[str]], optional): The local tags. Defaults to None. Returns: CallbackManager: The configured callback manager. """ return _configure( cls, inheritable_callbacks, local_callbacks, verbose, inheritable_tags, local_tags, ) class AsyncCallbackManager(BaseCallbackManager):
local_tags, ) class AsyncCallbackManager(BaseCallbackManager): """Async callback manager that can be used to handle callbacks from LangChain.""" @property def is_async(self) -> bool: """Return whether the handler is async.""" return True async def on_llm_start( self, serialized: Dict[str, Any], prompts: List[str], **kwargs: Any, ) -> List[AsyncCallbackManagerForLLMRun]: """Run when LLM starts running. Args: serialized (Dict[str, Any]): The serialized LLM. prompts (List[str]): The list of prompts. run_id (UUID, optional): The ID of the run. Defaults to None. Returns: List[AsyncCallbackManagerForLLMRun]: The list of async callback managers, one for each LLM Run corresponding to each prompt. """ tasks = [] managers = [] for prompt in prompts: run_id_ = uuid4() tasks.append( _ahandle_event( self.handlers, "on_llm_start", "ignore_llm", serialized, [prompt], run_id=run_id_, parent_run_id=self.parent_run_id, tags=self.tags, **kwargs, ) ) managers.append( AsyncCallbackManagerForLLMRun( run_id=run_id_, handlers=self.handlers, inheritable_handlers=self.inheritable_handlers, parent_run_id=self.parent_run_id, tags=self.tags, inheritable_tags=self.inheritable_tags, ) ) await asyncio.gather(*tasks) return managers
) ) await asyncio.gather(*tasks) return managers async def on_chat_model_start( self, serialized: Dict[str, Any], messages: List[List[BaseMessage]], **kwargs: Any, ) -> Any: """Run when LLM starts running. Args: serialized (Dict[str, Any]): The serialized LLM. messages (List[List[BaseMessage]]): The list of messages. run_id (UUID, optional): The ID of the run. Defaults to None. Returns: List[AsyncCallbackManagerForLLMRun]: The list of async callback managers, one for each LLM Run corresponding to each inner message list. """ tasks = [] managers = [] for message_list in messages: run_id_ = uuid4() tasks.append( _ahandle_event( self.handlers, "on_chat_model_start", "ignore_chat_model", serialized, [message_list], run_id=run_id_, parent_run_id=self.parent_run_id, tags=self.tags, **kwargs, ) ) managers.append( AsyncCallbackManagerForLLMRun( run_id=run_id_, handlers=self.handlers, inheritable_handlers=self.inheritable_handlers, parent_run_id=self.parent_run_id, tags=self.tags, inheritable_tags=self.inheritable_tags, ) ) await asyncio.gather(*tasks) return managers async def on_chain_start( self, serialized: Dict[str, Any], inputs: Dict[str, Any],
serialized: Dict[str, Any], inputs: Dict[str, Any], run_id: Optional[UUID] = None, **kwargs: Any, ) -> AsyncCallbackManagerForChainRun: """Run when chain starts running. Args: serialized (Dict[str, Any]): The serialized chain. inputs (Dict[str, Any]): The inputs to the chain. run_id (UUID, optional): The ID of the run. Defaults to None. Returns: AsyncCallbackManagerForChainRun: The async callback manager for the chain run. """ if run_id is None: run_id = uuid4() await _ahandle_event( self.handlers, "on_chain_start", "ignore_chain", serialized, inputs, run_id=run_id, parent_run_id=self.parent_run_id, tags=self.tags, **kwargs, ) return AsyncCallbackManagerForChainRun( run_id=run_id, handlers=self.handlers, inheritable_handlers=self.inheritable_handlers, parent_run_id=self.parent_run_id, tags=self.tags, inheritable_tags=self.inheritable_tags, ) async def on_tool_start( self, serialized: Dict[str, Any], input_str: str, run_id: Optional[UUID] = None, parent_run_id: Optional[UUID] = None, **kwargs: Any, ) -> AsyncCallbackManagerForToolRun: """Run when tool starts running. Args: serialized (Dict[str, Any]): The serialized tool. input_str (str): The input to the tool.
input_str (str): The input to the tool. run_id (UUID, optional): The ID of the run. Defaults to None. parent_run_id (UUID, optional): The ID of the parent run. Defaults to None. Returns: AsyncCallbackManagerForToolRun: The async callback manager for the tool run. """ if run_id is None: run_id = uuid4() await _ahandle_event( self.handlers, "on_tool_start", "ignore_agent", serialized, input_str, run_id=run_id, parent_run_id=self.parent_run_id, tags=self.tags, **kwargs, ) return AsyncCallbackManagerForToolRun( run_id=run_id, handlers=self.handlers, inheritable_handlers=self.inheritable_handlers, parent_run_id=self.parent_run_id, tags=self.tags, inheritable_tags=self.inheritable_tags, ) @classmethod def configure( cls, inheritable_callbacks: Callbacks = None, local_callbacks: Callbacks = None, verbose: bool = False, inheritable_tags: Optional[List[str]] = None, local_tags: Optional[List[str]] = None, ) -> AsyncCallbackManager: """Configure the async callback manager. Args: inheritable_callbacks (Optional[Callbacks], optional): The inheritable callbacks. Defaults to None. local_callbacks (Optional[Callbacks], optional): The local callbacks. Defaults to None. verbose (bool, optional): Whether to enable verbose mode. Defaults to False. inheritable_tags (Optional[List[str]], optional): The inheritable tags. Defaults to None.
Defaults to None. local_tags (Optional[List[str]], optional): The local tags. Defaults to None. Returns: AsyncCallbackManager: The configured async callback manager. """ return _configure( cls, inheritable_callbacks, local_callbacks, verbose, inheritable_tags, local_tags, ) T = TypeVar("T", CallbackManager, AsyncCallbackManager) def env_var_is_set(env_var: str) -> bool: """Check if an environment variable is set. Args: env_var (str): The name of the environment variable. Returns: bool: True if the environment variable is set, False otherwise. """ return env_var in os.environ and os.environ[env_var] not in ( "", "0", "false", "False", ) def _configure( callback_manager_cls: Type[T], inheritable_callbacks: Callbacks = None, local_callbacks: Callbacks = None, verbose: bool = False, inheritable_tags: Optional[List[str]] = None, local_tags: Optional[List[str]] = None, ) -> T: """Configure the callback manager. Args: callback_manager_cls (Type[T]): The callback manager class. inheritable_callbacks (Optional[Callbacks], optional): The inheritable callbacks. Defaults to None. local_callbacks (Optional[Callbacks], optional): The local callbacks. Defaults to None. verbose (bool, optional): Whether to enable verbose mode. Defaults to False. inheritable_tags (Optional[List[str]], optional): The inheritable tags. Defaults to None.
Defaults to None. local_tags (Optional[List[str]], optional): The local tags. Defaults to None. Returns: T: The configured callback manager. """ callback_manager = callback_manager_cls(handlers=[]) if inheritable_callbacks or local_callbacks: if isinstance(inheritable_callbacks, list) or inheritable_callbacks is None: inheritable_callbacks_ = inheritable_callbacks or [] callback_manager = callback_manager_cls( handlers=inheritable_callbacks_.copy(), inheritable_handlers=inheritable_callbacks_.copy(), ) else: callback_manager = callback_manager_cls( handlers=inheritable_callbacks.handlers, inheritable_handlers=inheritable_callbacks.inheritable_handlers, parent_run_id=inheritable_callbacks.parent_run_id, tags=inheritable_callbacks.tags, inheritable_tags=inheritable_callbacks.inheritable_tags, ) local_handlers_ = ( local_callbacks if isinstance(local_callbacks, list) else (local_callbacks.handlers if local_callbacks else []) ) for handler in local_handlers_: callback_manager.add_handler(handler, False) if inheritable_tags or local_tags: callback_manager.add_tags(inheritable_tags or []) callback_manager.add_tags(local_tags or [], False) tracer = tracing_callback_var.get() wandb_tracer = wandb_tracing_callback_var.get() open_ai = openai_callback_var.get() tracing_enabled_ = ( env_var_is_set("LANGCHAIN_TRACING") or tracer is not None or env_var_is_set("LANGCHAIN_HANDLER") ) wandb_tracing_enabled_ = ( env_var_is_set("LANGCHAIN_WANDB_TRACING") or wandb_tracer is not None
) tracer_v2 = tracing_v2_callback_var.get() tracing_v2_enabled_ = ( env_var_is_set("LANGCHAIN_TRACING_V2") or tracer_v2 is not None ) tracer_project = os.environ.get( "LANGCHAIN_PROJECT", os.environ.get("LANGCHAIN_SESSION", "default") ) debug = _get_debug() if ( verbose or debug or tracing_enabled_ or tracing_v2_enabled_ or wandb_tracing_enabled_ or open_ai is not None ): if verbose and not any( isinstance(handler, StdOutCallbackHandler) for handler in callback_manager.handlers ): if debug: pass else: callback_manager.add_handler(StdOutCallbackHandler(), False) if debug and not any( isinstance(handler, ConsoleCallbackHandler) for handler in callback_manager.handlers ): callback_manager.add_handler(ConsoleCallbackHandler(), True) if tracing_enabled_ and not any( isinstance(handler, LangChainTracerV1) for handler in callback_manager.handlers ): if tracer: callback_manager.add_handler(tracer, True) else: handler = LangChainTracerV1() handler.load_session(tracer_project) callback_manager.add_handler(handler, True) if wandb_tracing_enabled_ and not any( isinstance(handler, WandbTracer) for handler in callback_manager.handlers ): if wandb_tracer: callback_manager.add_handler(wandb_tracer, True) else: handler = WandbTracer() callback_manager.add_handler(handler, True) if tracing_v2_enabled_ and not any(
isinstance(handler, LangChainTracer) for handler in callback_manager.handlers ): if tracer_v2: callback_manager.add_handler(tracer_v2, True) else: try: handler = LangChainTracer(project_name=tracer_project) callback_manager.add_handler(handler, True) except Exception as e: logger.warning( "Unable to load requested LangChainTracer." " To disable this warning," " unset the LANGCHAIN_TRACING_V2 environment variable.", e, ) if open_ai is not None and not any( isinstance(handler, OpenAICallbackHandler) for handler in callback_manager.handlers ): callback_manager.add_handler(open_ai, True) return callback_manager
https://api.python.langchain.com/en/stable/_modules/langchain/callbacks/manager.html
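Two of the context managers above, in a short hedged sketch. Assumptions: an OpenAI API key is set, and (for the grouped-trace example) LangSmith tracing is configured, e.g. LANGCHAIN_TRACING_V2=true with a valid API key.

from langchain.callbacks import get_openai_callback
from langchain.callbacks.manager import trace_as_chain_group
from langchain.llms import OpenAI

llm = OpenAI(temperature=0)

# Token and cost accounting for everything run inside the block.
with get_openai_callback() as cb:
    llm("What is the capital of France?")
    print(cb.total_tokens, cb.total_cost)

# Group otherwise-independent calls into a single traced run.
with trace_as_chain_group("demo-group") as group_manager:
    llm.predict("Foo", callbacks=group_manager)
    llm.predict("Bar", callbacks=group_manager)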
Source code for langchain.callbacks.argilla_callback import os import warnings from typing import Any, Dict, List, Optional, Union from langchain.callbacks.base import BaseCallbackHandler from langchain.schema import AgentAction, AgentFinish, LLMResult [docs]class ArgillaCallbackHandler(BaseCallbackHandler): """Callback Handler that logs into Argilla. Args: dataset_name: name of the `FeedbackDataset` in Argilla. Note that it must exist in advance. If you need help on how to create a `FeedbackDataset` in Argilla, please visit https://docs.argilla.io/en/latest/guides/llms/practical_guides/use_argilla_callback_in_langchain.html. workspace_name: name of the workspace in Argilla where the specified `FeedbackDataset` lives in. Defaults to `None`, which means that the default workspace will be used. api_url: URL of the Argilla Server that we want to use, and where the `FeedbackDataset` lives in. Defaults to `None`, which means that either `ARGILLA_API_URL` environment variable or the default http://localhost:6900 will be used. api_key: API Key to connect to the Argilla Server. Defaults to `None`, which means that either `ARGILLA_API_KEY` environment variable or the default `argilla.apikey` will be used. Raises: ImportError: if the `argilla` package is not installed. ConnectionError: if the connection to Argilla fails. FileNotFoundError: if the `FeedbackDataset` retrieval from Argilla fails. Examples: >>> from langchain.llms import OpenAI >>> from langchain.callbacks import ArgillaCallbackHandler >>> argilla_callback = ArgillaCallbackHandler(
>>> argilla_callback = ArgillaCallbackHandler( ... dataset_name="my-dataset", ... workspace_name="my-workspace", ... api_url="http://localhost:6900", ... api_key="argilla.apikey", ... ) >>> llm = OpenAI( ... temperature=0, ... callbacks=[argilla_callback], ... verbose=True, ... openai_api_key="API_KEY_HERE", ... ) >>> llm.generate([ ... "What is the best NLP-annotation tool out there? (no bias at all)", ... ]) "Argilla, no doubt about it." """ def __init__( self, dataset_name: str, workspace_name: Optional[str] = None, api_url: Optional[str] = None, api_key: Optional[str] = None, ) -> None: """Initializes the `ArgillaCallbackHandler`. Args: dataset_name: name of the `FeedbackDataset` in Argilla. Note that it must exist in advance. If you need help on how to create a `FeedbackDataset` in Argilla, please visit https://docs.argilla.io/en/latest/guides/llms/practical_guides/use_argilla_callback_in_langchain.html. workspace_name: name of the workspace in Argilla where the specified `FeedbackDataset` lives in. Defaults to `None`, which means that the default workspace will be used. api_url: URL of the Argilla Server that we want to use, and where the `FeedbackDataset` lives in. Defaults to `None`, which means that either
`FeedbackDataset` lives in. Defaults to `None`, which means that either `ARGILLA_API_URL` environment variable or the default http://localhost:6900 will be used. api_key: API Key to connect to the Argilla Server. Defaults to `None`, which means that either `ARGILLA_API_KEY` environment variable or the default `argilla.apikey` will be used. Raises: ImportError: if the `argilla` package is not installed. ConnectionError: if the connection to Argilla fails. FileNotFoundError: if the `FeedbackDataset` retrieval from Argilla fails. """ super().__init__() # Import Argilla (not via `import_argilla` to keep hints in IDEs) try: import argilla as rg # noqa: F401 except ImportError: raise ImportError( "To use the Argilla callback manager you need to have the `argilla` " "Python package installed. Please install it with `pip install argilla`" ) # Show a warning message if Argilla will assume the default values will be used if api_url is None and os.getenv("ARGILLA_API_URL") is None: warnings.warn( ( "Since `api_url` is None, and the env var `ARGILLA_API_URL` is not" " set, it will default to `http://localhost:6900`." ), ) if api_key is None and os.getenv("ARGILLA_API_KEY") is None: warnings.warn( ( "Since `api_key` is None, and the env var `ARGILLA_API_KEY` is not" " set, it will default to `argilla.apikey`."
" set, it will default to `argilla.apikey`." ), ) # Connect to Argilla with the provided credentials, if applicable try: rg.init( api_key=api_key, api_url=api_url, ) except Exception as e: raise ConnectionError( f"Could not connect to Argilla with exception: '{e}'.\n" "Please check your `api_key` and `api_url`, and make sure that " "the Argilla server is up and running. If the problem persists " "please report it to https://github.com/argilla-io/argilla/issues " "with the label `langchain`." ) from e # Set the Argilla variables self.dataset_name = dataset_name self.workspace_name = workspace_name or rg.get_workspace() # Retrieve the `FeedbackDataset` from Argilla (without existing records) try: self.dataset = rg.FeedbackDataset.from_argilla( name=self.dataset_name, workspace=self.workspace_name, with_records=False, ) except Exception as e: raise FileNotFoundError( "`FeedbackDataset` retrieval from Argilla failed with exception:" f" '{e}'.\nPlease check that the dataset with" f" name={self.dataset_name} in the" f" workspace={self.workspace_name} exists in advance. If you need help" " on how to create a `langchain`-compatible `FeedbackDataset` in" " Argilla, please visit" " https://docs.argilla.io/en/latest/guides/llms/practical_guides/use_argilla_callback_in_langchain.html." # noqa: E501
" If the problem persists please report it to" " https://github.com/argilla-io/argilla/issues with the label" " `langchain`." ) from e supported_fields = ["prompt", "response"] if supported_fields != [field.name for field in self.dataset.fields]: raise ValueError( f"`FeedbackDataset` with name={self.dataset_name} in the" f" workspace={self.workspace_name} " "had fields that are not supported yet for the `langchain` integration." " Supported fields are: " f"{supported_fields}, and the current `FeedbackDataset` fields are" f" {[field.name for field in self.dataset.fields]}. " "For more information on how to create a `langchain`-compatible" " `FeedbackDataset` in Argilla, please visit" " https://docs.argilla.io/en/latest/guides/llms/practical_guides/use_argilla_callback_in_langchain.html." # noqa: E501 ) self.prompts: Dict[str, List[str]] = {} warnings.warn( ( "The `ArgillaCallbackHandler` is currently in beta and is subject to " "change based on updates to `langchain`. Please report any issues to " "https://github.com/argilla-io/argilla/issues with the tag `langchain`." ), ) [docs] def on_llm_start( self, serialized: Dict[str, Any], prompts: List[str], **kwargs: Any ) -> None: """Save the prompts in memory when an LLM starts.""" self.prompts.update({str(kwargs["parent_run_id"] or kwargs["run_id"]): prompts})
    def on_llm_new_token(self, token: str, **kwargs: Any) -> None:
        """Do nothing when a new token is generated."""
        pass

    def on_llm_end(self, response: LLMResult, **kwargs: Any) -> None:
        """Log records to Argilla when an LLM ends."""
        # Do nothing if there's a parent_run_id, since we will log the records when
        # the chain ends
        if kwargs["parent_run_id"]:
            return

        # Creates the records and adds them to the `FeedbackDataset`
        prompts = self.prompts[str(kwargs["run_id"])]
        for prompt, generations in zip(prompts, response.generations):
            self.dataset.add_records(
                records=[
                    {
                        "fields": {
                            "prompt": prompt,
                            "response": generation.text.strip(),
                        },
                    }
                    for generation in generations
                ]
            )

        # Push the records to Argilla
        self.dataset.push_to_argilla()

        # Pop the current run from `self.prompts`
        self.prompts.pop(str(kwargs["run_id"]))

    def on_llm_error(
        self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any
    ) -> None:
        """Do nothing when LLM outputs an error."""
        pass

    def on_chain_start(
        self, serialized: Dict[str, Any], inputs: Dict[str, Any], **kwargs: Any
    ) -> None:
        """If the key `input` is in `inputs`, then save it in `self.prompts` using
        either the `parent_run_id` or the `run_id` as the key. This is done so that
        we don't log the same input prompt twice, once when the LLM starts and once
        when the chain starts.
        """
        if "input" in inputs:
            self.prompts.update(
                {
                    str(kwargs["parent_run_id"] or kwargs["run_id"]): (
                        inputs["input"]
                        if isinstance(inputs["input"], list)
                        else [inputs["input"]]
                    )
                }
            )

    def on_chain_end(self, outputs: Dict[str, Any], **kwargs: Any) -> None:
        """If either the `parent_run_id` or the `run_id` is in `self.prompts`, then
        log the outputs to Argilla, and pop the run from `self.prompts`. The behavior
        differs if the output is a list or not.
        """
        if not any(
            key in self.prompts
            for key in [str(kwargs["parent_run_id"]), str(kwargs["run_id"])]
        ):
            return
        prompts = self.prompts.get(str(kwargs["parent_run_id"])) or self.prompts.get(
            str(kwargs["run_id"])
        )
        for chain_output_key, chain_output_val in outputs.items():
            if isinstance(chain_output_val, list):
                # Creates the records and adds them to the `FeedbackDataset`
                self.dataset.add_records(
                    records=[
                        {
                            "fields": {
                                "prompt": prompt,
                                "response": output["text"].strip(),
                            },
                        }
                        for prompt, output in zip(
                            prompts, chain_output_val  # type: ignore
                        )
                    ]
                )
            else:
                # Creates the records and adds them to the `FeedbackDataset`
                self.dataset.add_records(
                    records=[
                        {
                            "fields": {
                                "prompt": " ".join(prompts),  # type: ignore
                                "response": chain_output_val.strip(),
                            },
                        }
                    ]
                )

        # Push the records to Argilla
        self.dataset.push_to_argilla()

        # Pop the current run from `self.prompts`
        if str(kwargs["parent_run_id"]) in self.prompts:
            self.prompts.pop(str(kwargs["parent_run_id"]))
        if str(kwargs["run_id"]) in self.prompts:
            self.prompts.pop(str(kwargs["run_id"]))

    def on_chain_error(
        self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any
    ) -> None:
        """Do nothing when LLM chain outputs an error."""
        pass

    def on_tool_start(
        self,
        serialized: Dict[str, Any],
        input_str: str,
        **kwargs: Any,
    ) -> None:
        """Do nothing when tool starts."""
        pass

    def on_agent_action(self, action: AgentAction, **kwargs: Any) -> Any:
        """Do nothing when agent takes a specific action."""
        pass

    def on_tool_end(
        self,
        output: str,
        observation_prefix: Optional[str] = None,
        llm_prefix: Optional[str] = None,
        **kwargs: Any,
    ) -> None:
        """Do nothing when tool ends."""
        pass

    def on_tool_error(
        self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any
    ) -> None:
        """Do nothing when tool outputs an error."""
        pass
    def on_text(self, text: str, **kwargs: Any) -> None:
        """Do nothing"""
        pass

    def on_agent_finish(self, finish: AgentFinish, **kwargs: Any) -> None:
        """Do nothing"""
        pass
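# A minimal end-to-end sketch of the handler above attached to a chain; the model,
# prompt text, and dataset name are illustrative, and it assumes a running Argilla
# server with a compatible `FeedbackDataset`:
#
#     from langchain.callbacks import ArgillaCallbackHandler
#     from langchain.chains import LLMChain
#     from langchain.llms import OpenAI
#     from langchain.prompts import PromptTemplate
#
#     argilla_callback = ArgillaCallbackHandler(dataset_name="my-dataset")
#     llm = OpenAI(temperature=0, callbacks=[argilla_callback])
#     chain = LLMChain(
#         llm=llm,
#         prompt=PromptTemplate.from_template("Summarize this: {text}"),
#         callbacks=[argilla_callback],
#     )
#     chain.run("LangChain callback handlers forward run events to external tools.")
#
# Because `on_llm_end` returns early whenever a `parent_run_id` is present, each
# prompt/response pair is pushed to Argilla exactly once, by `on_chain_end`, even
# though the handler is attached to both the LLM and the chain.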
Source code for langchain.callbacks.aim_callback

from copy import deepcopy
from typing import Any, Dict, List, Optional, Union

from langchain.callbacks.base import BaseCallbackHandler
from langchain.schema import AgentAction, AgentFinish, LLMResult


def import_aim() -> Any:
    """Import the aim python package and raise an error if it is not installed."""
    try:
        import aim
    except ImportError:
        raise ImportError(
            "To use the Aim callback manager you need to have the"
            " `aim` python package installed."
            " Please install it with `pip install aim`"
        )
    return aim


class BaseMetadataCallbackHandler:
    """This class handles the metadata and associated function states for callbacks.

    Attributes:
        step (int): The current step.
        starts (int): The number of times the start method has been called.
        ends (int): The number of times the end method has been called.
        errors (int): The number of times the error method has been called.
        text_ctr (int): The number of times the text method has been called.
        ignore_llm_ (bool): Whether to ignore llm callbacks.
        ignore_chain_ (bool): Whether to ignore chain callbacks.
        ignore_agent_ (bool): Whether to ignore agent callbacks.
        always_verbose_ (bool): Whether to always be verbose.
        chain_starts (int): The number of times the chain start method has been called.
        chain_ends (int): The number of times the chain end method has been called.
        llm_starts (int): The number of times the llm start method has been called.
        llm_ends (int): The number of times the llm end method has been called.
        llm_streams (int): The number of times the llm new token method has been
            called.
        tool_starts (int): The number of times the tool start method has been called.
        tool_ends (int): The number of times the tool end method has been called.
        agent_ends (int): The number of times the agent end method has been called.
    """

    def __init__(self) -> None:
        self.step = 0
        self.starts = 0
        self.ends = 0
        self.errors = 0
        self.text_ctr = 0
        self.ignore_llm_ = False
        self.ignore_chain_ = False
        self.ignore_agent_ = False
        self.always_verbose_ = False
        self.chain_starts = 0
        self.chain_ends = 0
        self.llm_starts = 0
        self.llm_ends = 0
        self.llm_streams = 0
        self.tool_starts = 0
        self.tool_ends = 0
        self.agent_ends = 0

    @property
    def always_verbose(self) -> bool:
        """Whether to call verbose callbacks even if verbose is False."""
        return self.always_verbose_

    @property
    def ignore_llm(self) -> bool:
        """Whether to ignore LLM callbacks."""
        return self.ignore_llm_

    @property
    def ignore_chain(self) -> bool:
        """Whether to ignore chain callbacks."""
        return self.ignore_chain_

    @property
    def ignore_agent(self) -> bool:
        """Whether to ignore agent callbacks."""
        return self.ignore_agent_

    def get_custom_callback_meta(self) -> Dict[str, Any]:
        return {
            "step": self.step,
            "starts": self.starts,
"step": self.step, "starts": self.starts, "ends": self.ends, "errors": self.errors, "text_ctr": self.text_ctr, "chain_starts": self.chain_starts, "chain_ends": self.chain_ends, "llm_starts": self.llm_starts, "llm_ends": self.llm_ends, "llm_streams": self.llm_streams, "tool_starts": self.tool_starts, "tool_ends": self.tool_ends, "agent_ends": self.agent_ends, } def reset_callback_meta(self) -> None: """Reset the callback metadata.""" self.step = 0 self.starts = 0 self.ends = 0 self.errors = 0 self.text_ctr = 0 self.ignore_llm_ = False self.ignore_chain_ = False self.ignore_agent_ = False self.always_verbose_ = False self.chain_starts = 0 self.chain_ends = 0 self.llm_starts = 0 self.llm_ends = 0 self.llm_streams = 0 self.tool_starts = 0 self.tool_ends = 0 self.agent_ends = 0 return None [docs]class AimCallbackHandler(BaseMetadataCallbackHandler, BaseCallbackHandler): """Callback Handler that logs to Aim. Parameters: repo (:obj:`str`, optional): Aim repository path or Repo object to which Run object is bound. If skipped, default Repo is used. experiment_name (:obj:`str`, optional): Sets Run's `experiment` property. 'default' if not specified. Can be used later to query runs/sequences.
        system_tracking_interval (:obj:`int`, optional): Sets the tracking interval
            in seconds for system usage metrics (CPU, Memory, etc.). Set to `None`
            to disable system metrics tracking.
        log_system_params (:obj:`bool`, optional): Enable/Disable logging of system
            params such as installed packages, git info, environment variables, etc.

    This handler utilizes the associated callback method, formats the input of each
    callback function with metadata regarding the state of the LLM run, and then
    logs the response to Aim.
    """

    def __init__(
        self,
        repo: Optional[str] = None,
        experiment_name: Optional[str] = None,
        system_tracking_interval: Optional[int] = 10,
        log_system_params: bool = True,
    ) -> None:
        """Initialize callback handler."""
        super().__init__()

        aim = import_aim()
        self.repo = repo
        self.experiment_name = experiment_name
        self.system_tracking_interval = system_tracking_interval
        self.log_system_params = log_system_params
        self._run = aim.Run(
            repo=self.repo,
            experiment=self.experiment_name,
            system_tracking_interval=self.system_tracking_interval,
            log_system_params=self.log_system_params,
        )
        self._run_hash = self._run.hash
        self.action_records: list = []

    def setup(self, **kwargs: Any) -> None:
        aim = import_aim()

        if not self._run:
            if self._run_hash:
                self._run = aim.Run(
                    self._run_hash,
                    repo=self.repo,
                    system_tracking_interval=self.system_tracking_interval,
                )
            else:
                self._run = aim.Run(
                    repo=self.repo,
                    experiment=self.experiment_name,
                    system_tracking_interval=self.system_tracking_interval,
                    log_system_params=self.log_system_params,
                )
                self._run_hash = self._run.hash

        if kwargs:
            for key, value in kwargs.items():
                self._run.set(key, value, strict=False)

    def on_llm_start(
        self, serialized: Dict[str, Any], prompts: List[str], **kwargs: Any
    ) -> None:
        """Run when LLM starts."""
        aim = import_aim()

        self.step += 1
        self.llm_starts += 1
        self.starts += 1

        resp = {"action": "on_llm_start"}
        resp.update(self.get_custom_callback_meta())

        prompts_res = deepcopy(prompts)

        self._run.track(
            [aim.Text(prompt) for prompt in prompts_res],
            name="on_llm_start",
            context=resp,
        )

    def on_llm_end(self, response: LLMResult, **kwargs: Any) -> None:
        """Run when LLM ends running."""
        aim = import_aim()

        self.step += 1
        self.llm_ends += 1
        self.ends += 1

        resp = {"action": "on_llm_end"}
        resp.update(self.get_custom_callback_meta())

        response_res = deepcopy(response)

        generated = [
            aim.Text(generation.text)
            for generations in response_res.generations
            for generation in generations
        ]
        self._run.track(
            generated,
            name="on_llm_end",
            context=resp,
        )

    def on_llm_new_token(self, token: str, **kwargs: Any) -> None:
        """Run when LLM generates a new token."""
        self.step += 1
        self.llm_streams += 1

    def on_llm_error(
        self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any
    ) -> None:
        """Run when LLM errors."""
        self.step += 1
        self.errors += 1

    def on_chain_start(
        self, serialized: Dict[str, Any], inputs: Dict[str, Any], **kwargs: Any
    ) -> None:
        """Run when chain starts running."""
        aim = import_aim()

        self.step += 1
        self.chain_starts += 1
        self.starts += 1

        resp = {"action": "on_chain_start"}
        resp.update(self.get_custom_callback_meta())

        inputs_res = deepcopy(inputs)

        self._run.track(
            aim.Text(inputs_res["input"]), name="on_chain_start", context=resp
        )

    def on_chain_end(self, outputs: Dict[str, Any], **kwargs: Any) -> None:
        """Run when chain ends running."""
        aim = import_aim()

        self.step += 1
        self.chain_ends += 1
        self.ends += 1

        resp = {"action": "on_chain_end"}
        resp.update(self.get_custom_callback_meta())

        outputs_res = deepcopy(outputs)

        self._run.track(
            aim.Text(outputs_res["output"]), name="on_chain_end", context=resp
        )

    def on_chain_error(
        self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any
    ) -> None:
        """Run when chain errors."""
        self.step += 1
        self.errors += 1

    def on_tool_start(
        self, serialized: Dict[str, Any], input_str: str, **kwargs: Any
    ) -> None:
        """Run when tool starts running."""
        aim = import_aim()

        self.step += 1
        self.tool_starts += 1
        self.starts += 1

        resp = {"action": "on_tool_start"}
        resp.update(self.get_custom_callback_meta())

        self._run.track(aim.Text(input_str), name="on_tool_start", context=resp)

    def on_tool_end(self, output: str, **kwargs: Any) -> None:
        """Run when tool ends running."""
        aim = import_aim()

        self.step += 1
        self.tool_ends += 1
        self.ends += 1

        resp = {"action": "on_tool_end"}
        resp.update(self.get_custom_callback_meta())

        self._run.track(aim.Text(output), name="on_tool_end", context=resp)

    def on_tool_error(
        self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any
    ) -> None:
        """Run when tool errors."""
        self.step += 1
        self.errors += 1

    def on_text(self, text: str, **kwargs: Any) -> None:
        """
""" Run when agent is ending. """ self.step += 1 self.text_ctr += 1 [docs] def on_agent_finish(self, finish: AgentFinish, **kwargs: Any) -> None: """Run when agent ends running.""" aim = import_aim() self.step += 1 self.agent_ends += 1 self.ends += 1 resp = {"action": "on_agent_finish"} resp.update(self.get_custom_callback_meta()) finish_res = deepcopy(finish) text = "OUTPUT:\n{}\n\nLOG:\n{}".format( finish_res.return_values["output"], finish_res.log ) self._run.track(aim.Text(text), name="on_agent_finish", context=resp) [docs] def on_agent_action(self, action: AgentAction, **kwargs: Any) -> Any: """Run on agent action.""" aim = import_aim() self.step += 1 self.tool_starts += 1 self.starts += 1 resp = { "action": "on_agent_action", "tool": action.tool, } resp.update(self.get_custom_callback_meta()) action_res = deepcopy(action) text = "TOOL INPUT:\n{}\n\nLOG:\n{}".format( action_res.tool_input, action_res.log ) self._run.track(aim.Text(text), name="on_agent_action", context=resp) [docs] def flush_tracker( self, repo: Optional[str] = None, experiment_name: Optional[str] = None, system_tracking_interval: Optional[int] = 10, log_system_params: bool = True,
        langchain_asset: Any = None,
        reset: bool = True,
        finish: bool = False,
    ) -> None:
        """Flush the tracker and reset the session.

        Args:
            repo (:obj:`str`, optional): Aim repository path or Repo object to which
                Run object is bound. If skipped, default Repo is used.
            experiment_name (:obj:`str`, optional): Sets Run's `experiment` property.
                'default' if not specified. Can be used later to query runs/sequences.
            system_tracking_interval (:obj:`int`, optional): Sets the tracking
                interval in seconds for system usage metrics (CPU, Memory, etc.).
                Set to `None` to disable system metrics tracking.
            log_system_params (:obj:`bool`, optional): Enable/Disable logging of
                system params such as installed packages, git info, environment
                variables, etc.
            langchain_asset: The langchain asset to save.
            reset: Whether to reset the session.
            finish: Whether to finish the run.

        Returns:
            None
        """
        if langchain_asset:
            try:
                for key, value in langchain_asset.dict().items():
                    self._run.set(key, value, strict=False)
            except Exception:
                pass

        if finish or reset:
            self._run.close()
            self.reset_callback_meta()
        if reset:
            self.__init__(  # type: ignore
                repo=repo if repo else self.repo,
                experiment_name=experiment_name
                if experiment_name
                else self.experiment_name,
                system_tracking_interval=system_tracking_interval
                if system_tracking_interval
                else self.system_tracking_interval,
                log_system_params=log_system_params
                if log_system_params
                else self.log_system_params,
            )
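# A minimal sketch of a session that uses `flush_tracker` to compare two scenarios;
# the prompts and experiment names below are illustrative:
#
#     from langchain.callbacks import AimCallbackHandler, StdOutCallbackHandler
#     from langchain.llms import OpenAI
#
#     aim_callback = AimCallbackHandler(experiment_name="scenario-1")
#     llm = OpenAI(temperature=0, callbacks=[StdOutCallbackHandler(), aim_callback])
#     llm.generate(["Tell me a joke", "Tell me a poem"])
#
#     # Store the LLM config on the run via `langchain_asset`, close the current
#     # run, reset the counters, and start a fresh run for the next scenario:
#     aim_callback.flush_tracker(langchain_asset=llm, experiment_name="scenario-2")
#     llm.generate(["Explain callbacks in one sentence"])
#     aim_callback.flush_tracker(langchain_asset=llm, reset=False, finish=True)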