Spaces:
Sleeping
Sleeping
| import os | |
| from autogen import AssistantAgent, UserProxyAgent | |
| import streamlit as st | |
| from autogen import ConversableAgent, UserProxyAgent | |
| from autogen.agentchat.contrib.capabilities.teachability import Teachability | |
class TeachableAgent:
    """Run a single teachable chat session via AutoGen.

    Wraps an autogen ``ConversableAgent`` with the ``Teachability``
    capability (persistent memo store on disk) and drives one chat turn
    from a simulated user proxy.
    """

    def __init__(self, llm_config, problem):
        # llm_config: LLM configuration dict, passed straight to ConversableAgent.
        # problem: the opening message sent to the teachable agent.
        self.llm_config = llm_config
        self.problem = problem

    def start_chat(self):
        """Build the agents, attach teachability, and run one chat.

        Returns whatever ``UserProxyAgent.initiate_chat`` returns
        (the chat result object).
        """
        # Bug fix: the original ignored the llm_config stored by __init__ and
        # always re-read st.session_state['llm_config']. Prefer the stored
        # config; fall back to session state for backward compatibility.
        llm_config = self.llm_config if self.llm_config is not None \
            else st.session_state['llm_config']
        problem = self.problem

        # Any agent inheriting from ConversableAgent can be made teachable.
        teachable_agent = ConversableAgent(
            # Name must not contain spaces to work in group chat.
            name="teachable_agent",
            llm_config=llm_config,
        )

        # Teachability persists learned memos in a local DB directory.
        teachability = Teachability(
            verbosity=0,  # 0 basic info, 1 +memory ops, 2 +analyzer msgs, 3 +memo lists.
            reset_db=False,  # Keep previously learned memos across runs.
            path_to_db_dir="./teachability_db",
            recall_threshold=1.5,  # Higher allows more (but less relevant) memos.
        )
        teachability.add_to_agent(teachable_agent)

        # Proxy for the user; auto-reply disabled so the chat is one turn.
        user = UserProxyAgent(
            name="user",
            human_input_mode="NEVER",
            # Bug fix: x.get("content") may be None, and `"TERMINATE" in None`
            # raises TypeError — coalesce to "" before the membership test.
            is_termination_msg=lambda x: "TERMINATE" in (x.get("content") or ""),
            max_consecutive_auto_reply=0,
            code_execution_config={
                # Set use_docker=True if docker is available — running
                # generated code in docker is safer than running it directly.
                "use_docker": False
            },
        )

        # clear_history is driven by the UI's "Chat_Purpose" session value:
        # a falsy value keeps prior turns so the agent can be taught
        # across messages (NOTE(review): presumably a boolean — confirm
        # against the Streamlit caller that sets it).
        response = user.initiate_chat(
            teachable_agent,
            message=problem,
            clear_history=st.session_state["Chat_Purpose"],
        )
        return response