# CrewAI / app.py
import streamlit as st
from crewai import Crew, Process

from configfile import Config
from src.agents.agents import CrewAIAgents, CustomHandler
from src.llmconfig.groqllm import GroqLLM
from src.streamlitUI.loadui import LoadStreamlitUI
from src.supportingtools.tools import SupportingTools
from src.task.tasks import CrewAITasks
# Main entry point
if __name__ == "__main__":
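    # High-level flow: load the config and Streamlit UI, collect the user's
    # prompt from the chat input, configure the Groq-backed LLM, then dispatch
    # to one of three CrewAI use cases and render the crew's final output.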
    # config
    obj_config = Config()
    # load ui
    ui = LoadStreamlitUI()
    user_input = ui.load_streamlit_ui()
    # Initialize the message log in session state if not already present
    if "messages" not in st.session_state:
        st.session_state["messages"] = [{"role": "assistant", "content": "What do you want us to write?"}]
    # Display existing messages
    for msg in st.session_state.messages:
        st.chat_message(msg["role"]).write(msg["content"])
    # Handle user input
    if prompt := st.chat_input():
        st.session_state.messages.append({"role": "user", "content": prompt})
        st.chat_message("user").write(prompt)
        # Configure LLM
        obj_llm_config = GroqLLM(user_controls_input=user_input)
        llm = obj_llm_config.groq_llm_config()
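        # Route the prompt to the CrewAI pipeline matching the selected use case.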
if user_input["selected_usecase"] == 'MultiAgent Coder':
            # agents
            obj_crewai_agents = CrewAIAgents(llm=llm)
            lst_agents = obj_crewai_agents.crewai_agents()
            # Define tasks for each agent
            obj_crewai_tasks = CrewAITasks(llm=llm, prompt=prompt, lst_agents=lst_agents)
            lst_tasks = obj_crewai_tasks.create_tasks()
            # Set up the crew and process tasks hierarchically
            project_crew = Crew(
                tasks=lst_tasks,
                agents=lst_agents,
                process=Process.hierarchical,
                manager_llm=llm,
                manager_callbacks=[CustomHandler("Crew Manager")]
            )
            final = project_crew.kickoff()
        elif user_input['selected_usecase'] == 'MultiAgent SearchTool':
            print('ContentGen with SearchTool')
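            # Search-grounded pipeline: the agents get a DuckDuckGo search tool
            # so content generation can draw on live web results.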
            # tools
            # API_URL = "https://api-inference.huggingface.co/models/stabilityai/stable-diffusion-xl-base-1.0"
            obj_supporting_tools = SupportingTools()
            search_tool = obj_supporting_tools.duckduckgosearchtool()
            # agents
            obj_crewai_agents = CrewAIAgents(llm=llm)
            lst_agents = obj_crewai_agents.gen_with_crewai_agents(search_tool=search_tool)
            # Define tasks for each agent
            obj_crewai_tasks = CrewAITasks(llm=llm, prompt=prompt, lst_agents=lst_agents)
            lst_tasks = obj_crewai_tasks.create_tasks_gen()
            # Set up the crew and process tasks hierarchically
            project_crew = Crew(
                tasks=lst_tasks,
                agents=lst_agents,
                process=Process.hierarchical,
                manager_llm=llm,
                manager_callbacks=[CustomHandler("Crew Manager")]
            )
            final = project_crew.kickoff()
        elif user_input['selected_usecase'] == 'MultiAgent Image':
            print('ContentGen with Image')
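            # Image-enabled pipeline: agents receive an image-generation tool backed
            # by the Hugging Face Inference API (Stable Diffusion XL).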
            # tools
            API_URL = "https://api-inference.huggingface.co/models/stabilityai/stable-diffusion-xl-base-1.0"
            obj_supporting_tools = SupportingTools(API_URL=API_URL)
            search_tool = obj_supporting_tools.duckduckgosearchtool()
            # agents
            obj_crewai_agents = CrewAIAgents(llm=llm)
            lst_agents = obj_crewai_agents.gen_with_image_crewai_agents(image_generate=obj_supporting_tools.image_generate)
            # Define tasks for each agent
            obj_crewai_tasks = CrewAITasks(llm=llm, prompt=prompt, lst_agents=lst_agents)
            lst_tasks = obj_crewai_tasks.create_tasks_image_gen()
            # Set up the crew and process tasks
            # Only the second agent and its task from the generated lists are used here
            project_crew = Crew(
                tasks=[lst_tasks[1]],
                agents=[lst_agents[1]],
                process=Process.sequential,
                manager_llm=llm,
                manager_callbacks=[CustomHandler("Crew Manager")]
            )
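            # kickoff() interpolates the inputs dict into the task definitions;
            # the user's prompt is passed under the 'input' key.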
            final = project_crew.kickoff({'input': prompt})
        # Display the final result
        result = f"## Here is the Final Result \n\n {final}"
        st.session_state.messages.append({"role": "assistant", "content": result})
        st.chat_message("assistant").write(result)