import multiprocessing

import gradio as gr
import openai  # legacy (pre-1.0) OpenAI SDK, which exposes openai.Completion
from langchain_community.llms import OpenAI

# The imports this script originally referenced (langchain.community.OpenAI,
# langchain.agents.TextProcessingAgent, dspy.agents.Agent, and
# dspy.utils.spawn_processes) are not importable as written; the OpenAI SDK,
# langchain_community, and the standard library cover the same roles below.

openai.api_key = "KEY"  # replace with a real API key


def generate_synthetic_data(prompt):
    """Ask the completion endpoint for synthetic scenario text."""
    response = openai.Completion.create(engine="text-davinci-003", prompt=prompt, max_tokens=100)
    return response.choices[0].text
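
# e.g. generate_synthetic_data("Simulate scenarios for a product launch")
# returns up to 100 tokens of model-written scenario text.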


class DataProcessingAgent:
    """Minimal processing agent that normalizes text (lowercased, stripped)."""

    def process(self, data):
        processed_data = data.lower().strip()
        return processed_data
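
# Usage sketch: DataProcessingAgent().process("  Mixed CASE  ") -> "mixed case"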


# A two-member team pairing a completion LLM (via langchain_community) with the
# processing agent above; defined for reference, not yet wired into the pipeline.
team = [
    OpenAI(openai_api_key="YOUR_OPENAI_API_KEY", model_name="text-davinci-003"),
    DataProcessingAgent(),
]


class GroupChatFSM:
    def __init__(self, teams_config):
        """Initialize with configurations for teams."""
        self.teams = {team_name: self.Team(team_agents) for team_name, team_agents in teams_config.items()}
        self.states = ["waiting", "interacting", "finalizing"]
        self.current_state = "waiting"

    def transition(self, to_state):
        """Transition the state of the group chat based on FSM rules."""
        if to_state in self.states:
            self.current_state = to_state
        else:
            raise ValueError("Invalid state transition attempted.")

    def broadcast(self, message):
        """Broadcast a message to all teams based on the current FSM state."""
        if self.current_state == "interacting":
            responses = {team_name: team.broadcast(message) for team_name, team in self.teams.items()}
            return responses
        return "The group chat is not in an interacting state."

    class Team:
        def __init__(self, agents_config):
            self.agents = [self.Agent(agent_config) for agent_config in agents_config]

        def broadcast(self, message):
            responses = [agent.respond(message) for agent in self.agents]
            return responses

        class Agent:
            def __init__(self, config):
                self.agent_name = config["agent_name"]
                self.api_key = config["api_key"]
                self.model = config["model"]

            def respond(self, message):
                return f"{self.agent_name} responding with {self.model}"


def produce_outputs(processed_data):
    """Generate an analysis report and recommendations from the combined data."""
    analysis = openai.Completion.create(prompt=f"Analyze {processed_data}", engine="text-davinci-003", max_tokens=200)
    recommendations = openai.Completion.create(prompt=f"Recommend strategies based on {processed_data}", engine="text-davinci-003", max_tokens=100)

    visualization = None  # no chart is generated yet; the UI slot stays empty
    return analysis.choices[0].text, recommendations.choices[0].text, visualization


def generate_synthetic_data_distributed(prompt, num_nodes=3):
    """Fan synthetic-data generation out across num_nodes worker processes.

    dspy.utils.spawn_processes (referenced in the original imports) is not an
    importable API; a standard-library process pool provides the same fan-out.
    """
    with multiprocessing.Pool(processes=num_nodes) as pool:
        synthetic_data_list = pool.map(generate_synthetic_data, [f"Simulate scenarios for {prompt}"] * num_nodes)
    return "\n".join(synthetic_data_list)


def generate_outputs(user_prompt):
    """Gradio handler: synthesize scenarios, combine them with the prompt, report."""
    synthetic_data = generate_synthetic_data_distributed(user_prompt)
    combined_data = f"{user_prompt}\n{synthetic_data}"
    report, recommendations, visualization = produce_outputs(combined_data)
    return report, recommendations, visualization


if __name__ == "__main__":
    # Placeholder prompt for a command-line demo run; any topic works here.
    user_prompt = "example scenario"
    synthetic_data = generate_synthetic_data_distributed(user_prompt)
    combined_data = f"{user_prompt}\n{synthetic_data}"
    report, recommendations, visualization = produce_outputs(combined_data)

    print("Report:")
    print(report)
    print("\nRecommendations:")
    print(recommendations)
    print("\nVisualization:")
    print(visualization)

    # Launch the Gradio UI; inputs must be a component spec ("text"), not a variable.
    gr.Interface(fn=generate_outputs, inputs="text", outputs=["text", "text", "image"]).launch()