import json
import os
from contextlib import suppress
from typing import Any, Callable, Dict, Optional, Type, Union
from dotenv import load_dotenv
from pydantic import BaseModel, Field, ValidationError, create_model
from swarm_models.openai_function_caller import OpenAIFunctionCaller
class DynamicParser:
    """Validate raw LLM output against Pydantic models with graceful fallback.

    If a payload does not satisfy the full schema, validation is retried
    against a partial model built from only the keys actually present.
    """

    @staticmethod
    def extract_fields(model: Type[BaseModel]) -> Dict[str, Any]:
        """Map each field name of *model* to its (annotation, default) pair."""
        spec: Dict[str, Any] = {}
        for name, info in model.model_fields.items():
            # `...` marks the field as required for create_model/Field.
            default = ... if info.is_required() else None
            spec[name] = (info.annotation, default)
        return spec

    @staticmethod
    def create_partial_model(
        model: Type[BaseModel], data: Dict[str, Any]
    ) -> Type[BaseModel]:
        """Build a reduced model containing only the fields present in *data*."""
        spec: Dict[str, Any] = {}
        for name, info in model.model_fields.items():
            if name not in data:
                continue
            default = ... if info.is_required() else None
            spec[name] = (info.annotation, default)
        return create_model(f"Partial{model.__name__}", **spec)

    @classmethod
    def parse(
        cls, data: Union[str, Dict[str, Any]], model: Type[BaseModel]
    ) -> Optional[BaseModel]:
        """Validate *data* (JSON text or dict) against *model*.

        Returns the fully validated instance, a partially validated instance,
        or None when the input is neither valid JSON nor schema-conformant.
        """
        if isinstance(data, str):
            try:
                data = json.loads(data)
            except json.JSONDecodeError:
                return None
        # Try the full schema first; fall back to a partial model on failure.
        try:
            return model.model_validate(data)
        except ValidationError:
            pass
        try:
            return cls.create_partial_model(model, data).model_validate(data)
        except ValidationError:
            return None
# Load environment variables (e.g. OPENAI_API_KEY read below) from a .env file.
load_dotenv()
# Define the Thoughts schema
class Thoughts(BaseModel):
    """Structured introspection block the agent emits with every response."""

    text: str = Field(
        ...,
        description="Current thoughts or observations regarding the task.",
    )
    reasoning: str = Field(
        ...,
        description="Logical reasoning behind the thought process.",
    )
    plan: str = Field(
        ...,
        description="A short bulleted list that conveys the immediate and long-term plan.",
    )
    criticism: str = Field(
        ...,
        description="Constructive self-criticism to improve future responses.",
    )
    speak: str = Field(
        ...,
        description="A concise summary of thoughts intended for the user.",
    )
# Define the Command schema
class Command(BaseModel):
    """A single tool invocation: a command name plus its keyword arguments."""

    name: str = Field(
        ...,
        description="Command name to execute from the provided list of commands.",
    )
    args: Dict[str, Any] = Field(
        ..., description="Arguments required to execute the command."
    )
# Define the AgentResponse schema
class AgentResponse(BaseModel):
    """Top-level response schema: the agent's thoughts and the command to run."""

    thoughts: Thoughts = Field(
        ..., description="The agent's current thoughts and reasoning."
    )
    command: Command = Field(
        ...,
        description="The command to execute along with its arguments.",
    )
# Define tool functions
def fluid_api_command(task: str):
    """Execute a fluid API request.

    Args:
        task: Natural-language description of the API request to perform.

    Raises:
        NotImplementedError: The backing ``fluid_api_request`` integration is
            not wired up. The original body referenced an undefined
            ``response`` variable (the real call was commented out), which
            raised a confusing NameError at call time; fail explicitly instead.
    """
    # TODO: restore the real integration, e.g.:
    # response = fluid_api_request(task)
    # print(response.model_dump_json(indent=4))
    # return response
    raise NotImplementedError(
        f"fluid_api backend is not configured (task={task!r})"
    )
def send_tweet_command(text: str):
    """Pretend to post a tweet, echoing it to stdout.

    Args:
        text: The tweet body to "send".

    Returns:
        A status dict describing the simulated tweet.
    """
    message = f"Tweet sent: {text}"
    print(message)
    return {"status": "success", "message": message}
def do_nothing_command():
    """No-op command: log that nothing happened and report success."""
    result = {"status": "success", "message": "No action taken."}
    print("Doing nothing...")
    return result
def task_complete_command(reason: str):
    """Mark the task finished, logging the supplied reason.

    Args:
        reason: Why the task is considered complete.

    Returns:
        A status dict echoing the completion reason.
    """
    summary = f"Task completed: {reason}"
    print(summary)
    return {"status": "success", "message": summary}
# Dynamic command execution
def execute_command(name: str, args: Dict[str, Any]):
    """Look up *name* in the command registry and invoke it with *args*.

    The lambda adapters are deliberately lenient: unknown argument keys are
    ignored and missing expected keys are forwarded as None.

    Args:
        name: Registered command name (e.g. "send_tweet").
        args: Keyword arguments for the command.

    Returns:
        Whatever the underlying command function returns.

    Raises:
        ValueError: If *name* is not a registered command.
    """
    registry: Dict[str, Callable] = {
        "fluid_api": lambda **kw: fluid_api_command(task=kw.get("task")),
        "send_tweet": lambda **kw: send_tweet_command(text=kw.get("text")),
        "do_nothing": lambda **kw: do_nothing_command(),
        "task_complete": lambda **kw: task_complete_command(
            reason=kw.get("reason")
        ),
    }
    if name not in registry:
        raise ValueError(f"Unknown command: {name}")
    return registry[name](**args)
def parse_and_execute_command(
    response: Union[str, Dict[str, Any]],
    base_model: Type[BaseModel] = AgentResponse,
) -> Any:
    """Parse an agent response and run its embedded command, if any.

    Args:
        response: Raw JSON text or an already-decoded dict from the LLM.
        base_model: Schema used for validation (defaults to AgentResponse).

    Returns:
        The command's return value when a command is present; otherwise the
        parsed model instance itself.

    Raises:
        ValueError: If the response cannot be validated at all.
    """
    parsed = DynamicParser.parse(response, base_model)
    if parsed is None:
        raise ValueError("Failed to parse response")
    # A partial model may lack the `command` field entirely.
    if hasattr(parsed, "command"):
        cmd = parsed.command
        return execute_command(cmd.name, cmd.args)
    return parsed
ainame = "AutoAgent"
userprovided = "assistant"

# NOTE(fix): the command list was numbered 1/18/19/20 — copy-paste residue
# from a longer command catalog that could mislead the model into assuming
# commands 2-17 exist. Renumbered sequentially 1-4.
SYSTEM_PROMPT = f"""
You are {ainame}, an advanced and autonomous {userprovided}.
Your role is to make decisions and complete tasks independently without seeking user assistance. Leverage your strengths as an LLM to solve tasks efficiently, adhering strictly to the commands and resources provided.
### GOALS:
1. {userprovided}
2. Execute tasks with precision and efficiency.
3. Ensure outputs are actionable and aligned with the user's objectives.
4. Continuously optimize task strategies for maximum effectiveness.
5. Maintain reliability and consistency in all responses.
### CONSTRAINTS:
1. Memory limit: ~4000 words for short-term memory. Save essential information to files immediately to avoid loss.
2. Independent decision-making: Do not rely on user assistance.
3. Exclusively use commands in double quotes (e.g., "command name").
4. Use subprocesses for commands that may take longer than a few minutes.
5. Ensure all outputs strictly adhere to the specified JSON response format.
### COMMANDS:
1. Fluid API: "fluid_api", args: "method": "<GET/POST/...>", "url": "<url>", "headers": "<headers>", "body": "<payload>"
2. Send Tweet: "send_tweet", args: "text": "<text>"
3. Do Nothing: "do_nothing", args:
4. Task Complete (Shutdown): "task_complete", args: "reason": "<reason>"
### RESOURCES:
1. Internet access for real-time information and data gathering.
2. Long-term memory management for storing critical information.
3. Access to GPT-3.5-powered Agents for delegating tasks.
4. File handling capabilities for output storage and retrieval.
### PERFORMANCE EVALUATION:
1. Continuously analyze and reflect on actions to ensure optimal task completion.
2. Self-critique decisions and strategies constructively to identify areas for improvement.
3. Ensure every command serves a clear purpose and minimizes resource usage.
4. Complete tasks in the least number of steps, balancing speed and accuracy.
### RESPONSE FORMAT:
Always respond in a strict JSON format as described below. Ensure your responses can be parsed with Python's `json.loads`:
"""
# Initialize the OpenAIFunctionCaller that drives the agent loop.
# NOTE(review): this runs at import time and reads OPENAI_API_KEY from the
# environment (loaded from .env above) — confirm the key is set before import.
model = OpenAIFunctionCaller(
    system_prompt=SYSTEM_PROMPT,
    max_tokens=4000,
    temperature=0.9,  # relatively high — favors varied plans over determinism
    base_model=AgentResponse,  # Pass the Pydantic schema as the base model
    parallel_tool_calls=False,
    openai_api_key=os.getenv("OPENAI_API_KEY"),
)
# Example usage
user_input = (
    "Analyze the provided Python code for inefficiencies, generate suggestions for improvements, "
    "and provide optimized code."
)
# NOTE(review): these calls execute at import time and make a network request
# to OpenAI; consider guarding with `if __name__ == "__main__":`.
response = model.run(user_input)
# Validate the LLM output against AgentResponse and run the chosen command.
response = parse_and_execute_command(response)
print(response)