from enum import Enum
from typing import Optional
from logger import Logger
TEST_MODE = False


class LLMInterface(Enum):
    OPENAI = "OpenAI"
    HUGGINGFACE = "HuggingFace"
    # Add your own if you like (then adjust the LLMFactory)
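    # Illustrative extension (an assumption: the project's LLMFactory would need a
    # matching branch for any new value), e.g.:
    #     OLLAMA = "Ollama"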


class AgentPreset:
    def __init__(self, interface: LLMInterface, model_name: str, temperature: Optional[float] = None,
                 max_tokens: Optional[int] = None, repeat_penalty: Optional[float] = None):
        """
        Initialize an AgentPreset with LLM configuration parameters.

        Args:
            interface: The model interface to use (e.g., OPENAI, HUGGINGFACE)
            model_name: Name of the model to use
            temperature: Controls randomness in responses (0.0-1.0)
            max_tokens: Maximum number of tokens to generate in response
            repeat_penalty: Penalty for token repetition
        """
        self.interface = interface
        self.model_name = model_name
        self.temperature = temperature
        self.max_tokens = max_tokens
        self.repeat_penalty = repeat_penalty

    def get_interface(self) -> LLMInterface:
        """
        Get the model interface.

        Returns:
            LLMInterface: The interface used for this agent.
        """
        return self.interface

    def get_model_name(self) -> str:
        """
        Get the model name.

        Returns:
            str: The name of the model.
        """
        return self.model_name

    def get_temperature(self) -> Optional[float]:
        """
        Get the temperature setting.

        Returns:
            Optional[float]: The temperature value controlling randomness, or None if not set.
        """
        return self.temperature

    def get_max_tokens(self) -> Optional[int]:
        """
        Get the maximum tokens setting.

        Returns:
            Optional[int]: The maximum number of tokens for generation, or None if not set.
        """
        return self.max_tokens

    def get_repeat_penalty(self) -> Optional[float]:
        """
        Get the repeat penalty setting.

        Returns:
            Optional[float]: The penalty value for token repetition, or None if not set.
        """
        return self.repeat_penalty
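
# Illustrative usage of AgentPreset (a minimal sketch; the model name below is a
# placeholder, not one of the models configured in this project):
#
#     preset = AgentPreset(LLMInterface.OPENAI, "some-model", temperature=0.2, max_tokens=512)
#     preset.get_model_name()      # -> "some-model"
#     preset.get_temperature()     # -> 0.2
#     preset.get_repeat_penalty()  # -> None (not set)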


class Args:
    LOGGER = Logger.set_logger()

    primary_llm_interface = LLMInterface.OPENAI
    # secondary_llm_interface = LLMInterface.HUGGINGFACE
    vlm_interface = LLMInterface.OPENAI

    primary_model = "groot" if TEST_MODE else "qwen/qwen3-30b-a3b"
    secondary_model = "groot" if TEST_MODE else "qwen2.5-7b-instruct-1m"
    vision_model = "groot" if TEST_MODE else "qwen/qwen2.5-vl-7b"

    api_base = "http://127.0.0.1:1234/v1"  # LM Studio local endpoint
    api_key = "api_key"
    token = ""  # Not needed when using OpenAILike API

    # Agent presets
    PRIMARY_AGENT_PRESET = AgentPreset(
        primary_llm_interface, primary_model,
        temperature=None, max_tokens=1500, repeat_penalty=None
    )
    SECONDARY_AGENT_PRESET = AgentPreset(
        primary_llm_interface, secondary_model,
        temperature=None, max_tokens=1500, repeat_penalty=None
    )
    VISION_AGENT_PRESET = AgentPreset(
        vlm_interface, vision_model,
        temperature=None, max_tokens=1500, repeat_penalty=None
    )
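
# Illustrative consumption of a preset (a sketch under assumptions; the real factory /
# client construction lives elsewhere in the project and may differ):
#
#     preset = Args.PRIMARY_AGENT_PRESET
#     if preset.get_interface() is LLMInterface.OPENAI:
#         # e.g. point an OpenAI-compatible client at Args.api_base with Args.api_key,
#         # passing preset.get_model_name() and preset.get_max_tokens() through.
#         ...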


class AppParams:
    # --- Constants ---
    DEFAULT_API_URL = "https://agents-course-unit4-scoring.hf.space"
    MOCK_SUBMISSION = True
    QUESTIONS_LIMIT = 3  # Use 0 for no limit


class AlfredParams:
    # Maximum number of interactions between the Manager and the Solver
    MAX_INTERACTIONS = 6
    # Maximum number of interactions between the Solver and its assistants
    MAX_SOLVING_EFFORT = 6
    # An audit runs every few messages to check whether the manager agent got stuck
    AUDIT_INTERVAL = 3
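
# Illustrative use of the limits above (a sketch; the loop structure and variable names
# are assumptions, not the project's actual control flow):
#
#     for turn in range(1, AlfredParams.MAX_INTERACTIONS + 1):
#         ...  # one Manager <-> Solver exchange
#         if turn % AlfredParams.AUDIT_INTERVAL == 0:
#             ...  # audit: verify the manager agent has not got stuck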


class MiscParams:
    NO_THINK = True
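
# Illustrative application of NO_THINK (an assumption: Qwen3 models support a "/no_think"
# soft switch in the prompt to suppress thinking traces; how this flag is actually consumed
# depends on the rest of the project). A hypothetical helper:
#
#     def apply_no_think(prompt: str) -> str:
#         return f"{prompt} /no_think" if MiscParams.NO_THINK else prompt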