# NOTE: "Spaces: Running / Running" — leftover HuggingFace Space status
# banner captured by a web scrape; not part of the program itself.
#!/usr/bin/env python3
"""
Lyrics Analyzer Agent - Main Entry Point

This module serves as the entry point for the Lyrics Analyzer application,
which uses a system of specialized agents to search for and analyze song
lyrics.
"""
# Third-party dependencies.
from loguru import logger
from smolagents import LiteLLMModel

# Project-local modules.
from agents.single_agent import create_single_agent
from config import (
    get_model_id,
    get_ollama_api_base,
    load_api_keys,
    setup_logger,
)
from Gradio_UI import GradioUI
def main():
    """
    Initialize and run the Lyrics Analyzer Agent.

    Sets up logging, loads API keys, initializes the LLM model, and
    starts the Gradio UI server with the single (manager) agent.
    Takes no arguments and does not return; ``launch()`` serves the UI.
    """
    # Set up logging and load API keys before anything else.
    setup_logger()
    load_api_keys()

    # Feature flags, currently hardcoded. They could instead be derived
    # from the environment (e.g. SPACE_ID is unset when running locally).
    use_local_llm = False   # True -> route requests through a local Ollama server
    use_localhost = False   # True -> bind Gradio to 127.0.0.1:3000 for local testing

    # Initialize the LLM model based on configuration.
    model_id = get_model_id(provider='gemini')
    logger.info(f"Initializing with model: {model_id}")

    if use_local_llm:
        # When using Ollama, an explicit API base URL is required.
        api_base = get_ollama_api_base()
        logger.info(f"Using Ollama API base: {api_base}")
        model = LiteLLMModel(model_id=model_id, api_base=api_base)
    else:
        model = LiteLLMModel(model_id=model_id)

    # Create the manager agent which will create and manage the other agents.
    single_agent = create_single_agent(model, analysis_tool_model_id=model_id)

    # Start the Gradio UI server.
    logger.info("Initializing Gradio UI and launching server")

    # Agent instructions are configured directly in Gradio_UI.py.
    if use_localhost:
        # Local test configuration: private server on a fixed port.
        launch_kwargs = {
            "debug": True,
            "share": False,
            "server_name": "127.0.0.1",
            "server_port": 3000,
        }
    else:
        # Production (HuggingFace Spaces) configuration: no explicit
        # server_name/server_port — the hosting platform supplies them.
        launch_kwargs = {
            "debug": True,
            "share": True,
        }

    GradioUI(single_agent).launch(**launch_kwargs)
    # NOTE(review): Gradio's launch() typically blocks while serving, so
    # this line is likely reached only on shutdown — confirm before
    # relying on this log message as a startup signal.
    logger.success("Server started successfully")
# Run the application when executed directly.
if __name__ == "__main__":
    main()