import sys
import os
import argparse
from loguru import logger
from root_agent.rootAgent import RootAgent
import json

def load_settings(path='settings.txt'):
    """Load agent settings from a JSON file.

    Args:
        path: Path to the JSON settings file. Defaults to 'settings.txt'
            in the current working directory (matching prior behavior).

    Returns:
        dict: Parsed settings, or an empty dict when the file is missing
        or contains invalid JSON. Errors are logged, never raised, so
        callers always receive a usable dict.
    """
    settings = {}
    try:
        with open(path, 'r', encoding='utf-8') as f:
            settings = json.load(f)
    except FileNotFoundError:
        logger.error(f"{path} not found. Please create one.")
    except json.JSONDecodeError:
        logger.error(f"Error decoding JSON from {path}. Please check the file format.")
    return settings

# Module-level defaults read from settings.txt; fall back to the
# hard-coded test identifiers when the file omits them or is missing.
settings = load_settings()
default_userid = settings.get("userid", "test111")
default_openid = settings.get("openid", "zyinfoai")

from fastapi import FastAPI, Request
from fastapi.responses import HTMLResponse, JSONResponse
from fastapi.staticfiles import StaticFiles


from context_manager import ContextManager
from output_generator import OutputGenerator
from input_processor import InputProcessor

# Configure logging: drop loguru's default stderr handler and route all
# INFO+ records to a rotating log file, excluding the chatty
# tools.tool_manager module. (Previous duplicate imports of `os`/`logger`,
# a redundant second `logger.remove()`, and commented-out cleanup code
# were removed — `os` and `logger` are already imported at the top.)
logger.remove()
logger.add("saia_log.log", rotation="200 KB", level="INFO", filter=lambda record: record["name"] != "tools.tool_manager")

# Process-wide singletons shared by both the web routes in
# `create_web_app` and the CLI flow in `main`.
# NOTE(review): constructed at import time — assumes these constructors
# have no heavy side effects; confirm against their definitions.
root_agent = RootAgent(userid=default_userid, openid=default_openid)
context_manager = ContextManager()
output_generator = OutputGenerator()

def create_web_app():
    """Build the FastAPI application serving the SAIA web UI.

    Routes:
        GET  /           -> serves static/index.html
        POST /api/prompt -> runs the prompt through the root agent and
                            returns the NLP response plus LLM metrics.

    Returns:
        FastAPI: the configured application instance.
    """
    app = FastAPI()

    app.mount("/static", StaticFiles(directory=os.path.join(os.getcwd(), "static")), name="static")

    @app.get("/", response_class=HTMLResponse)
    async def read_root():
        # Explicit UTF-8: the default open() encoding is platform-dependent
        # and would mangle non-ASCII content in index.html on some systems.
        with open(os.path.join(os.getcwd(), "static", "index.html"), "r", encoding="utf-8") as f:
            return HTMLResponse(content=f.read())

    @app.post("/api/prompt")
    async def handle_prompt(request: Request):
        data = await request.json()
        processed_input = InputProcessor.process_input("web", data)
        user_input = processed_input["user_input"]
        # Currently always empty for web input; kept for parity with the CLI path.
        file_context = processed_input["file_context"]

        if not user_input:
            return JSONResponse(content={"response": "No prompt provided."}, status_code=400)

        logger.info(f"Web UI input: {user_input}")
        context_manager.update_history("user", user_input)

        # Pass the full conversation context, including file_context, to the agent.
        current_context = context_manager.get_context()
        current_context["file_context"] = file_context

        response_data = root_agent.process_command(user_input, current_context, userid=default_userid, openid=default_openid)

        final_result = response_data.get("final_result", "No final result.")
        llm_metrics = response_data.get("llm_usage_metrics", {})
        total_task_duration = response_data.get("total_task_duration", 0.0)

        context_manager.update_history("assistant", final_result)
        logger.info(f"SAIA response: {final_result}")

        # Convert the raw agent result into natural language for the UI.
        return JSONResponse(content={
            "response": output_generator.generate_nlp_output(final_result),
            "llm_metrics": llm_metrics,
            "total_task_duration": total_task_duration
        })

    return app

def _emit(message):
    """Log *message* at INFO level and mirror it to stdout."""
    logger.info(message)
    print(message)


def _report_metrics(llm_metrics, total_task_duration):
    """Log and print aggregate and per-task LLM usage metrics.

    Args:
        llm_metrics: dict with 'total_input_tokens', 'total_output_tokens',
            'total_cost' and a 'task_llm_metrics' list of per-call dicts.
        total_task_duration: total wall-clock duration in seconds.
    """
    total_input_tokens = llm_metrics.get("total_input_tokens", 0)
    total_output_tokens = llm_metrics.get("total_output_tokens", 0)
    total_cost = llm_metrics.get("total_cost", 0.0)

    _emit(f"Total LLM Input Tokens: {total_input_tokens}")
    _emit(f"Total LLM Output Tokens: {total_output_tokens}")
    # The *7 factor converts the tracked cost to CNY for display
    # (presumably costs are tracked in USD — TODO confirm).
    _emit(f"Total LLM Cost: ¥{total_cost * 7:.6f}")
    _emit(f"Total Task Duration: {total_task_duration:.2f} seconds")

    for i, task_metric in enumerate(llm_metrics.get("task_llm_metrics", [])):
        _emit(f"  Task LLM Call {i+1} (Type: {task_metric.get('task_type')}):")
        _emit(f"    Input Tokens: {task_metric.get('input_tokens')}")
        _emit(f"    Output Tokens: {task_metric.get('output_tokens')}")
        # `or 0.0` guards against a missing/None cost, which would
        # otherwise raise TypeError inside the `:.6f` format expression.
        _emit(f"    Cost: ¥{(task_metric.get('cost') or 0.0) * 7:.6f}")


def main():
    """CLI entry point for the SAIA agent.

    Parses command-line arguments, runs a single command through the
    root agent, then reports LLM usage metrics and the final result.
    """
    parser = argparse.ArgumentParser(description="SAIA Agent Server")
    parser.add_argument("--prompt", type=str, help="The prompt for the SAIA agent.")
    parser.add_argument("--max_run_steps", type=int, default=10, help="Maximum number of steps the agent can run.")
    args = parser.parse_args()

    user_input = args.prompt
    max_run_steps = args.max_run_steps

    if user_input is None:
        # No prompt on the command line: fall back to a fixed test prompt
        # ("write 'Hello World!' into temp/test.txt").
        user_input = "在 temp/test.txt 中写入内容 'Hello World!'"
        logger.info("No prompt provided via command line. Using hardcoded prompt for testing.")

    file_context = ""

    # Anchor the agent's working directory before running the command.
    context_manager.update_task_focus(work_dir=os.getcwd())

    logger.info(f"User input: {user_input}")

    context_manager.update_history("user", user_input)
    current_context = context_manager.get_context()
    current_context["file_context"] = file_context  # Ensure file_context is passed

    # Run the command exactly once. (A previous debug leftover wrapped this
    # call in `for i in range(10)`, executing the same command ten times —
    # multiplying LLM cost and side effects while only the last response
    # was ever used.)
    response_data = root_agent.process_command(user_input, current_context, userid=default_userid, openid=default_openid, max_run_steps=max_run_steps)

    final_result = response_data.get("final_result", "No final result.")
    llm_metrics = response_data.get("llm_usage_metrics", {})
    total_task_duration = response_data.get("total_task_duration", 0.0)

    _report_metrics(llm_metrics, total_task_duration)

    # Truncate and clean the final_result for mem0 to avoid API errors.
    max_len = 1000
    if len(final_result) > max_len:
        history_response = final_result[:max_len] + "... [truncated]"
    else:
        history_response = final_result

    context_manager.update_history("assistant", history_response)
    logger.info(f"SAIA final result: {final_result}")
    print(f"SAIA: {output_generator.generate_nlp_output(final_result)}")

