| |
| """Agent Zero Research - Literature analysis agent with workspace isolation.""" |
|
|
| import os, sys, json, time, threading |
| from pathlib import Path |
| from datetime import datetime |
| import gradio as gr |
| import requests as req |
|
|
# --- Agent identity & model configuration -----------------------------------
HF_TOKEN = os.environ.get("HF_TOKEN", "")
AGENT_NAME = "Research"
AGENT_ROLE = "research_analyst"
MODEL_NAME = os.environ.get(
    "MODEL_NAME",
    "ScottzillaSystems/Huihui-Qwen3.5-9B-Claude-4.6-Opus-abliterated",
)

# --- Workspace layout: private project dir + shared cross-agent task queue --
WORKSPACE_DIR = Path("/app/workspace/projects/research")
SHARED_DIR = Path("/app/workspace/shared")
TASK_QUEUE_DIR = SHARED_DIR / "task_queue"

# Make sure both directories the agent writes to exist before anything runs.
# (parents=True also creates SHARED_DIR on the way to TASK_QUEUE_DIR.)
WORKSPACE_DIR.mkdir(parents=True, exist_ok=True)
TASK_QUEUE_DIR.mkdir(parents=True, exist_ok=True)
|
|
def query_model(prompt: str) -> str:
    """Send *prompt* to the HF Inference API and return the generated text.

    Retries up to 3 times, backing off 10s/20s/30s on non-200 responses
    (model cold-start, rate limit) and 5s on network/parse errors. Never
    raises: on exhaustion it returns the "[ERROR] ..." sentinel string so
    callers can embed the result directly in chat replies and reports.
    """
    api_url = f"https://api-inference.huggingface.co/models/{MODEL_NAME}"
    for attempt in range(3):
        try:
            resp = req.post(
                api_url,
                headers={"Authorization": f"Bearer {HF_TOKEN}"},
                json={
                    "inputs": prompt,
                    "parameters": {"max_new_tokens": 4096, "temperature": 0.6},
                },
                timeout=180,
            )
            if resp.status_code == 200:
                r = resp.json()
                # Successful generations come back as a list of dicts; any
                # other shape (e.g. an error payload) is stringified as-is.
                return r[0].get("generated_text", "") if isinstance(r, list) else str(r)
            # Non-200: wait longer each attempt before retrying.
            time.sleep(10 * (attempt + 1))
        except (req.RequestException, ValueError, KeyError, IndexError):
            # Narrowed from a bare `except:`, which also swallowed
            # KeyboardInterrupt/SystemExit and made the process unkillable
            # mid-retry. ValueError covers resp.json() on non-JSON bodies.
            time.sleep(5)
    return "[ERROR] Model unavailable"
|
|
def check_tasks() -> list:
    """Return pending tasks from the shared queue assigned to this agent.

    Another agent may be mid-write on (or may delete) a queue file while we
    scan, so unreadable or invalid-JSON files are skipped instead of raising
    — the original crashed here, which killed the daemon polling thread.
    """
    tasks = []
    for f in TASK_QUEUE_DIR.glob("*.json"):
        try:
            t = json.loads(f.read_text())
        except (OSError, json.JSONDecodeError):
            continue  # partially written, deleted, or corrupt task file
        if t.get("assigned_to") == "research" and t.get("status") == "pending":
            tasks.append(t)
    return tasks
|
|
def execute_task(task):
    """Run one research task end-to-end, persisting each state transition.

    Marks the queue entry in_progress, asks the model to research the
    task's description, writes the full report to this agent's workspace,
    then marks the entry completed (result truncated to 2000 chars in the
    queue file; the full text lives in the .md report).
    """
    task_path = TASK_QUEUE_DIR / f"{task['task_id']}.json"

    def save():
        # Persist the current task dict back to the shared queue.
        task_path.write_text(json.dumps(task, indent=2))

    task["status"] = "in_progress"
    save()

    prompt = (
        f"You are a research analyst. Conduct thorough research on: "
        f"{task['description']}. Provide citations, analysis, and recommendations."
    )
    result = query_model(prompt)

    out_file = WORKSPACE_DIR / f"{task['task_id']}.md"
    out_file.write_text(f"# Research: {task['description']}\n\n{result}")

    task["status"] = "completed"
    task["result"] = result[:2000]
    task["output_file"] = str(out_file)
    task["completed_at"] = datetime.now().isoformat()
    save()
|
|
def autonomous_loop():
    """Background poller: every 60s, pick up and run this agent's tasks.

    Each task runs inside its own try/except so one bad task cannot kill
    the worker thread — in the original, any exception escaping
    execute_task silently terminated the daemon thread and the agent
    stopped processing the queue forever.
    """
    while True:
        for task in check_tasks():
            try:
                execute_task(task)
            except Exception as e:
                # Best-effort: record the failure on the queue entry so the
                # task is not retried forever, then keep the loop alive.
                try:
                    task["status"] = "failed"
                    task["error"] = str(e)
                    (TASK_QUEUE_DIR / f"{task['task_id']}.json").write_text(
                        json.dumps(task, indent=2)
                    )
                except (OSError, KeyError):
                    pass
        time.sleep(60)


# Daemon thread: dies automatically with the main (Gradio) process.
threading.Thread(target=autonomous_loop, daemon=True).start()
|
|
# ---------------------------------------------------------------------------
# Gradio UI: a chat tab for ad-hoc research queries plus a browser for the
# markdown reports produced by completed queue tasks.
# ---------------------------------------------------------------------------
demo = gr.Blocks(title=f"Agent Zero - {AGENT_NAME}", theme=gr.themes.Soft())
with demo:
    gr.Markdown(
        f"# π¬ Agent Zero: {AGENT_NAME}\n"
        f"**Role:** {AGENT_ROLE} | **Model:** {MODEL_NAME}"
    )
    with gr.Tabs():
        with gr.TabItem("π¬ Chat"):
            chatbot = gr.Chatbot(height=400)
            msg = gr.Textbox(label="Research topic")
            send = gr.Button("Send")

            def respond(m, h):
                """Query the model about *m* and append (topic, reply) to history."""
                reply = query_model(
                    f"Research topic: {m}. Provide comprehensive analysis with citations."
                )
                history = h or []
                history.append((m, reply[:2000]))
                return "", history

            send.click(respond, [msg, chatbot], [msg, chatbot])

        with gr.TabItem("π Research Reports"):
            # NOTE(review): choices are snapshotted at UI build time — reports
            # written after startup won't appear until the app restarts.
            files = gr.Dropdown(
                label="Reports",
                choices=[f.name for f in WORKSPACE_DIR.glob("*.md")],
            )
            report = gr.Markdown()

            def load_report(fname):
                """Return the selected report's markdown, or '' if none chosen."""
                return (WORKSPACE_DIR / fname).read_text() if fname else ""

            files.change(load_report, files, report)


demo.queue().launch(server_name="0.0.0.0", server_port=7860)