#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
AtlasSplit / agent.py

Main orchestration for the "Spreadsheet Allocation & Settlement Agent".
Pipeline: read → context → LLM codegen → static audit → controlled execution →
self-heal retries → outputs (Result sheet + report.md) → full run snapshot.

Usage (CLI fallback):
    python agent.py --excel path/to/file.xlsx --sheet SHEET_NAME \
        --task "<natural language instruction>" --mode SAFE --retries 2

Environment variables (OpenAI-compatible):
    OPENAI_BASE_URL  (e.g. http://127.0.0.1:8000/v1  or provider /v1)
    OPENAI_API_KEY   (e.g. sk-local for local backends)
    LLM_MODEL        (e.g. qwen2.5-coder-7b-instruct)

This file will try to import optional modules:
  - audit.static_audit            (AST + allowlist; else uses a minimal fallback)
  - executor_inproc.exec_in_sandbox  (controlled exec; else uses a minimal fallback)
  - toolbox (equal_split / weighted_split / ensure_sum_fix)  injected to sandbox
  - rag/build_index assets (optional retrieval)

Author: AtlasSplit
"""
from __future__ import annotations
import argparse
import ast
import contextlib
import datetime as dt
import hashlib
import io
import json
import os
import re
import shutil
import sys
import textwrap
import time
from dataclasses import dataclass
from pathlib import Path
from typing import Any, Dict, List, Optional, Tuple

# ---------------------------- Optional imports ----------------------------
try:
    from openai import OpenAI  # Official SDK (OpenAI-compatible)
except Exception as _:
    OpenAI = None  # type: ignore

# Optional audit & executor
try:
    from audit import static_audit as external_static_audit  # type: ignore
except Exception:
    external_static_audit = None  # type: ignore

try:
    from executor_inproc import exec_in_sandbox as external_exec_in_sandbox  # type: ignore
except Exception:
    external_exec_in_sandbox = None  # type: ignore

# Optional RAG stack (best-effort)
try:
    import faiss  # type: ignore
    from sentence_transformers import SentenceTransformer  # type: ignore
    _RAG_AVAILABLE = True
except Exception:
    _RAG_AVAILABLE = False

# Toolbox is optional for agent itself; generated code may import these via sandbox
try:
    from toolbox import equal_split, weighted_split, ensure_sum_fix  # noqa: F401
    _HAS_TOOLBOX = True
except Exception:
    _HAS_TOOLBOX = False

import numpy as np  # allowed for schema sampling
import pandas as pd

# ---------------------------- Constants & Config ----------------------------
# Per-run snapshot directory (prompt, code, logs) and shared final outputs.
RUNS_DIR = Path("runs")
OUTPUT_DIR = Path("outputs")
# Created eagerly at import time so later writes never fail on a missing dir.
OUTPUT_DIR.mkdir(parents=True, exist_ok=True)
RUNS_DIR.mkdir(parents=True, exist_ok=True)

# Allow/deny lists enforced by the fallback static audit and the sandbox
# import hook. The external audit module may apply its own (stricter) policy.
ALLOWED_IMPORTS = {"pandas", "numpy", "openpyxl", "math"}
FORBIDDEN_MODULE_ROOTS = {"os", "subprocess", "shutil", "requests", "socket", "sys", "pathlib", "importlib"}
FORBIDDEN_BUILTINS = {"eval", "exec", "compile", "__import__"}

# LLM sampling knobs, overridable via environment variables.
DEFAULT_TEMPERATURE = float(os.getenv("LLM_TEMPERATURE", "0.2"))
DEFAULT_MAX_TOKENS  = int(os.getenv("LLM_MAX_TOKENS", "1200"))

@dataclass
class RunArtifacts:
    """Filesystem locations produced for one pipeline run."""
    # Timestamp string (YYYYmmdd-HHMMSS) naming the run directory.
    ts: str
    # Directory under RUNS_DIR holding this run's snapshot files.
    run_dir: Path
    # Read-only copy of the user's input workbook inside run_dir.
    input_copy: Path
    prompt_path: Path
    code_path: Path
    stdout_path: Path
    stderr_path: Path
    # NOTE: the report goes to the shared OUTPUT_DIR, not into run_dir.
    report_path: Path


# ---------------------------- Utilities ----------------------------
def _now_str() -> str:
    return dt.datetime.now().strftime("%Y%m%d-%H%M%S")


def _mk_run_dir(src_path: Path) -> RunArtifacts:
    """Create a fresh run directory and snapshot the input file into it.

    Args:
        src_path: Path to the user's input workbook/CSV (must exist).

    Returns:
        RunArtifacts with all per-run paths resolved.
    """
    ts = _now_str()
    run_dir = RUNS_DIR / ts
    # Two runs started within the same second would collide on the
    # timestamp-named directory; disambiguate with a numeric suffix instead
    # of crashing with FileExistsError.
    suffix = 1
    while True:
        try:
            run_dir.mkdir(parents=True, exist_ok=False)
            break
        except FileExistsError:
            run_dir = RUNS_DIR / f"{ts}-{suffix}"
            suffix += 1
    ts = run_dir.name  # keep the artifact ts consistent with the directory name

    # Copy the input as a read-only snapshot so the pipeline never mutates the
    # user's original file in place.
    input_copy = run_dir / f"input{src_path.suffix.lower()}"
    shutil.copy2(src_path, input_copy)

    return RunArtifacts(
        ts=ts,
        run_dir=run_dir,
        input_copy=input_copy,
        prompt_path=run_dir / "prompt.txt",
        code_path=run_dir / "code.py",
        stdout_path=run_dir / "stdout.log",
        stderr_path=run_dir / "stderr.log",
        report_path=OUTPUT_DIR / "report.md",
    )


def _is_excel(p: Path) -> bool:
    return p.suffix.lower() in {".xlsx", ".xlsm", ".xls"}


def _is_csv(p: Path) -> bool:
    return p.suffix.lower() in {".csv"}


def _load_preview(path: Path, sheet: str = "", preview_rows: int = 30) -> Dict[str, Any]:
    """Load a tiny preview & schema summary for the prompt.

    Args:
        path: Excel or CSV file to profile.
        sheet: Preferred sheet name; falls back to the first sheet when absent.
        preview_rows: Max number of sample rows to include.

    Returns:
        Dict with sheets, target_sheet, shape, columns, dtypes, numeric stats
        and JSON-safe sample rows. Any failure is captured under the "error"
        key instead of raising, so the pipeline can still proceed.
    """
    info: Dict[str, Any] = {"path": str(path), "sheet": sheet}

    try:
        if _is_excel(path):
            # Context manager guarantees the workbook handle is closed (the
            # previous version leaked it, which can lock files on Windows).
            with pd.ExcelFile(path) as xls:
                info["sheets"] = list(xls.sheet_names)
                if sheet and sheet in xls.sheet_names:
                    target_sheet = sheet
                else:
                    target_sheet = xls.sheet_names[0]
                df = xls.parse(target_sheet)
            info["target_sheet"] = target_sheet
        elif _is_csv(path):
            df = pd.read_csv(path)
            info["sheets"] = ["<CSV>"]
            info["target_sheet"] = "<CSV>"
        else:
            raise ValueError(f"Unsupported file suffix: {path.suffix}")

        # Basic profile
        info["shape"] = list(df.shape)
        info["columns"] = df.columns.tolist()
        info["dtypes"] = {c: str(df[c].dtype) for c in df.columns}
        # Numeric columns quick stats
        numeric_cols = [c for c in df.columns if pd.api.types.is_numeric_dtype(df[c])]
        info["numeric_summary"] = df[numeric_cols].describe().to_dict() if numeric_cols else {}
        # Round-trip the sample through to_json so Timestamps become ISO
        # strings and NaN becomes null; to_dict(orient="records") produced
        # objects that could break json.dumps later in the prompt builder.
        info["sample"] = json.loads(df.head(preview_rows).to_json(orient="records", date_format="iso"))
    except Exception as e:
        info["error"] = f"Preview failed: {e}"
    return info


# ---------------------------- Optional RAG ----------------------------
def _rag_retrieve(query: str, top_k: int = 4, index_path: Path = Path("rag/index.faiss"), budget_chars: int = 2500) -> List[str]:
    """Best-effort retrieval of domain chunks; returns [] whenever RAG is unavailable."""
    if not _RAG_AVAILABLE:
        return []
    if not index_path.exists():
        return []
    try:
        # Sidecar metadata written by build_index: one JSON record per chunk.
        meta_path = index_path.with_name("meta.jsonl")
        chunks: List[str] = []
        metas: List[Dict[str, Any]] = []
        if meta_path.exists():
            with open(meta_path, "r", encoding="utf-8") as fh:
                for raw in fh:
                    record = json.loads(raw)
                    metas.append(record)
                    chunks.append(record["text"])  # stored by build_index
        if not chunks:
            return []
        # Encode the query and search the persisted FAISS index.
        encoder = SentenceTransformer("intfloat/e5-small")
        query_vec = encoder.encode([query], normalize_embeddings=True)
        index = faiss.read_index(str(index_path))
        _, hit_ids = index.search(query_vec, top_k)
        selected = [chunks[h] for h in hit_ids[0] if 0 <= h < len(chunks)]
        # Clip the merged context to the character budget before returning.
        merged = "\n".join(selected)
        return [merged[:budget_chars]]
    except Exception:
        # Retrieval is optional enrichment — never let it fail the pipeline.
        return []


# ---------------------------- Prompt building ----------------------------
# System message sent verbatim to the LLM on every code-generation call.
# The audit/sandbox layers are expected to enforce the same constraints the
# prompt states (allowlisted imports, DONE marker, 'Result' sheet output).
SYSTEM_PROMPT = (
    "You are a spreadsheet automation engineer.\n"
    "Output ONLY Python code, no explanations.\n"
    "Hard constraints:\n"
    "- Use only: pandas, numpy, openpyxl, math.\n"
    "- Forbidden: os, subprocess, requests, socket, sys, importlib, pathlib, shutil, eval, exec.\n"
    "- Read the workbook from path INPUT_PATH. If CSV, use pandas.read_csv. If Excel, use pandas + openpyxl.\n"
    "- Perform the task precisely. Prefer vectorized pandas.\n"
    "- Write results to a new sheet named 'Result' in the same workbook (if Excel) OR to 'outputs/result.xlsx' (if CSV).\n"
    "- Ensure numeric outputs keep 2 decimals; if a sum must equal a target total, fix the last-cent residual using ensure_sum_fix if available.\n"
    "- At the very end, print exactly one line: print(\"DONE: <short summary>\").\n"
    "- You may call helper functions equal_split, weighted_split, ensure_sum_fix if they exist.\n"
)


def _format_user_prompt(info: Dict[str, Any], task: str, rag_chunks: List[str]) -> str:
    header = [
        "[Domain Context]",
        *([c for c in rag_chunks if c] or []),
        "\n[Workbook Summary]",
        f"Path: {info.get('path')}",
        f"Sheets: {info.get('sheets')}",
        f"TargetSheet: {info.get('target_sheet')}",
        f"Shape: {info.get('shape')}",
        f"Columns: {info.get('columns')}",
        f"DTypes: {info.get('dtypes')}",
    ]
    sample = json.dumps(info.get("sample", [])[:10], ensure_ascii=False)
    body = (
        f"\n[SampleRows]\n{sample}\n\n[Task]\n{task}\n\n[Rules]\n"
        f"- Keep 2 decimals.\n"
        f"- Write 'Result' sheet or outputs/result.xlsx.\n"
        f"- Print DONE: summary at the end.\n"
    ).strip()
    return textwrap.dedent("\n".join(header) + "\n\n" + body)



# ---------------------------- LLM call ----------------------------
def _get_client() -> Any:
    """Build an OpenAI-compatible client from environment variables.

    Raises:
        RuntimeError: if the openai SDK is missing or OPENAI_BASE_URL is unset.
    """
    if OpenAI is None:
        raise RuntimeError("openai package not available. Install 'openai' and try again.")
    base_url = os.getenv("OPENAI_BASE_URL")
    if not base_url:
        raise RuntimeError("OPENAI_BASE_URL is not set")
    # Local OpenAI-compatible servers usually accept any non-empty key.
    api_key = os.getenv("OPENAI_API_KEY") or "sk-local"
    return OpenAI(base_url=base_url, api_key=api_key)


def _llm_generate_code(task: str, info: Dict[str, Any], last_error: str = "", last_code: str = "") -> str:
    """Ask the LLM for a code-only solution, feeding back prior error/code on retries."""
    client = _get_client()
    model_name = os.getenv("LLM_MODEL", "deepseek-coder")

    # Base prompt, optionally extended with truncated self-heal feedback.
    parts = [_format_user_prompt(info, task, _rag_retrieve(task, top_k=4))]
    if last_error:
        parts.append("\n[Previous Error]\n" + last_error[:2000])
    if last_code:
        parts.append("\n[Previous Code Snippet]\n" + last_code[-1000:])
    user_prompt = "".join(parts)

    response = client.chat.completions.create(
        model=model_name,
        messages=[
            {"role": "system", "content": SYSTEM_PROMPT},
            {"role": "user", "content": user_prompt},
        ],
        temperature=DEFAULT_TEMPERATURE,
        max_tokens=DEFAULT_MAX_TOKENS,
    )
    raw = response.choices[0].message.content or ""
    # Drop surrounding markdown code fences, if the model added them.
    return re.sub(r"^```(?:python)?\n|\n```$", "", raw.strip(), flags=re.MULTILINE)


# ---------------------------- Static audit (fallback) ----------------------------
def _minimal_static_audit(code: str) -> Tuple[bool, List[str]]:
    """Fallback AST audit: reject disallowed imports, builtins, and module access.

    Returns:
        (ok, problems): ok is True iff no problems were found.
    """
    try:
        tree = ast.parse(code)
    except SyntaxError as e:
        return False, [f"SyntaxError: {e}"]

    problems: List[str] = []
    for node in ast.walk(tree):
        # `import x[.y]` — judged by the top-level package name.
        if isinstance(node, ast.Import):
            problems.extend(
                f"Forbidden import: {alias.name}"
                for alias in node.names
                if (alias.name or "").split(".")[0] not in ALLOWED_IMPORTS
            )
        # `from x import y` — same top-level check.
        if isinstance(node, ast.ImportFrom):
            if (node.module or "").split(".")[0] not in ALLOWED_IMPORTS:
                problems.append(f"Forbidden from-import: {node.module}")
        # Bare references to dangerous builtins.
        if isinstance(node, ast.Name) and node.id in FORBIDDEN_BUILTINS:
            problems.append(f"Forbidden builtin: {node.id}")
        # Attribute access on blocked module roots (e.g. os.system).
        if (isinstance(node, ast.Attribute)
                and isinstance(node.value, ast.Name)
                and node.value.id in FORBIDDEN_MODULE_ROOTS):
            problems.append(f"Forbidden access: {node.value.id}.{node.attr}")
    return not problems, problems


def static_audit(code: str) -> Tuple[bool, List[str]]:
    """Audit generated code, preferring the external auditor when installed."""
    if external_static_audit is None:
        return _minimal_static_audit(code)
    try:
        return external_static_audit(code)
    except Exception as e:
        # An auditor crash is treated as a failed audit, never a pass.
        return False, [f"External audit failed: {e}"]


# ---------------------------- Execution (fallback sandbox) ----------------------------
def _safe_import(name, globals=None, locals=None, fromlist=(), level=0):
    """Drop-in __import__ replacement that only admits allowlisted packages."""
    if name.split(".")[0] not in ALLOWED_IMPORTS:
        raise ImportError(f"Import blocked: {name}")
    # Delegate to the real import machinery for permitted modules.
    return __import__(name, globals, locals, fromlist, level)


def _exec_in_sandbox_fallback(code: str, extra_globals: Optional[Dict[str, Any]] = None):
    """Execute generated code with a restricted builtins table.

    Only a conservative set of builtins is exposed and __import__ is routed
    through the allowlist hook. NOTE(review): this is containment for honest
    code, not a hard security boundary — exec-based sandboxes are escapable.

    Args:
        code: Python source to run.
        extra_globals: Extra names injected into the execution namespace.
    """
    import builtins
    # The original table lacked constructors (str/int/dict/...) and exception
    # types, so perfectly legitimate generated code died with NameError.
    # Everything listed here is side-effect-free.
    _SAFE_NAMES = [
        "abs", "min", "max", "sum", "len", "range", "print", "enumerate",
        "map", "filter", "zip", "sorted", "round", "all", "any",
        "bool", "int", "float", "str", "list", "dict", "set", "tuple",
        "frozenset", "repr", "reversed", "isinstance", "issubclass",
        "divmod", "pow", "iter", "next", "slice", "format", "hasattr",
        "Exception", "ValueError", "TypeError", "KeyError", "IndexError",
        "ZeroDivisionError", "ArithmeticError", "StopIteration", "RuntimeError",
    ]
    safe_builtins = {k: getattr(builtins, k) for k in _SAFE_NAMES}
    safe_builtins["__import__"] = _safe_import
    # `class` statements in generated code require this hook.
    safe_builtins["__build_class__"] = builtins.__build_class__
    g: Dict[str, Any] = {"__builtins__": safe_builtins, "__name__": "llm_code"}
    # expose short aliases
    g.update({"pd": pd, "np": np})
    if extra_globals:
        g.update(extra_globals)
    exec(compile(code, "<llm_code>", "exec"), g, g)


def exec_controlled(code: str, extra_globals: Optional[Dict[str, Any]] = None):
    """Run generated code via the external sandbox when installed, else the fallback."""
    if external_exec_in_sandbox is None:
        return _exec_in_sandbox_fallback(code, extra_globals=extra_globals)
    return external_exec_in_sandbox(code, extra_globals=extra_globals)


# ---------------------------- Reporting ----------------------------
def _hash_code(code: str) -> str:
    return hashlib.sha256(code.encode("utf-8")).hexdigest()[:12]


def _write_report(art: RunArtifacts, excel: Path, sheet: str, task: str,
                  code: str, stdout: str, stderr: str, success: bool,
                  max_len: int = 4000) -> None:
    """Write the markdown execution report to art.report_path.

    Args:
        art: Run artifacts (provides ts and report_path).
        excel: Original input file (only its name is shown).
        sheet: Requested sheet, or "" for auto-selection.
        task: The natural-language task.
        code: The generated code (hashed for traceability).
        stdout/stderr: Captured execution output, truncated to max_len.
        success: Whether the run produced the DONE marker.
    """
    art.report_path.parent.mkdir(parents=True, exist_ok=True)  # ensure the directory exists
    # Build the report from column-0 lines instead of dedent()ing an f-string:
    # interpolated stdout/stderr lines start at column 0, which defeated
    # textwrap.dedent and left every template line indented by 4 spaces.
    lines = [
        f"# Execution Report ({art.ts})",
        f"- File: {excel.name}",
        f"- Sheet: {sheet or '(auto)'}",
        f"- Task: {task}",
        "- Outputs: outputs/result.xlsx (or Result sheet)",
        f"- Code hash: {_hash_code(code)}",
        f"- Success: {success}",
        "",
        "## Stdout",
        "```",
        stdout.strip()[:max_len],
        "```",
        "",
        "## Stderr",
        "```",
        stderr.strip()[:max_len],
        "```",
        "",
    ]
    art.report_path.write_text("\n".join(lines), encoding="utf-8")


# ---------------------------- Orchestration ----------------------------
def run(excel_path: str, task: str, sheet: str = "", preview_rows: int = 30, retries: int = 2, mode: str = "SAFE") -> bool:
    """Run the full pipeline: preview → codegen → audit → execute → report.

    Args:
        excel_path: Path to the input Excel/CSV file.
        task: Natural-language instruction for the LLM.
        sheet: Target sheet name ("" = auto-select the first sheet).
        preview_rows: Rows sampled into the prompt.
        retries: Number of self-heal retries after a failed attempt.
        mode: Execution mode (currently reserved; only SAFE behavior exists).

    Returns:
        True on success (code ran and printed the DONE marker).

    Raises:
        FileNotFoundError: if the input file does not exist.
    """
    src = Path(excel_path).expanduser().resolve()
    if not src.exists():
        # Explicit raise instead of `assert`: asserts vanish under `python -O`.
        raise FileNotFoundError(f"Input not found: {src}")

    artifacts = _mk_run_dir(src)

    # Build preview/context
    info = _load_preview(artifacts.input_copy, sheet=sheet, preview_rows=preview_rows)

    # Compute the prompt snapshot once: it does not depend on the attempt
    # (the original re-ran retrieval inside the loop, duplicating work, and
    # the snapshot never included per-attempt error feedback anyway).
    prompt_snapshot = f"SYSTEM:\n{SYSTEM_PROMPT}\n\nUSER:\n{_format_user_prompt(info, task, _rag_retrieve(task))}"

    last_error = ""
    last_code = ""
    success = False

    for attempt in range(retries + 1):
        # (Re)generate code with feedback from the previous attempt.
        code = _llm_generate_code(task, info, last_error=last_error, last_code=last_code)

        # Save prompt & code snapshot for this attempt.
        artifacts.prompt_path.write_text(prompt_snapshot, encoding="utf-8")
        artifacts.code_path.write_text(code, encoding="utf-8")

        # Static audit gate before anything is executed.
        ok, problems = static_audit(code)
        if not ok:
            last_error = "Static audit failed: " + "; ".join(problems)
            last_code = code
            continue

        # Controlled execution with captured IO.
        stdout_io, stderr_io = io.StringIO(), io.StringIO()
        try:
            with contextlib.redirect_stdout(stdout_io), contextlib.redirect_stderr(stderr_io):
                # Provide INPUT_PATH so generated code can read it.
                extra_globals: Dict[str, Any] = {"INPUT_PATH": str(artifacts.input_copy)}
                if _HAS_TOOLBOX:
                    # Inject helper functions for code that calls them directly.
                    from toolbox import equal_split, weighted_split, ensure_sum_fix  # noqa: F401
                    extra_globals.update({
                        "equal_split": equal_split,  # type: ignore
                        "weighted_split": weighted_split,  # type: ignore
                        "ensure_sum_fix": ensure_sum_fix,  # type: ignore
                    })
                exec_controlled(code, extra_globals=extra_globals)
        except Exception as e:
            last_error = f"Execution error: {e}\nSTDERR: {stderr_io.getvalue()[:1000]}"
            last_code = code
            artifacts.stdout_path.write_text(stdout_io.getvalue(), encoding="utf-8")
            artifacts.stderr_path.write_text(stderr_io.getvalue(), encoding="utf-8")
            continue

        # Persist IO logs and check for the DONE marker.
        out_text = stdout_io.getvalue()
        err_text = stderr_io.getvalue()
        artifacts.stdout_path.write_text(out_text, encoding="utf-8")
        artifacts.stderr_path.write_text(err_text, encoding="utf-8")

        if re.search(r"^DONE:\s*", out_text, flags=re.MULTILINE):
            success = True
            _write_report(artifacts, src, sheet, task, code, out_text, err_text, True)
            break
        last_error = "Missing DONE marker in stdout"
        last_code = code

    if not success:
        # Read back the last logs with closed handles (the original leaked
        # open file objects from bare open(...).read() calls here).
        out_text = artifacts.stdout_path.read_text(encoding="utf-8") if artifacts.stdout_path.exists() else ""
        err_text = artifacts.stderr_path.read_text(encoding="utf-8") if artifacts.stderr_path.exists() else ""
        _write_report(artifacts, src, sheet, task, last_code, out_text, err_text, False)

    return success


# ---------------------------- CLI entrypoint ----------------------------
if __name__ == "__main__":
    # CLI fallback entrypoint; mirrors run()'s keyword arguments one-to-one.
    parser = argparse.ArgumentParser(description="AtlasSplit — Spreadsheet Allocation & Settlement Agent")
    parser.add_argument("--excel", required=True, help="Path to Excel/CSV file")
    parser.add_argument("--sheet", default="", help="Target sheet name (optional)")
    parser.add_argument("--task", required=True, help="Natural language instruction")
    parser.add_argument("--preview_rows", type=int, default=30, help="Rows to preview to the LLM")
    parser.add_argument("--retries", type=int, default=2, help="Self-heal retries on failure")
    parser.add_argument("--mode", default="SAFE", choices=["SAFE", "RELAXED", "UNSAFE"], help="Execution mode (reserved; SAFE recommended)")
    cli = parser.parse_args()

    succeeded = run(
        cli.excel,
        cli.task,
        sheet=cli.sheet,
        preview_rows=cli.preview_rows,
        retries=cli.retries,
        mode=cli.mode,
    )
    print("DONE" if succeeded else "FAIL")
