from fastapi import FastAPI, UploadFile, File, Form, BackgroundTasks, Request
from fastapi.responses import JSONResponse, FileResponse
from pydantic import BaseModel
from typing import List
import tempfile, os, time, logging, shutil
from langchain_community.document_loaders import TextLoader
from fastapi.middleware.cors import CORSMiddleware
import sys
import json
import warnings

# Optional dependency: GitPython powers the /api/import-github-repo endpoint.
try:
    from git import Repo
    git_available = True
except ImportError:
    git_available = False

# Silence FLAML's optional AutoML hint (not relevant to this project; avoids log noise).
warnings.filterwarnings(
    "ignore",
    message=r"flaml\.automl is not available.*",
    category=UserWarning,
    module=r"flaml.*",
)

# The FastAPI app must be created before any of the route decorators below run.
app = FastAPI()

# Optional dependency: CrewAI-based report generation for /api/validate-report.
try:
    from backend.validate_report import generate_validation_report
    crewai_available = True
except Exception:
    crewai_available = False

# Optional dependency: Word (.docx) export helper.
try:
    from backend.word_utils import save_report_to_word
    docx_available = True
except Exception:
    docx_available = False

# Extensions treated as text (others are decoded best-effort and skipped on failure).
# (Moved from /api/bug-check to module scope so all endpoints share one set.)
allowed_exts = {
    ".py", ".js", ".ts", ".tsx", ".jsx", ".java", ".go", ".rb", ".cs",
    ".php", ".c", ".cpp", ".rs", ".kt", ".kts", ".scala", ".sh", ".ps1",
    ".yml", ".yaml", ".json", ".toml", ".ini", ".cfg", ".md", ".txt"
}

@app.post("/api/validate-report")
async def validate_report(request: Request, files: List[UploadFile] = File(...)):
    """
    Generate a code validation report via CrewAI.

    - Accepts uploaded files (filtered by ``allowed_exts``; files without an
      extension are still attempted as text).
    - Reuses the shared heuristic rules to extract issues.
    - Calls CrewAI to build the report; ``?format=word`` returns a .docx
      download instead of the JSON payload.
    """
    if not crewai_available:
        return JSONResponse(status_code=503, content={"success": False, "error": "CrewAI 未安装或导入失败，请运行 pip install crewai"})

    temp_files: List[str] = []
    file_contents: dict[str, str] = {}
    issues: list[dict] = []

    try:
        for f in files:
            ext = os.path.splitext(f.filename)[1].lower()
            if ext and ext not in allowed_exts:
                continue
            # Persist the upload so loaders that need a real path can read it.
            with tempfile.NamedTemporaryFile(delete=False) as tmp:
                tmp.write(await f.read())
                tmp_path = tmp.name
                temp_files.append(tmp_path)
            code = ""
            try:
                # Module-level TextLoader import (encoding auto-detection);
                # fall back to a lossy utf-8 read on failure.
                loader = TextLoader(tmp_path, autodetect_encoding=True)
                docs = loader.load()
                code = docs[0].page_content if docs else ""
            except Exception:
                try:
                    with open(tmp_path, "r", encoding="utf-8", errors="ignore") as fp:
                        code = fp.read()
                except Exception:
                    continue
            if not code.strip():
                continue
            file_contents[f.filename] = code
            for it in heuristic_analyze(code):
                if isinstance(it, dict):
                    issues.append({
                        "file": f.filename,
                        "line": it.get("line"),
                        "message": it.get("message"),
                        "suggestion": it.get("suggestion"),
                    })
                else:
                    # Fallback: unexpected item type, record it as plain text.
                    issues.append({
                        "file": f.filename,
                        "line": None,
                        "message": str(it),
                        "suggestion": "",
                    })

        # Hand files + issues to CrewAI for report generation.
        report = generate_validation_report(file_contents, issues)

        # "fmt" avoids shadowing the builtin ``format``.
        fmt = request.query_params.get("format")
        if fmt == "word":
            if not docx_available:
                return JSONResponse(content={"success": False, "error": "python-docx 未安装，请运行 pip install python-docx"})
            word_path = save_report_to_word(report)
            from datetime import datetime
            # Download name carries the current date, e.g. 代码检验报告2024-01-01.docx.
            date_str = datetime.now().strftime("%Y-%m-%d")
            download_name = f"代码检验报告{date_str}.docx"
            # NOTE(review): word_path itself is never removed here — consider
            # deleting it via BackgroundTasks after the response is sent.
            return FileResponse(word_path, filename=download_name, media_type="application/vnd.openxmlformats-officedocument.wordprocessingml.document")
        return JSONResponse(content={"success": True, "report": report})
    except Exception as e:
        # Log the full traceback for diagnosis. The module-level logger is
        # always defined before any request can reach this handler, so no
        # defensive try/except is needed around it.
        logger.exception("/api/validate-report error")
        return JSONResponse(content={"success": False, "error": str(e)})
    finally:
        # Always remove the temp copies of the uploads.
        for p in temp_files:
            try:
                os.remove(p)
            except Exception:
                pass


# Module-level logger for the backend API (defined before the helpers that use it).
logger = logging.getLogger("ai_agent.backend")
if not logger.handlers:
    _handler = logging.StreamHandler()
    _handler.setFormatter(logging.Formatter("[%(asctime)s] %(levelname)s %(message)s"))
    logger.addHandler(_handler)
# Level is configurable via the LOG_LEVEL environment variable (default INFO).
logger.setLevel(os.environ.get("LOG_LEVEL", "INFO"))


# 简单启发式规则检测（无需外部 API）
# Simple heuristic rule checks (no external API required).
def heuristic_analyze(code: str):
    """Run lightweight heuristic checks on *code*.

    Returns a list of issue dicts with keys ``line`` (1-based), ``message``
    and ``suggestion``. Rules: TODO/FIXME markers, debug output statements,
    overly long lines, and eval/exec usage.
    """
    todo_hits: list = []
    debug_hits: list = []
    long_hits: list = []
    eval_hits: list = []

    # Single pass over the lines; hits are bucketed per rule and concatenated
    # at the end to keep the original ordering (all TODO/FIXME issues first,
    # then debug output, then long lines, then eval/exec).
    for lineno, raw in enumerate(code.splitlines(), start=1):
        lowered = raw.lower()

        # Rule 1: unfinished-work markers (case-insensitive).
        if "fixme" in lowered or "todo" in lowered:
            todo_hits.append({
                "line": lineno,
                "message": "包含 TODO/FIXME 标记，可能存在未完成或需修复的代码",
                "suggestion": "跟进 TODO/FIXME 标记，完善实现或修复问题"
            })

        # Rule 2: Python/JS debug output (case-sensitive, matching the
        # conventional spellings).
        if "print(" in raw or "console.log(" in raw:
            debug_hits.append({
                "line": lineno,
                "message": "发现调试输出语句",
                "suggestion": "考虑移除或使用统一日志组件，并控制日志级别"
            })

        # Rule 3: readability — lines longer than 160 characters.
        if len(raw) > 160:
            long_hits.append({
                "line": lineno,
                "message": "代码行过长，可能影响可读性",
                "suggestion": "适当换行或拆分表达式，遵循代码风格指南"
            })

        # Rule 4: dynamic code execution (case-insensitive).
        if "eval(" in lowered or "exec(" in lowered:
            eval_hits.append({
                "line": lineno,
                "message": "检测到 eval/exec 的使用，可能存在安全风险",
                "suggestion": "避免动态执行不受信任的代码，使用安全替代方案"
            })

    return todo_hits + debug_hits + long_hits + eval_hits

# --- 新增：辅助函数，用于分析本地目录 ---
def analyze_directory_heuristically(root_dir: str):
    """
    Walk *root_dir* and run the heuristic analyzer on every file whose
    extension is in ``allowed_exts``.

    Returns ``(results, skipped, errors)`` in the same shape as the
    /api/bug-check endpoint.
    """
    results = []
    skipped = []
    errors = []

    for dirpath, _dirs, names in os.walk(root_dir):
        # Never descend into VCS metadata.
        if ".git" in dirpath.split(os.sep):
            continue

        for name in names:
            full_path = os.path.join(dirpath, name)
            # Report paths relative to the repository root.
            rel = os.path.relpath(full_path, root_dir)

            suffix = os.path.splitext(name)[1].lower()
            # Extension-less files and unknown extensions are skipped here
            # (an empty suffix is never a member of allowed_exts).
            if suffix not in allowed_exts:
                logger.info(f"skip non-text file by ext: {rel}")
                skipped.append(rel)
                results.append({"file": rel, "issues": []})
                continue

            text = ""
            try:
                # TextLoader first (encoding auto-detection), plain lossy
                # utf-8 read as the fallback.
                docs = TextLoader(full_path, autodetect_encoding=True).load()
                text = docs[0].page_content if docs else ""
            except Exception:
                try:
                    with open(full_path, "r", encoding="utf-8", errors="ignore") as handle:
                        text = handle.read()
                except Exception as read_err:
                    errors.append(f"读取文件失败: {rel}: {read_err}")
                    results.append({"file": rel, "issues": [
                        {"line": None, "message": "无法读取文件内容", "suggestion": "确认文件编码或类型是否为文本"}
                    ]})
                    continue

            if not text.strip():
                # Empty or binary-looking content: report a clean result.
                results.append({"file": rel, "issues": []})
                continue

            # Fast local detection with the shared heuristic rules.
            found = heuristic_analyze(text)
            logger.info(f"file done: {rel}, issues={len(found)}")
            results.append({"file": rel, "issues": found})

    return results, skipped, errors
# --- 新增结束 ---


@app.get("/api/llm/health")
def llm_health():
    """
    Report which LLM engine the backend would use, plus AutoGen availability,
    to help diagnose environment problems.
    """
    # AutoGen is not probed here, so it is always reported as unavailable.
    autogen_info = {"available": False, "compat": "none"}

    # Infer the engine from environment variables: any Ollama setting wins,
    # otherwise an OpenAI key selects OpenAI.
    ollama_model = os.getenv("OLLAMA_MODEL")
    ollama_base = os.getenv("OLLAMA_BASE_URL")

    engine, model = "unknown", None
    if ollama_model or ollama_base:
        engine, model = "ollama", ollama_model or "(default)"
    elif os.getenv("OPENAI_API_KEY"):
        engine, model = "openai", os.getenv("AUTOGEN_MODEL", "gpt-4")

    return {
        "engine": engine,
        "model": model,
        "autogenAvailable": False,
        "autogen": autogen_info,
        "python": sys.executable,
    }

@app.post("/api/bug-check")
async def bug_check(files: List[UploadFile] = File(...)):
    """
    Heuristically scan uploaded files for common problems (TODO markers,
    debug output, overly long lines, eval/exec) and return per-file issues.
    """
    started = time.perf_counter()
    logger.info(f"/api/bug-check received {len(files)} files")

    results = []
    errors: List[str] = []
    skipped: List[str] = []
    pending_tmp: List[str] = []  # temp files created for the current upload

    for upload in files:
        try:
            logger.info(f"processing file: {upload.filename}")
            suffix = os.path.splitext(upload.filename)[1].lower()
            # Known non-text extensions are skipped outright; extension-less
            # files fall through and are attempted as text.
            if suffix and suffix not in allowed_exts:
                logger.info(f"skip non-text file by ext: {upload.filename}")
                skipped.append(upload.filename)
                results.append({"file": upload.filename, "issues": []})
                continue

            # Persist the upload so loaders that need a real path can read it.
            with tempfile.NamedTemporaryFile(delete=False) as tmp:
                tmp.write(await upload.read())
                tmp_location = tmp.name
                pending_tmp.append(tmp_location)

            content = ""
            # TextLoader first (encoding auto-detection), lossy utf-8 read
            # as the fallback.
            try:
                docs = TextLoader(tmp_location, autodetect_encoding=True).load()
                content = docs[0].page_content if docs else ""
            except Exception:
                try:
                    with open(tmp_location, "r", encoding="utf-8", errors="ignore") as fp:
                        content = fp.read()
                except Exception as e2:
                    errors.append(f"读取文件失败: {upload.filename}: {e2}")
                    results.append({"file": upload.filename, "issues": [
                        {"line": None, "message": "无法读取文件内容", "suggestion": "确认文件编码或类型是否为文本"}
                    ]})
                    continue

            # Empty or binary-looking content: report a clean result.
            if not content.strip():
                results.append({"file": upload.filename, "issues": []})
                continue

            # Fast local detection — no API key needed.
            found = heuristic_analyze(content)
            logger.info(f"file done: {upload.filename}, issues={len(found)}")
            results.append({"file": upload.filename, "issues": found})
        except Exception as e:
            errors.append(f"处理文件失败: {upload.filename}: {e}")
            logger.exception(f"error processing file: {upload.filename}")
            results.append({"file": upload.filename, "issues": [
                {"line": None, "message": "处理该文件时出错", "suggestion": "查看后端日志以定位异常"}
            ]})
        finally:
            # Remove this iteration's temp files before moving on.
            for path in pending_tmp:
                if os.path.exists(path):
                    try:
                        os.remove(path)
                    except Exception:
                        pass
            pending_tmp.clear()

    elapsed = time.perf_counter() - started
    total_issues = sum(len(r.get("issues", [])) for r in results)
    logger.info(f"/api/bug-check completed in {elapsed:.2f}s, files={len(files)}, total_issues={total_issues}, errors={len(errors)}")
    payload = {"results": results, "skipped": skipped}
    if errors:
        payload["errors"] = errors
    return JSONResponse(content=payload)


@app.post("/api/auto-fix")
async def auto_fix(
    files: List[UploadFile] = File(...),
    target_issue: str | None = Form(None),
    user_message: str | None = Form(None),
    provider: str | None = Form(None),
    openai_key: str | None = Form(None),
):
    """
    Simplified auto-fix: ask an LLM (Ollama or OpenAI) directly for fixes.

    - Uploaded files are screened by extension, written to temp files and
      read back as text.
    - Heuristic issues are extracted per file.
    - Everything is handed to backend.simple_fixer, which returns the chat
      transcript plus the fully rewritten files.
    """
    from backend.simple_fixer import run_simple_fix

    logger.info(f"/api/auto-fix received {len(files)} files")

    temp_paths: List[str] = []
    contents: dict = {}
    per_file_issues: dict[str, list[dict]] = {}

    try:
        for upload in files:
            suffix = os.path.splitext(upload.filename)[1].lower()
            if suffix and suffix not in allowed_exts:
                logger.info(f"auto-fix: skip non-text file {upload.filename}")
                continue

            with tempfile.NamedTemporaryFile(delete=False) as tmp:
                tmp.write(await upload.read())
                tmp_name = tmp.name
                temp_paths.append(tmp_name)

            text = ""
            # TextLoader first, lossy utf-8 read as the fallback.
            try:
                docs = TextLoader(tmp_name, autodetect_encoding=True).load()
                text = docs[0].page_content if docs else ""
            except Exception:
                try:
                    with open(tmp_name, "r", encoding="utf-8", errors="ignore") as fp:
                        text = fp.read()
                except Exception:
                    logger.warning(f"auto-fix: cannot read file {upload.filename}")
                    continue
            if not text.strip():
                continue

            contents[upload.filename] = text
            per_file_issues[upload.filename] = heuristic_analyze(text)

        # Parse the optional target issue (a JSON object); anything else is ignored.
        tgt = None
        if target_issue:
            try:
                parsed = json.loads(target_issue)
                if isinstance(parsed, dict):
                    tgt = parsed
            except Exception:
                tgt = None

        # provider / openai_key arrive as form fields sent by the frontend.
        logger.info(f"auto-fix: provider={provider}")

        result = run_simple_fix(
            contents,
            per_file_issues,
            user_message=user_message,
            target_issue=tgt,
            provider=provider,
            openai_key=openai_key,
        )
        return JSONResponse(content=result)

    finally:
        # Best-effort removal of every temp file created above.
        for path in temp_paths:
            try:
                os.remove(path)
            except Exception:
                pass


# --- GitHub import route ---

class GitHubRepo(BaseModel):
    """Request body for /api/import-github-repo."""
    # The repository's clone URL (passed straight to Repo.clone_from).
    url: str

def cleanup_temp_dir(path: str):
    """Recursively delete *path*; safe to run as a FastAPI background task."""
    logger.info(f"Cleaning up temp directory: {path}")
    try:
        shutil.rmtree(path)
    except Exception as e:
        # Best effort: a leftover temp directory is not fatal, so just log.
        logger.error(f"Failed to cleanup temp directory {path}: {e}")

@app.post("/api/import-github-repo")
async def import_github_repo(repo: GitHubRepo, background_tasks: BackgroundTasks):
    """
    Clone a repository from a GitHub URL and run the heuristic analysis on it.
    """
    if not git_available:
        return JSONResponse(status_code=503, content={"success": False, "error": "GitPython 未安装或导入失败，请运行 pip install GitPython"})

    started = time.perf_counter()
    workdir = tempfile.mkdtemp()

    # The clone directory is removed after the response has been sent.
    background_tasks.add_task(cleanup_temp_dir, workdir)

    try:
        logger.info(f"Cloning {repo.url} into {workdir}...")
        # depth=1 shallow clone: only the working tree is needed.
        Repo.clone_from(repo.url, workdir, depth=1)

        logger.info(f"Analyzing directory: {workdir}")
        results, skipped, errors = analyze_directory_heuristically(workdir)

        elapsed = time.perf_counter() - started
        total_issues = sum(len(r.get("issues", [])) for r in results)
        logger.info(f"/api/import-github-repo completed in {elapsed:.2f}s, repo={repo.url}, total_issues={total_issues}, errors={len(errors)}")

        payload = {"results": results, "skipped": skipped}
        if errors:
            payload["errors"] = errors
        return JSONResponse(content=payload)

    except Exception as e:
        logger.exception(f"Failed to process repo: {repo.url}")
        return JSONResponse(status_code=500, content={"success": False, "error": f"克隆或分析仓库失败: {str(e)}"})
    # NOTE: workdir is cleaned up by the registered background task.

# --- End of GitHub import routes ---


# Allow CORS so dev/preview frontends on other ports can call this API.
# NOTE(review): allow_origins=["*"] combined with allow_credentials=True is
# very permissive — restrict origins for production deployments.
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)

# (Logger setup lives earlier in the file, before its first use.)