import os
import re
from crewai import Crew, Agent, Task
try:
    from crewai import LLM  # Preferred wrapper that uses litellm under the hood
except Exception:
    LLM = None  # type: ignore
try:
    from crewai import Process  # optional across versions
except Exception:  # pragma: no cover
    Process = None  # type: ignore
from typing import List, Dict, Any

# Adjust this module if your actual agents.yaml/tasks.yaml structure differs;
# agents and tasks are built directly through the Python API here.

def _build_llm():
    """Construct a CrewAI ``LLM`` pointed at a local Ollama server.

    The model id is prefixed with ``ollama/`` so litellm can resolve the
    provider. Returns ``None`` when the LLM wrapper is not importable or
    construction fails, letting the caller decide on a fallback.
    """
    host = os.getenv("OLLAMA_BASE_URL", "http://localhost:11434")
    model = os.getenv("OLLAMA_MODEL", "llama3.1")
    temp = float(os.getenv("CREWAI_LLM_TEMPERATURE", "0.2"))
    if LLM is None:
        # No CrewAI LLM wrapper in this version; caller handles the fallback.
        return None
    model_id = f"ollama/{model}"
    # Newer CrewAI versions accept 'base_url'; some older ones want 'api_base'.
    for url_kwarg in ("base_url", "api_base"):
        try:
            return LLM(model=model_id, temperature=temp, **{url_kwarg: host})  # type: ignore
        except Exception:
            continue
    return None


def _normalize_report_markdown(text: str) -> str:
    """Normalize AI report to clean bullets like '' and bold markers into Markdown.
    - Convert leading weird bullets to '- '
    - Convert lines like *标题** or **标题** to '## 标题'
    - Promote common section titles to headings
    - Prefix '问题:'/'建议:' lines with '- '
    """
    bullet_chars = "\u2022\u00B7\u2219\u25CF\u25E6\u2043\u30FB\u2027\u25AA\u25AB\u25A0\u25A1\u204C\u204D"
    section_titles = {"代码检验报告", "摘要", "问题清单", "影响评估", "修复建议", "结论"}
    out = []
    prev_blank = True
    for raw in text.splitlines():
        s = raw.rstrip()
        # strip leading weird bullets and spaces
        s = re.sub(rf"^\s*([{bullet_chars}]+)\s*", "", s)
        # Heading patterns: **标题** or *标题**
        m = re.match(r"^\*+\s*(.+?)\s*\*+\s*$", s)
        if m:
            s = f"## {m.group(1).strip()}"
        elif s in section_titles:
            s = f"## {s}"
        # Promote Issue/Suggestion lines to bullet items
        s = re.sub(r"^(问题|建议)\s*[:：]", r"- \1:", s)
        # Normalize standalone bullet-like starts (e.g., '*' at line start)
        s = re.sub(r"^\s*\*\s+", "- ", s)
        # Ensure blank line before headings for markdown
        if s.startswith("#") and not prev_blank:
            out.append("")
        out.append(s)
        prev_blank = (s.strip() == "")
    return "\n".join(out)


def _build_local_report(files: Dict[str, str], issues: List[Dict[str, Any]] | List[Any], hint: str | None = None) -> str:
    """Produce a minimal rule-based report without invoking any LLM.

    Used when CrewAI/Ollama are unavailable or execution fails. The output
    goes through the same Markdown normalizer as AI-generated reports.
    """
    lines: List[str] = ["代码检验报告（本地回退生成）", "", "一、扫描文件", ""]
    lines.extend(f"- {name}" for name in files)
    lines.extend(["", "二、问题清单"])
    if issues:
        for i, item in enumerate(issues, 1):
            if isinstance(item, dict):
                lines.append(
                    f"{i}. 文件: {item.get('file','?')} 行: {item.get('line','?')} "
                    f"问题: {item.get('message','')} 建议: {item.get('suggestion','无')}"
                )
            else:
                # Non-dict issues are rendered as plain text.
                lines.append(f"{i}. {item}")
    else:
        lines.append("- 未发现问题")
    if hint:
        lines.extend(["", f"[提示] {hint}"])
    return _normalize_report_markdown("\n".join(lines))


def generate_validation_report(files: Dict[str, str], issues: List[Dict[str, Any]] | List[Any]) -> str:
    """Generate a code-validation report via CrewAI, degrading to a local report.

    Args:
        files: Mapping of filename -> source content (contents are not sent
            to the model, only the filenames).
        issues: Issue records shaped like {file, line, message, suggestion},
            or arbitrary objects rendered as plain text.

    Returns:
        The report as normalized Markdown. Never raises: any CrewAI failure
        falls back to a locally generated report.
    """
    llm = _build_llm()
    # Without a working local Ollama and without OPENAI_API_KEY, skip CrewAI
    # entirely to avoid a litellm authentication error downstream.
    if llm is None and not os.getenv("OPENAI_API_KEY"):
        return _build_local_report(files, issues, hint="未检测到本地 Ollama (或连接失败)，且未设置 OPENAI_API_KEY，跳过 CrewAI 生成。")

    agent = Agent(
        role="代码检验专家",
        goal="根据代码和问题清单，生成详细的检验报告，包括每个问题的分析和建议。",
        backstory="你是资深代码审查员，擅长发现代码中的潜在问题并给出改进建议。",
        verbose=True,
        llm=llm,
    )

    # Fold the context into the description: some CrewAI versions validate
    # the Task 'context' field strictly, so it is avoided altogether.
    full_description = (
        "生成代码检验报告。\n\n" + _build_task_context(files, issues) +
        "\n\n输出要求：结构清晰（可分节：摘要/问题清单/影响评估/修复建议/结论），对每个问题进行具体分析与可操作建议。"
    )

    try:
        task = Task(
            description=full_description,
            agent=agent,
            expected_output="一份结构化、详细的代码检验报告，逐条分析问题并提供可操作建议"
        )
        crew = _build_crew(agent, task)
        # Prefer kickoff(); very old versions only expose run().
        result = crew.kickoff() if hasattr(crew, "kickoff") else crew.run()
        return _normalize_report_markdown(_result_text(result))
    except Exception as e:
        # Degrade gracefully so the frontend never sees a raw error.
        return _build_local_report(files, issues, hint=f"CrewAI 执行失败，已使用本地规则回退。原因: {e}")


def _build_task_context(files: Dict[str, str], issues: List[Dict[str, Any]] | List[Any]) -> str:
    """Render the file list and issue list as the task prompt body."""
    parts = ["\n代码文件列表：\n"]
    # Only filenames go into the prompt; contents are deliberately omitted.
    for fname in files:
        parts.append(f"- {fname} (内容略)\n")
    parts.append("\n问题清单：\n")
    for idx, issue in enumerate(issues or [], 1):
        if isinstance(issue, dict):
            fname = issue.get('file', '未知文件')
            line = issue.get('line', '未知')
            message = issue.get('message', '')
            suggestion = issue.get('suggestion', '无')
            parts.append(f"{idx}. 文件: {fname}, 行: {line}, 问题: {message}, 建议: {suggestion}\n")
        else:
            # Fallback: non-dict issues are recorded as plain text.
            parts.append(f"{idx}. {str(issue)}\n")
    parts.append("\n请根据上述内容，生成一份详细的代码检验报告，逐条分析每个问题，并给出改进建议。报告需结构清晰，便于开发者理解和修复。")
    return "".join(parts)


def _build_crew(agent: "Agent", task: "Task") -> "Crew":
    """Create a Crew, tolerating CrewAI versions without the 'process' kwarg."""
    try:
        if Process and hasattr(Process, "sequential"):
            return Crew(agents=[agent], tasks=[task], process=Process.sequential, verbose=True)
    except TypeError:
        # This version's Crew() does not accept 'process'; retry without it.
        pass
    return Crew(agents=[agent], tasks=[task], verbose=True)


def _result_text(result: Any) -> str:
    """Coerce the crew result into a string across CrewAI versions."""
    if isinstance(result, str):
        return result
    for attr in ("final_output", "raw"):
        value = getattr(result, attr, None)
        if isinstance(value, str):
            return value
    return str(result)
