#!/usr/bin/env python3
"""
最简 RAG 单次补丁生成器（OpenAI 线上模型）
运行后得到 preds.json → 直接 sb-cli 提交
"""
import json, os, glob, tiktoken, tqdm, argparse
from datasets import load_dataset
from openai import OpenAI

# Reads the API key from the OPENAI_API_KEY environment variable.
client = OpenAI()

# ---------- Configuration ----------
MODEL = "gpt-3.5-turbo"  # alternatives: gpt-4 / gpt-4-turbo
MAX_CODE_TOKENS = 3000  # cap on source-file tokens included in the prompt
MAX_OUTPUT = 1024  # max tokens the model may spend on the diff
TEMP = 0.2  # low temperature for more deterministic patches
# ----------------------------

# Tokenizer matching MODEL, used by trim_code() to measure/truncate the source file.
encoding = tiktoken.encoding_for_model(MODEL)


def trim_code(code: str, max_tokens: int) -> str:
    """Truncate *code* so its token count does not exceed *max_tokens*."""
    token_ids = encoding.encode(code)
    if len(token_ids) > max_tokens:
        # Decode only the leading max_tokens tokens back into text.
        return encoding.decode(token_ids[:max_tokens])
    return code


def read_problem_file(instance):
    """Return the full text of the first file touched by the gold patch.

    Extracts the first ``a/<path>`` from the ``diff --git a/... b/...``
    header of ``instance["patch"]`` and searches for that file under
    ``repos/<owner>@<name>/`` (presumably pre-cloned checkouts — confirm
    against the repo layout).

    Returns "" when the patch header cannot be parsed or the file is not
    found on disk.
    """
    repo = instance["repo"].replace("/", "@")
    patch = instance["patch"]
    # Guard against malformed/empty patches: the original unconditional
    # split(" a/")[1] raised IndexError when the header was missing.
    if " a/" not in patch or " b/" not in patch:
        return ""
    patch_path = patch.split(" a/")[1].split(" b/")[0]
    candidates = glob.glob(f"repos/{repo}/**/{patch_path}", recursive=True)
    if not candidates:
        return ""
    with open(candidates[0], encoding="utf-8") as f:
        return f.read()


def build_prompt(problem: str, code: str) -> str:
    """Assemble the user prompt from the issue text and the (trimmed) source file.

    The original triple-quoted f-string carried the function's own 12-space
    indentation into every line of the prompt, corrupting the text sent to
    the model; the template is now built left-aligned.
    """
    code = trim_code(code, MAX_CODE_TOKENS)
    return (
        "Below is a GitHub issue and the corresponding source file.\n"
        "Issue:\n"
        f"{problem}\n"
        "\n"
        "Source file:\n"
        f"{code}\n"
        "Please generate a unified git diff that fixes the issue.\n"
        "Return only the diff block, starting with diff --git.\n"
    )


def call_openai(prompt: str) -> str:
    """Send *prompt* as a single user message and return the model's reply.

    Bug fix: the original referenced undefined names ``TEMPERATURE`` and
    ``MAX_OUTPUT_TOKENS`` (NameError at call time); the module's constants
    are ``TEMP`` and ``MAX_OUTPUT``.
    """
    rsp = client.chat.completions.create(
        model=MODEL,
        messages=[{"role": "user", "content": prompt}],
        temperature=TEMP,
        max_tokens=MAX_OUTPUT,
    )
    # content can be None (e.g. refusals); normalize to "".
    return rsp.choices[0].message.content or ""


def main():
    """CLI entry point: generate one candidate patch per instance → preds.json.

    Iterates the chosen SWE-bench_Verified split, builds a prompt from the
    issue plus the first patched file, asks the model for a unified diff,
    and writes {instance_id: {model_patch, model_name_or_path}} as JSON.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("-o", "--output", default="preds.json", help="输出文件名")
    parser.add_argument("-s", "--split", default="test", choices=["dev", "test"])
    args = parser.parse_args()

    ds = load_dataset("princeton-nlp/SWE-bench_Verified", split=args.split)
    preds = {}

    # Bug fix: the file does `import tqdm`, so the progress-bar callable is
    # tqdm.tqdm — calling the module object directly raises TypeError.
    for ins in tqdm.tqdm(ds, desc="Generating patches"):
        problem = ins["problem_statement"]
        code = read_problem_file(ins)
        prompt = build_prompt(problem, code)
        raw = call_openai(prompt)

        # Keep only the unified diff: everything from the first "diff --git"
        # up to (but not including) a closing ``` fence / trailing prose.
        if "diff --git" in raw:
            diff = "diff --git" + raw.split("diff --git")[1]
            if "\n```" in diff:
                diff = diff.split("\n```")[0]
        else:
            diff = ""  # model produced no usable patch
        preds[ins["instance_id"]] = {
            "model_patch": diff,
            "model_name_or_path": MODEL,
        }

    with open(args.output, "w", encoding="utf-8") as f:
        json.dump(preds, f, indent=2, ensure_ascii=False)
    print(f"✅ 已生成 {len(preds)} 条补丁 → {args.output}")


if __name__ == '__main__':
    main()
