#!/usr/bin/env python3
"""
最简 RAG 单次补丁生成器（OpenAI 线上模型）
运行后得到 preds.json → 直接 sb-cli 提交
"""
import json, os, glob, tiktoken, argparse
from datasets import load_dataset
from openai import OpenAI
from tqdm import tqdm

# OpenAI-compatible client for the self-hosted endpoint.
# NOTE(review): a live API key was committed here; the env var
# OPENAI_API_KEY now takes precedence — rotate the fallback secret.
client = OpenAI(
    base_url="http://115.190.56.170:8000/v1",  # mind the trailing /v1 — do not drop it
    api_key=os.environ.get("OPENAI_API_KEY", "sk-4duyWc4ZphbqBIAQytlp3A"),
)

# ---------- Configuration ----------
MODEL = "qwen3-coder-480b-a35b-instruct"  # any chat model served by the endpoint (e.g. gpt-4 / gpt-4-turbo)
MAX_CODE_TOKENS = 3000  # budget for the *input* source code, to keep the prompt from growing too long
MAX_OUTPUT = 1024  # budget for the *generated* diff (completion max_tokens)
TEMPERATURE = 0.2  # low temperature: patches should be near-deterministic


# -----------------------------------

# encoding = tiktoken.encoding_for_model(MODEL)


# def trim_code(code: str, max_tokens: int) -> str:
#     """简单截断，保证不超 token"""
#     tokens = encoding.encode(code)
#     if len(tokens) <= max_tokens:
#         return code
#     return encoding.decode(tokens[:max_tokens])

def trim_code(code: str, max_tokens: int) -> str:
    """Truncate *code* by characters so it stays under a rough token budget.

    Character-based stand-in for tiktoken trimming: assumes a conservative
    1 token ≈ 4 characters, so everything past ``max_tokens * 4`` characters
    is dropped.
    """
    char_budget = 4 * max_tokens
    if len(code) <= char_budget:
        return code
    return code[:char_budget]


def read_problem_file(index, base_dir=r"D:\JunTuan\project\py_test\swebench使用\swe-bench-verified\commit_target"):
    """Load the cached source file for dataset instance *index*.

    :param index: positional index of the instance; maps to ``{index}.py``.
    :param base_dir: directory holding the pre-downloaded target files.
        Defaults to the original hard-coded location, so existing callers
        are unaffected; parameterized so the script can run elsewhere.
    :return: full file contents as a string.
    :raises FileNotFoundError: if the target file was not pre-downloaded.
    """
    # os.path.join uses the platform separator — identical to the old
    # backslash literal on Windows, and portable elsewhere.
    file_name = os.path.join(base_dir, f"{index}.py")
    with open(file_name, encoding="utf-8") as f:
        return f.read()


def build_prompt(problem: str, code: str) -> str:
    """Assemble the single-turn patch-generation prompt.

    The source code is character-trimmed to the MAX_CODE_TOKENS budget
    before being substituted into the template.
    """
    trimmed = trim_code(code, MAX_CODE_TOKENS)
    # .format only parses the template itself, so braces inside the
    # problem statement or the source code are passed through untouched.
    template = """下面是一个 GitHub 问题和相应的源文件代码.
            【问题描述】:
            {problem}
            
            【Source file代码内容】:
            {code}
            
            严格按照如下要求:
            - 根据上面内容请生成一个统一的 git diff 来解决问题。
            - 仅返回 diff 块，以 diff --git 开头.
            """
    return template.format(problem=problem, code=trimmed)


def call_openai(prompt: str) -> str:
    """Send *prompt* as a single user message and return the reply text.

    Returns "" when the API yields no content.

    :rtype: str
    """
    rsp = client.chat.completions.create(
        model=MODEL,
        messages=[{"role": "user", "content": prompt}],
        temperature=TEMPERATURE,
        # Bug fix: the completion cap is the *diff* budget (MAX_OUTPUT),
        # not MAX_CODE_TOKENS, which budgets the *input* source code.
        # MAX_OUTPUT was previously declared but never used.
        max_tokens=MAX_OUTPUT,
    )
    return rsp.choices[0].message.content or ""


def _extract_diff(raw: str) -> str:
    """Pull the first unified diff out of a model reply; return "" if none."""
    if "diff --git" not in raw:
        return ""  # invalid patch
    diff = "diff --git" + raw.split("diff --git")[1]
    # Drop a trailing ``` code fence and anything after it (explanations).
    if "\n```" in diff:
        diff = diff.split("\n```")[0]
    return diff


def main():
    """CLI entry point: generate one patch per instance, dump preds JSON."""
    parser = argparse.ArgumentParser()
    parser.add_argument("-o", "--output", default="preds500.json", help="输出文件名")
    # NOTE(review): --split is parsed but currently unused — the local
    # parquet file below only contains the test split.
    parser.add_argument("-s", "--split", default="test", choices=["dev", "test"])
    args = parser.parse_args()

    # Load a pre-downloaded parquet copy of the dataset (Hugging Face is
    # unreachable from this machine).
    ds = load_dataset(
        "parquet",
        data_files={
            "test": r"D:\JunTuan\zxli26\0303_swebench\swe-bench-verified\data\test-00000-of-00001.parquet"
        },
        split="test",
    )
    preds = {}

    # enumerate replaces the hand-rolled index counter; *index* must track
    # the iteration position because read_problem_file keys files by it.
    for index, ins in enumerate(tqdm(ds, desc="模型正在答题")):
        try:
            problem = ins["problem_statement"]
            code = read_problem_file(index)
            prompt = build_prompt(problem, code)
            raw = call_openai(prompt)
            diff = _extract_diff(raw)
            preds[ins["instance_id"]] = {
                "model_patch": diff,
                "model_name_or_path": MODEL,
            }
        except Exception as e:
            # Best-effort batch: keep going, but report which instance failed
            # instead of printing the bare exception with no context.
            print(f"[instance {index}] {e}")

    with open(args.output, "w", encoding="utf-8") as f:
        json.dump(preds, f, indent=2, ensure_ascii=False)
    print(f"✅ 已生成 {len(preds)} 条补丁 → {args.output}")


# Script entry point: run only when executed directly, not on import.
if __name__ == '__main__':
    main()
