# 通过
import json
import random
# 确保目录存在
import os

from utils.code_file_check import is_code_file
from utils.diff_utils import extract_comment_diff_snippet, extract_random_diff_snippet, extract_refinement_code_snippet, \
    extract_random_code_snippet


def analyze_pr_need_check_data(refinement_jsonl_file, commit_comment_jsonl_file):
    """Build a balanced "need check" dataset for one repository.

    Reads the PR refinement records (comment-linked diff snippets — the
    positive, ``need_check=True`` samples) and the PR commit/comment detail
    records, then samples an equal number of non-commented code diffs as
    negative (``need_check=False``) samples.

    Args:
        refinement_jsonl_file: path to the ``*_pr_refinement_code.jsonl`` file.
        commit_comment_jsonl_file: path to the
            ``*_pr_commit_comment_details_with_files.jsonl`` file.

    Returns:
        A tuple ``(need_return_data, need_check_count)`` where
        ``need_return_data`` mixes positive records and sampled negative
        records.  On any read/parse failure returns ``([], 0)`` so callers
        can always unpack the result safely (the previous behavior of
        returning ``None`` crashed the caller on tuple unpacking).

    NOTE(review): reads the module-level global ``REPO`` for the repository
    name; the caller must set it before invoking this function.
    """
    need_return_data = []
    need_check_count = 0

    refinement_total = 0  # every line read, including skipped records
    # PR numbers that appear in the refinement data (deduplicated).
    pr_numbers_in_refinement = set()
    # pr_number -> list of {"before_file": ..., "after_file": ...} pairs
    # already consumed by a refinement record.
    pr_number_used_file = {}

    try:
        with open(refinement_jsonl_file, 'r', encoding='utf-8') as f:
            for line in f:
                # Parse each JSONL record exactly once (the original parsed
                # every line twice).
                temp_refinement = json.loads(line.strip())
                refinement_total += 1

                temp_refinement['need_check'] = True
                temp_refinement['repo_name'] = REPO

                temp_diff_snippet = extract_comment_diff_snippet(temp_refinement)
                if temp_diff_snippet is None:
                    print(f"{temp_refinement['pr_number']}没有找到对应的diff_comment")
                    continue
                temp_refinement['diff_snippet'] = temp_diff_snippet

                temp_refinement['comment'] = temp_refinement['diff_comment']['body']

                original_code, before_code, after_code = extract_refinement_code_snippet(temp_refinement)
                if original_code is None and before_code is None and after_code is None:
                    print(f"{temp_refinement['pr_number']}没有找到对应的extract_refinement_code_snippet")
                    continue
                temp_refinement['original_code'] = original_code
                temp_refinement['before_code'] = before_code
                temp_refinement['after_code'] = after_code
                need_return_data.append(temp_refinement)
                need_check_count += 1

                pr_number = temp_refinement.get('pr_number')
                if pr_number:
                    pr_numbers_in_refinement.add(pr_number)
                    pr_number_used_file.setdefault(pr_number, []).append({
                        "before_file": temp_refinement.get('before_file', {}).get('filename'),
                        "after_file": temp_refinement.get('after_file', {}).get('filename'),
                    })
        print(f"成功读取 {refinement_total} 个PRRefinement数据")
    except Exception as e:
        print(f"读取PRRefinement数据失败: {e}")
        return [], 0

    # Scan the PR detail data for file diffs NOT linked to a review comment;
    # these become the negative samples.
    no_need_check_count = 0
    no_need_check_data = []
    count = 0
    try:
        with open(commit_comment_jsonl_file, 'r', encoding='utf-8') as f:
            for line in f:
                temp_pr = json.loads(line.strip())
                pr_number = temp_pr.get('number')
                if pr_number not in pr_numbers_in_refinement:
                    continue
                used_file = pr_number_used_file.get(pr_number, [])
                temp_pr_files = temp_pr.get('pr_files')
                count += len(temp_pr_files)
                for temp_pr_file in temp_pr_files:
                    patch = temp_pr_file.get('patch')
                    before_name = patch.get('old_path')
                    after_name = patch.get('new_path')

                    # A file pair already consumed by a refinement record is
                    # marked used and excluded from the negative pool.
                    is_used = any(
                        u.get("before_file") == before_name and u.get("after_file") == after_name
                        for u in used_file
                    )
                    if is_used:
                        patch['is_used'] = True
                        continue
                    # Skip files already consumed earlier or non-code files.
                    if patch.get('is_used') is True:
                        continue
                    if not (is_code_file(before_name) and is_code_file(after_name)):
                        continue

                    temp_pr_file['need_check'] = False
                    temp_diff_snippet = extract_random_diff_snippet(patch.get('diff'))
                    before_code, after_code = extract_random_code_snippet(patch.get('diff'))
                    if temp_diff_snippet == "":
                        continue
                    no_need_check_data.append({
                        "pr_number": pr_number,
                        "need_check": False,
                        "repo_name": REPO,
                        "diff_comment": None,
                        "before_code": before_code,
                        "after_code": after_code,
                        "before_file": {
                            "filename": before_name,
                            "patch": patch.get('diff'),
                        },
                        "after_file": {
                            "filename": after_name,
                            "patch": patch.get('diff'),
                        },
                        'diff_snippet': temp_diff_snippet,
                    })
                    patch['is_used'] = True
                    no_need_check_count += 1

        print(f"{count} 个PR涉及的文件, 其中不需要check的数量: {no_need_check_count}")
        # Randomly sample as many negatives as positives so the final
        # dataset is balanced.
        if len(no_need_check_data) > need_check_count:
            selected_no_need_check_data = random.sample(no_need_check_data, need_check_count)
        else:
            selected_no_need_check_data = no_need_check_data
        need_return_data.extend(selected_no_need_check_data)

    except Exception as e:
        print(f"读取PR详细数据失败: {e}")
        return [], 0

    return need_return_data, need_check_count



# OpenHarmony repositories whose crawled PR data will be processed below.
REPO_List = [
    "account_os_account",
    "arkui_ace_engine",
    "build",
    "communication_wifi",
    "developtools_ace_ets2bundle",
    "multimedia_audio_framework",
    "web_webview",
    "xts_acts",
    "kernel_linux_5.10"
]
# Counters reserved for PR/refinement statistics.
# NOTE(review): these are never updated or read anywhere in this script.
pr_count = 0
refinement_count = 0
refinement_count_dict = {}

# Accumulates the check data produced for every repository; used later for
# the combined output file and the train/test split.
all_check_data = []

# Process each repository: build its check dataset, write a per-repo JSONL
# file, and accumulate everything into all_check_data.
for repo in REPO_List:
    print("========" * 20)
    print(repo)
    # Owner/repo of the project being analyzed.  NOTE: REPO is also read as a
    # module-level global inside analyze_pr_need_check_data, so it must be
    # assigned before the call below.
    OWNER = "openharmony"
    REPO = repo

    # Input files produced by earlier crawling steps.  PR_JSONL_FILE and
    # PR_ISSUE_EXCEL_FILE are kept for reference — this script does not read them.
    PR_JSONL_FILE = f"../pr_data/{REPO}/{OWNER}_{REPO}_prs.jsonl"
    PR_ISSUE_EXCEL_FILE = f"../pr_data/{REPO}/{OWNER}_{REPO}_issues_linked_to_prs.xlsx"
    PR_COMMIT_COMMENT_JSONL_FILE = f"../pr_data/{REPO}/{OWNER}_{REPO}_pr_commit_comment_details_with_files.jsonl"
    PR_Refinement_JSONL_FILE = f"../pr_data/{REPO}/{OWNER}_{REPO}_pr_refinement_code.jsonl"

    # Per-repository output file.
    REPO_OUTPUT_FILE = f"./RQ2/{REPO}/{OWNER}_{REPO}_check_data.jsonl"

    result = analyze_pr_need_check_data(PR_Refinement_JSONL_FILE, PR_COMMIT_COMMENT_JSONL_FILE)
    if result is None:
        # Defensive guard: some versions of analyze_pr_need_check_data return
        # None on a read failure — skip the repo instead of crashing on the
        # tuple unpacking below.
        print(f"跳过 {REPO}: 数据读取失败")
        continue
    check_data, need_check_count = result
    print(
        f"check_data数量: {len(check_data)}, 其中need_check数量: {need_check_count},其中不需要check数量: {len(check_data) - need_check_count}")

    # Write the current repository's data to its own JSONL file.
    try:
        repo_output_dir = os.path.dirname(REPO_OUTPUT_FILE)
        if repo_output_dir:
            # exist_ok avoids the exists()/makedirs() race of the original.
            os.makedirs(repo_output_dir, exist_ok=True)

        with open(REPO_OUTPUT_FILE, 'w', encoding='utf-8') as f:
            for item in check_data:
                f.write(json.dumps(item, ensure_ascii=False) + '\n')
        print(f"已将 {len(check_data)} 条数据写入 {REPO_OUTPUT_FILE}")
    except Exception as e:
        print(f"写入 {REPO_OUTPUT_FILE} 失败: {e}")

    # Accumulate for the combined dataset and the train/test split below.
    all_check_data.extend(check_data)

def _write_jsonl(output_file, items):
    """Write *items* to *output_file* as JSON Lines, creating parent dirs.

    Prints a success line with the record count, or a failure line if any
    step raises — mirroring the original per-file write sections, which
    were three copy-pasted blocks of this exact logic.
    """
    try:
        output_dir = os.path.dirname(output_file)
        if output_dir:
            # exist_ok avoids the exists()/makedirs() race of the original;
            # the truthiness guard skips makedirs("") for bare filenames.
            os.makedirs(output_dir, exist_ok=True)

        with open(output_file, 'w', encoding='utf-8') as f:
            for item in items:
                f.write(json.dumps(item, ensure_ascii=False) + '\n')
        print(f"已将 {len(items)} 条数据写入 {output_file}")
    except Exception as e:
        print(f"写入 {output_file} 失败: {e}")


# Write every repository's data into one combined JSONL file.
ALL_DATA_OUTPUT_FILE = "./RQ2/all_repos_check_data.jsonl"
_write_jsonl(ALL_DATA_OUTPUT_FILE, all_check_data)

# Shuffle, then split 80% train / 20% test.
random.shuffle(all_check_data)
split_index = int(len(all_check_data) * 0.8)

train_data = all_check_data[:split_index]
test_data = all_check_data[split_index:]

# Write the training set.
TRAIN_DATA_OUTPUT_FILE = "./RQ2/train_data.jsonl"
_write_jsonl(TRAIN_DATA_OUTPUT_FILE, train_data)

# Write the test set.
TEST_DATA_OUTPUT_FILE = "./RQ2/test_data.jsonl"
_write_jsonl(TEST_DATA_OUTPUT_FILE, test_data)
