import os
import pandas as pd
# from sklearn.model_selection import train_test_split

def read_files_and_prepare_datasets(test=False, source_dir=None, output_dir=None):
    """Build instruction-tuning JSONL datasets from vulnerability CSV/XLSX files.

    Reads every ``.csv``/``.xlsx`` file in *source_dir*, keeps only files that
    carry all required columns, drops rows with missing values, deduplicates
    functions whose bodies are identical modulo whitespace, and writes two
    JSONL datasets (``sec1.jsonl`` and ``sec2.jsonl``) into *output_dir*.

    Args:
        test: Unused; kept for backward compatibility with existing callers
            (the branch it selected was removed as dead code).
        source_dir: Directory holding the input files. Defaults to the
            ``SecVulEval`` folder next to this script.
        output_dir: Directory the JSONL files are written to; created if it
            does not exist. Defaults to the ``data`` folder next to this
            script.

    Raises:
        ValueError: If no input file contains all required columns (the
            original code crashed inside ``pd.concat`` with an opaque error).
    """
    current_dir = os.path.dirname(os.path.abspath(__file__))
    datadir = source_dir if source_dir is not None else os.path.join(current_dir, 'SecVulEval')
    files = [f for f in os.listdir(datadir) if f.endswith(('.csv', '.xlsx'))]

    # Columns every usable input file must provide.
    required_columns = ['func_body', 'changed_statements', 'cwe_list', 'ds_response7']
    all_data = []

    for file in files:
        file_path = os.path.join(datadir, file)
        if file.endswith('.csv'):
            data = pd.read_csv(file_path)
        else:  # only .csv/.xlsx survive the filter above
            data = pd.read_excel(file_path)

        missing_columns = [col for col in required_columns if col not in data.columns]
        if missing_columns:
            print(f"文件 {file} 缺少列: {missing_columns}，已跳过。")
            continue

        # Keep only the columns the prompts below actually use.
        all_data.append(data[required_columns])

    # Fail loudly with a clear message instead of letting pd.concat([]) raise.
    if not all_data:
        raise ValueError(
            f"No input file in {datadir} contains all required columns {required_columns}"
        )

    combined_data = pd.concat(all_data, ignore_index=True)

    # Drop incomplete rows so every generated sample has all fields.
    combined_data = combined_data.dropna(subset=required_columns)

    # Normalize func_body into a temporary key by stripping spaces, real
    # tabs/newlines, and the literal two-character sequences "\n"/"\t", so
    # formatting-only duplicates collapse onto the same key.
    combined_data['processed_func_body'] = (
        combined_data['func_body']
        .str.replace(" ", "")
        .str.replace("\\n", "")
        .str.replace("\\t", "")
        .str.replace("\n", "")
        .str.replace("\t", "")
    )

    before_dedup = len(combined_data)
    combined_data = combined_data.drop_duplicates(subset=['processed_func_body'], keep='first')
    after_dedup = len(combined_data)
    print(f"去重前数据量: {before_dedup}，去重后数据量: {after_dedup}")

    # The normalized key was only needed for deduplication.
    combined_data = combined_data.drop(columns=['processed_func_body'])

    instruction1 = "## 任务\n你是一个漏洞检测助手，任务是检测下面代码中的漏洞，若无漏洞输出无漏洞，否则输出漏洞编号，以及检测到的漏洞描述、漏洞所在的代码片段。你需要先给出分析过程，再输出结果。"
    instruction2 = "## 任务\n你是一个漏洞检测助手，任务是检测下面代码中的漏洞，若无漏洞输出无漏洞，否则输出漏洞可能所在语句以及漏洞编号。"

    # Dataset 1: full answer with analysis, vulnerable statements, and CWE ids.
    all_data1 = combined_data.apply(lambda row: {
        "instruction": instruction1,
        "input": f"## 代码输入\n{row['func_body']}",
        "output": f"## 检测结果\n### 分析过程\n{row['ds_response7']}\n### 漏洞语句\n{row['changed_statements']}\n### 漏洞编号\n{row['cwe_list']}",
        "label": row['cwe_list']
    }, axis=1)
    # Dataset 2: shorter answer without the analysis section.
    all_data2 = combined_data.apply(lambda row: {
        "instruction": instruction2,
        "input": f"## 代码输入\n{row['func_body']}",
        "output": f"## 检测结果\n### 漏洞语句\n{row['changed_statements']}\n### 漏洞编号\n{row['cwe_list']}",
        "label": row['cwe_list']
    }, axis=1)

    # Save as JSONL; force_ascii=False keeps Chinese characters readable.
    out_dir = output_dir if output_dir is not None else os.path.join(current_dir, 'data')
    os.makedirs(out_dir, exist_ok=True)  # original crashed when the folder was missing
    all_data1.to_json(os.path.join(out_dir, 'sec1.jsonl'), orient='records', lines=True, force_ascii=False)
    all_data2.to_json(os.path.join(out_dir, 'sec2.jsonl'), orient='records', lines=True, force_ascii=False)
    print("数据集已成功生成并保存为指定格式的 JSONL 文件！")

if __name__ == "__main__":
    # Script entry point: build the datasets when run directly.
    read_files_and_prepare_datasets(test=True)