import os
import json

import tqdm
import networkx as nx
from sklearn.model_selection import train_test_split
from symbolizer import clean_gadget, tokenize_code_line
from os.path import exists


def get_all_cpg_file_path(cpg_file_root):
    """Recursively collect the absolute paths of all ``.nx`` CPG files under *cpg_file_root*.

    Returns a de-duplicated list (order is unspecified, as with the original
    ``list(set(...))`` construction).
    """
    found = set()
    for dirpath, _dirnames, filenames in os.walk(cpg_file_root):
        for filename in filenames:
            if filename.endswith(".nx"):
                found.add(os.path.abspath(os.path.join(dirpath, filename)))
    return list(found)

def process_parallel(cpg_file_path):
    """Symbolize the source code attached to one CPG and write the graph back in place.

    Loads the pickled networkx graph at *cpg_file_path*, reads the source file it
    references, normalizes identifiers line by line (variables -> VARi, function
    names -> FUNi via ``clean_gadget``), tokenizes each normalized line, stores
    the tokens on every node under ``code_sym_token``, and overwrites the
    original pickle. Graphs that already carry ``code_sym_token`` on any node
    are skipped (already processed in a previous run).
    """
    cpg_path = cpg_file_path  # networkx pickle produced for this CPG graph
    try:
        cpg = nx.read_gpickle(cpg_path)

        # Skip graphs that were already symbolized, e.g.
        # {'code_sym_token': ['char', 'VAR1', '[', '100', ']', ';']}.
        for node in cpg:
            if "code_sym_token" in cpg.nodes[node]:
                return

        file_paths = cpg.graph["file_path"]  # source file(s) this CPG was built from
        file_path = file_paths[0]
        with open(file_path, "r", encoding="utf-8", errors="ignore") as f:
            file_contents = f.readlines()

        # Collect one source line (or fallback text) per node, in node order,
        # so indices stay aligned for the tokenization loop below.
        code_lines = []
        for node in cpg.nodes:
            attrs = cpg.nodes[node]
            row_index = attrs.get("row_index", -1)
            if row_index == -1 or row_index > len(file_contents):
                # No valid source line: fall back to the node's variable name
                # (if any) or an empty string to keep the list aligned.
                code_lines.append(attrs.get("node_var", ""))
                continue
            # row_index is 1-based in the CPG, hence the -1.
            code_lines.append(file_contents[int(row_index) - 1].strip())

        sym_code_lines = clean_gadget(code_lines)  # one normalized string per line
        for idx, node in enumerate(cpg):
            # Split each normalized line into individual tokens.
            cpg.nodes[node]["code_sym_token"] = tokenize_code_line(sym_code_lines[idx], False)

        # Replace the original CPG pickle with the symbolized graph.
        nx.write_gpickle(cpg, cpg_path)
    except Exception as exc:
        # Report which graph failed and re-raise so the caller sees the real
        # traceback (the original bare `except: ... exit()` swallowed the error
        # and killed the whole run with status 0).
        print(f"Failed to symbolize {cpg_path}: {exc}")
        raise


def add_symlines(cpg_file_paths):
    """Run the symbolization pass over every CPG file, showing a progress bar."""
    for path in tqdm.tqdm(cpg_file_paths):
        process_parallel(path)


def split_list(cpg_file_paths, out_dir=None):
    """Randomly split the CPG file paths 9:1 into train/test sets and persist them.

    Args:
        cpg_file_paths: list of CPG pickle paths to split.
        out_dir: directory to write ``train.json`` / ``test.json`` into.
            Defaults to the module-level ``cpg_file_root`` (set under
            ``__main__``) for backward compatibility — the original version
            read that global directly, which broke when this function was
            imported from another module.
    """
    if out_dir is None:
        out_dir = cpg_file_root  # legacy behavior: global set in __main__
    X_train, X_test = train_test_split(cpg_file_paths, test_size=0.1)
    # exist_ok avoids the racy exists()-then-makedirs pair of the original.
    os.makedirs(out_dir, exist_ok=True)
    with open(os.path.join(out_dir, "train.json"), "w") as f:
        json.dump(X_train, f)
    with open(os.path.join(out_dir, "test.json"), "w") as f:
        json.dump(X_test, f)


if __name__ == "__main__":
    # Root directory holding the generated .nx CPG pickles.
    # NOTE: split_list reads this module-level name, so keep it as-is.
    cpg_file_root = "/Users/zhang/data/code2graph/python/source-code"
    all_cpg_paths = get_all_cpg_file_path(cpg_file_root)
    # Symbolize (normalize) the source code and write it back into each graph.
    add_symlines(all_cpg_paths)
    # Random 9:1 train/test split; each side's CPG paths go into a JSON file.
    split_list(all_cpg_paths)