from disassemblyTools import disassemblyTool
from configure import *
import os
from datetime import datetime
from sklearn.model_selection import train_test_split
import random
import shelve
import tlsh
import copy
from tqdm import tqdm
import hashlib
from easySample import easySample
import sys
sys.path.append(r"/home/cyw/projects/function_sim_project/disassemblyTools")


# When switching data sets, the following need to be updated:
#   pairInfPath
#   sampleLablesPath
#   family_lable_Path
#   functionSim_predata
#   all_data_path

# NOTE: temporary logic added in a hurry; the overall storage layout may be
# incorrect and has not been cleaned up yet.
pairName = "pair"
pairSampleAndLableName = "sample_and_lables"

# pairBasePath = r"/home/cyw/projects/function_sim_project/all_data/newPair/"
# pairBasePath = r"/home/cyw/projects/function_sim_project/all_data/newPair2/"
pairBasePath = r"/home/cyw/projects/function_sim_project/all_data/pair_infs_copy/"


# pairInfPath=r"/home/cyw/projects/function_sim_project/all_data/EECG_pair_infs/pair_infs"
pairInfPath = pairBasePath+"{}_infs".format(pairName)
disPairInfPath = r"/home/cyw/projects/function_sim_project/all_data/pair_infs/dis_pair_infs"

# Shelve files holding the generated train/test/valid pair information
# (the "dis" variants belong to the disassembly experiment).
# sampleLablesPath = "/home/cyw/projects/function_sim_project/all_data/pair_infs/sample_and_lables"
sampleLablesPath = pairBasePath+"{}".format(pairSampleAndLableName)
disSampleLablesPath = "/home/cyw/projects/function_sim_project/all_data/pair_infs/dis_sample_and_lables"


class dataPre():
    """
        Build the sample pairs used for training / testing / validation.

        All dataset locations come from the project-level ``configure``
        module (``all_data_path``, ``family_lable_Path``,
        ``data_Process_logs``, ``functionSim_predata``,
        ``gene_disassemble_pair``), imported with a star import above.
    """

    def __init__(self, dataPath=all_data_path, famLablePath=family_lable_Path, logPath=data_Process_logs):
        """Remember the configured paths and the per-split epoch counts."""
        # Number of passes over each split when generating pairs; a larger
        # epoch yields a larger data set.  Kept at 1 for efficiency.
        self.train_sample_epoch = 1
        self.test_sample_epoch = 1
        self.valid_sample_epoch = 1
        # FIX: the path parameters were silently discarded before; keep them
        # on the instance so callers can at least inspect the configuration.
        self.dataPath = dataPath
        self.famLablePath = famLablePath
        self.logPath = logPath
        self.eSample = easySample()

    def get_pair_infs(self, with_Lable=False, selectPath="False"):
        """
            Load previously generated training-pair information.

            with_Lable
                True : pairs are kept as
                       (sample1, sample2, flag, sample1_famlable, sample2_famlable)
                False: pairs are truncated to (sample1, sample2, flag)
            selectPath: alternative shelve path holding the pairs; the
                sentinel string "False" means "use the default pairInfPath".
            Returns a dict with keys "train", "test" and "valid".
        """
        path = pairInfPath if selectPath == "False" else selectPath
        with shelve.open(path) as file:
            trainPair = file["trainPair"]
            testPair = file["testPair"]
            validPair = file["validPair"]
        print("样本对地址：{}".format(path))
        # BUG FIX: when with_Lable was True the original appended the whole
        # pair *list* once per element instead of the i-th pair.
        res = {}
        res["train"] = [p if with_Lable else p[:3] for p in trainPair]
        res["test"] = [p if with_Lable else p[:3] for p in testPair]
        res["valid"] = [p if with_Lable else p[:3] for p in validPair]
        return res

    def generate_pairs(self):
        """
            Split the samples into train / test / valid sets, generate the
            training pairs of every split, and persist both to shelve files.
        """
        self.write_log_file(data_Process_logs, "开始划分训练测试样本")
        if gene_disassemble_pair == False:
            sampleName, sampleLable = self.get_all_used_sample()
        else:
            print("正在生成反汇编实验样本对数据")
            sampleName, sampleLable = self.get_dis_all_used_sample()
        self.write_log_file(data_Process_logs,
                            "各个数据都存在的样本数量为{}".format(len(sampleName)))
        trainName, trainLable, testName, testLable, validName, validLable = self.samples_train_test_split(
            sampleName, sampleLable)

        # Generate the pairs of every split, grouped by family.
        self.write_log_file(data_Process_logs, "生成训练集的样本对")
        trainPair = self.generate_sample_pairs(
            trainName, trainLable, self.train_sample_epoch)
        self.write_log_file(data_Process_logs, "生成测试集的样本对")
        testPair = self.generate_sample_pairs(
            testName, testLable, self.test_sample_epoch)
        self.write_log_file(data_Process_logs, "生成验证集的样本对")
        validPair = self.generate_sample_pairs(
            validName, validLable, self.valid_sample_epoch)

        # Persist the splits and pairs; the disassembly experiment writes to
        # its own shelve files.  (Deduplicated from two copy-pasted branches.)
        if gene_disassemble_pair == False:
            lablesPath, pairsPath = sampleLablesPath, pairInfPath
        else:
            lablesPath, pairsPath = disSampleLablesPath, disPairInfPath
        with shelve.open(lablesPath) as file:
            file["trainName"], file["trainLable"] = trainName, trainLable
            file["testName"], file["testLable"] = testName, testLable
            file["validName"], file["validLable"] = validName, validLable
        with shelve.open(pairsPath) as file:
            file["trainPair"] = trainPair
            file["testPair"] = testPair
            file["validPair"] = validPair
        if gene_disassemble_pair:
            print("正在保存反汇编实验样本对数据")

    def generate_all_to_all_pairs(self, names, lables, saveName):
        """
            Generate every sample-to-sample matching pair together with its
            ground truth and persist the result under ``saveName``.

            The full cross product is far too large to evaluate, so at most
            three samples are kept per family.
        """
        # Keep at most 3 representatives per family.
        lable_to_name = {}
        name_to_lable = {}
        for i in range(len(lables)):
            if lables[i] not in lable_to_name:
                lable_to_name[lables[i]] = [names[i]]
            elif len(lable_to_name[lables[i]]) <= 2:
                lable_to_name[lables[i]].append(names[i])
        names = []
        lables = []
        for lable in lable_to_name.keys():
            for name in lable_to_name[lable]:
                name_to_lable[name] = lable
                names.append(name)
                lables.append(lable)

        # Build the target pairs: flag 1 for same family, -1 otherwise.
        allPairs = []
        lth = len(names)
        for i in tqdm(range(lth)):
            temp = []
            for j in range(lth):
                # Skip self-pairs: hash-based metrics trivially score them 1,
                # which would inflate the MRR/MAP figures.
                if i == j:
                    continue
                flag = 1 if lables[i] == lables[j] else -1
                temp.append((names[i], names[j], flag))
            # FIX: temp is rebuilt every iteration, so the deepcopy the
            # original made here was pure overhead.
            allPairs.append(temp)

        # Persist the pairs plus both name/family mappings.
        with shelve.open(pairBasePath+"{}".format(saveName)) as file:
            file["pairs"] = allPairs
            file["lable_to_name"] = lable_to_name
            file["name_to_lable"] = name_to_lable

    def generate_sample_pairs(self, name, lable, epoch):
        """
            Generate the training pairs; ``epoch`` is the number of passes
            over the sample list.
            Each pair is (sample1, sample2, flag, sample1_famlable, sample2_famlable);
            flag=1 means same family, flag=-1 means different families.
        """
        # Group sample names by family.
        Lable_to_sample = {}
        LableList = []
        for i in range(len(lable)):
            if lable[i] not in Lable_to_sample:
                Lable_to_sample[lable[i]] = [name[i]]
                LableList.append(lable[i])
            else:
                Lable_to_sample[lable[i]].append(name[i])
        self.write_log_file(data_Process_logs,
                            "   共有{}种家族".format(len(Lable_to_sample)))
        self.write_log_file(data_Process_logs, "   分别为：{}".format(
            str(Lable_to_sample.keys())))
        simPair = []
        disSimPair = []
        equelNum = 0  # count of degenerate self-pairs (single-sample families)
        for _ in range(epoch):
            for i in range(len(name)):
                family_lable = lable[i]
                # Similar pair: pick a different sample of the same family.
                # BUG FIX: the original rejection-sampling loop hung forever
                # when every entry of the family equalled name[i]; choosing
                # from an explicit candidate list cannot hang and keeps the
                # same uniform distribution over the valid choices.
                candidates = [s for s in Lable_to_sample[family_lable]
                              if s != name[i]]
                if candidates:
                    simPair.append(
                        (name[i], random.choice(candidates), 1,
                         family_lable, family_lable))
                else:
                    # The family has no other sample: pair it with itself.
                    simPair.append(
                        (name[i], name[i], 1, family_lable, family_lable))
                    equelNum += 1
                # Dissimilar pair: pick a random sample of another family.
                otherLables = [fam for fam in LableList if fam != family_lable]
                if not otherLables:
                    # Only one family in the split: the original loop would
                    # never terminate here, so skip the dissimilar pair.
                    continue
                tempLable = random.choice(otherLables)
                tempName = random.choice(Lable_to_sample[tempLable])
                disSimPair.append(
                    (name[i], tempName, -1, family_lable, tempLable))
        res = simPair
        res.extend(disSimPair)
        self.write_log_file(data_Process_logs, "   生成训练对：{}对".format(len(res)))
        self.write_log_file(data_Process_logs,
                            "   相似对中重复：{}对".format(equelNum))
        return res

    def samples_train_test_split(self, sampleName, sampleLable):
        """
            Split the sample names for the train / test / valid phases with
            fixed random seeds so the split is reproducible.
            NOTE(review): the original docstring claimed 7:1.5:1.5, but
            test_size=0.2 followed by a 50/50 split actually yields 8:1:1.
            Returns (trainName, trainLable, testName, testLable, validName, validLable).
        """
        X_train, X_test, y_train, y_test = train_test_split(
            sampleName, sampleLable, test_size=0.2, random_state=666)
        # Split the 20% hold-out in half between test and valid.
        X_valid, X_test, y_valid, y_test = train_test_split(
            X_test, y_test, test_size=0.5, random_state=666)
        self.write_log_file(data_Process_logs, "训练集:{}  测试集：{}  验证集：{}".format(
            len(X_train), len(X_test), len(X_valid)))
        return X_train, y_train, X_test, y_test, X_valid, y_valid

    def get_dis_all_used_sample(self):
        """
            Collect the samples usable for the disassembly experiment:
            disassembled successfully, original binary present and family
            label present.  Samples whose file name does not match their md5
            are dropped here (unlike get_all_used_sample, which renames them).
            Returns two parallel lists: (names, lables).
        """
        disTools = disassemblyTool()
        availableName = disTools.get_disassembly_sample()
        trans_md5 = self.get_true_md5()
        tempLable = self.get_samples_fam()
        famLable = {}
        for name in tempLable:
            # Skip samples without disassembly output or with a bad md5 name.
            if (name not in availableName) or (name in trans_md5):
                continue
            famLable[name] = tempLable[name]
        res = []
        lable = []
        for i in famLable:
            res.append(i)
            lable.append(famLable[i])
        return res, lable

    def get_all_used_sample(self):
        """
            Collect the samples every model can preprocess:
            disassembled successfully, original binary present and family
            label present.  Mis-named samples are renamed to their true md5.
            Returns two parallel lists: (names, lables).
        """
        trans_md5 = self.get_true_md5()
        tempLable = self.get_samples_fam()
        famLable = {}
        modify_Lable_num = 0
        for name in tempLable:
            lable = tempLable[name]
            # Replace a wrong file name with the sample's true md5 digest.
            if name in trans_md5:
                modify_Lable_num += 1
                name = trans_md5[name]
            famLable[name] = lable
        print("修改{}个样本的样本名".format(modify_Lable_num))
        sampleExist = self.get_samples_binary_is_exist()
        disSample = self.get_disassembly_sample()
        res = []
        lable = []
        for i in famLable:
            # Keep only samples that both exist on disk and disassembled.
            if i in sampleExist and i in disSample:
                res.append(i)
                lable.append(famLable[i])
        return res, lable

    def get_true_md5(self):
        """
            Build the mapping {true_md5 -> wrong_file_name} for every sample
            whose file name does not equal the md5 of its content.
        """
        print("正在校验样本md5值与样本名的映射")
        res = {}
        for path, dir_list, file_list in os.walk(all_data_path):
            for file_name in tqdm(file_list):
                # Names containing a dot are intermediate files; skip them.
                if (len(file_name.split(".")) == 1):
                    tar = os.path.join(path, file_name)
                    with open(tar, 'rb') as fp:
                        data = fp.read()
                    file_md5 = hashlib.md5(data).hexdigest()
                    if file_name != file_md5:
                        res[file_md5] = file_name
        return res

    def get_disassembly_sample(self):
        """
            Return the names of the IDA-disassembled samples, dropping:
              - samples whose call graph has 0 nodes (disassembly failure),
              - samples with >= 8000 nodes (too large, for efficiency),
              - samples radare2 cannot process (hard-coded list below,
                currently empty).
        """
        res = {}
        aNum, bNum = 0, 0  # disassembly failures / oversized call graphs
        for path, dir_list, file_list in os.walk(functionSim_predata):
            for file_name in tqdm(file_list):
                a = file_name.split(".")
                if len(a) == 2 and a[1] == "dir":
                    # A shelve ".dir" file marks a preprocessed sample; check
                    # whether its call graph actually contains nodes.
                    # FIX: build the path with os.path.join instead of "//".
                    with shelve.open(os.path.join(functionSim_predata, a[0])) as file:
                        cg = file["cg"]
                        if len(cg) == 0:
                            self.write_log_file(
                                data_Process_logs, "functionSim反汇编的样本{}---函数结点为{}".format(a[0], len(cg)))
                            aNum += 1
                            continue
                        if len(cg) >= 8000:
                            self.write_log_file(
                                data_Process_logs, "functionSim反汇编的样本{}---函数结点为{},过大舍去".format(a[0], len(cg)))
                            bNum += 1
                            continue
                    res[a[0]] = True
        print("ida处理的反汇编样本中,{}个样本不能正常的反汇编,{}个样本结点数量过大舍去".format(aNum, bNum))
        # radare2: samples whose embedding failed, collected by
        # siamese_Graphsage/gene_function_embedding.py; list "<md5>_origin"
        # entries here to exclude them (currently none).
        datas = []
        remove_num = 0
        for temp in datas:
            name = temp.split("_")[0]
            if name in res:
                del res[name]
                remove_num += 1
        print("移除了{}个radare2工具不能处理的样本".format(remove_num))
        self.write_log_file(data_Process_logs, "反汇编的样本数量为{}".format(len(res)))
        return res

    def get_samples_binary_is_exist(self):
        """
            Return the names of the binaries TLSH can actually hash, so the
            TLSH model never receives an empty input; samples whose content
            hashes to "TNULL" are dropped.
        """
        print("检验样本是否为空输入")
        res = {}
        for path, dir_list, file_list in os.walk(all_data_path):
            for file_name in tqdm(file_list):
                # BUG FIX: the original leaked the file handle and built the
                # path as all_data_path+file_name, which breaks for files in
                # sub-directories; join against the walked directory instead.
                with open(os.path.join(path, file_name), 'rb') as fp:
                    data = fp.read()
                if tlsh.hash(data) != "TNULL":
                    res[file_name] = True
        self.write_log_file(data_Process_logs, "二进制样本数量为{}".format(len(res)))
        return res

    def get_samples_fam(self):
        """
            Parse the family-label file into {sample_name: family}.
            Lines labelled "SINGLETON:..." or "-" are treated as mislabelled
            and skipped.
        """
        res = {}
        with open(family_lable_Path) as file:
            for inf in file.readlines():
                temp = inf.split("	")  # tab-separated: name<TAB>label
                name, lable = temp[0], temp[1].strip()
                if name in res:
                    self.write_log_file(data_Process_logs, "样本重复")
                if lable.split(":")[0] == "SINGLETON":
                    self.write_log_file(data_Process_logs,
                                        "样本家族标记错误 {} {}".format(name, lable))
                    continue
                if lable.split("	")[0] == "-":
                    self.write_log_file(data_Process_logs,
                                        "样本家族标记错误 {} {}".format(name, lable))
                    continue
                res[name] = lable
        self.write_log_file(data_Process_logs, "存在家族的样本数量为{}".format(len(res)))
        return res

    def write_log_file(self, file_name_path, log_str, splitSymbol="   ", print_flag=True):
        """
            Append a timestamped line to the log file, creating it on demand,
            and optionally echo the message to stdout.
        """
        if print_flag:
            print(log_str)
        if log_str is None:
            log_str = 'None'
        # FIX: mode 'a' both creates a missing file and appends to an
        # existing one, so the original exists()-branch pair was redundant.
        with open(file_name_path, 'a') as log_file:
            log_file.write(str(datetime.now())+splitSymbol+log_str + '\n')

    def calulate_pair_infs(self, sampleName, sampleLable):
        """
            Compute statistics for one split: family count, average graph
            node count, sample count and the 5 most frequent family names.
        """
        res = {}
        res["famSum"] = 0
        res["sampleNum"] = len(sampleName)
        res["famCount"] = {}
        res["sample2Node"] = {}
        res["avgNode"] = {}
        res["topFam"] = {}
        famDict = {}
        for lable in sampleLable:
            value = famDict.get(lable, 0)
            famDict[lable] = value+1
        res["famSum"] = len(famDict)
        res["famCount"] = famDict
        topFam = sorted(famDict, key=famDict.get, reverse=True)[:5]
        res["topFam"] = topFam
        # Node counts require the disassembled ("functionSim") samples.
        sample2Node = {}
        nodeSum = 0
        for name in tqdm(sampleName):
            sample = self.eSample.get_sample(name, "functionSim")
            nodeNum = len(sample["adj"])
            sample2Node[name] = nodeNum
            nodeSum += nodeNum
        res["avgNode"] = round(nodeSum*1.0/len(sampleName), 2)
        res["sample2Node"] = sample2Node
        print("样本个数：{}\n家族数量：{}\n图平均结点大小：{}\n数量前5家族名：{}".format(
            res["sampleNum"], res["famSum"], res["avgNode"], res["topFam"]))
        return res

    def show_pair_infs(self):
        """
            Print and return the statistics of the persisted train / test /
            valid splits.
        """
        with shelve.open(sampleLablesPath) as file:
            testName, testLable = file["testName"], file["testLable"]
            validName, validLable = file["validName"], file["validLable"]
            trainName, trainLable = file["trainName"], file["trainLable"]
        ans = {}
        ans["train"] = self.calulate_pair_infs(trainName, trainLable)
        ans["test"] = self.calulate_pair_infs(testName, testLable)
        ans["valid"] = self.calulate_pair_infs(validName, validLable)
        return ans


if __name__ == "__main__":
    dataProcess = dataPre()
    show_detail_inf = False

    if gene_disassemble_pair:
        # Disassembly experiment: regenerate the splits first, then build the
        # all-to-all pairs from the freshly persisted names/labels.
        print("正在生成反汇编实验所需的样本对")
        dataProcess.generate_pairs()
        with shelve.open(disSampleLablesPath) as file:
            trainName, trainLable = file["trainName"], file["trainLable"]
            testName, testLable = file["testName"], file["testLable"]
            validName, validLable = file["validName"], file["validLable"]
        print("正在生成all to all pairs")
        for split_names, split_lables, tag in (
                (trainName, trainLable, "dis_train"),
                (testName, testLable, "dis_test"),
                (validName, validLable, "dis_valid")):
            dataProcess.generate_all_to_all_pairs(split_names, split_lables, tag)
    elif show_detail_inf:
        # Only report statistics about the already-persisted splits.
        dataProcess.show_pair_infs()
    else:
        # Default path: reuse the persisted splits and build all-to-all pairs.
        with shelve.open(sampleLablesPath) as file:
            trainName, trainLable = file["trainName"], file["trainLable"]
            testName, testLable = file["testName"], file["testLable"]
            validName, validLable = file["validName"], file["validLable"]
        print("正在生成all to all pairs")
        for split_names, split_lables, tag in (
                (trainName, trainLable, "train"),
                (testName, testLable, "test"),
                (validName, validLable, "valid")):
            dataProcess.generate_all_to_all_pairs(split_names, split_lables, tag)
