# This file performs visual (explainability) analysis of model samples, at the granularity of nodes formed by individual assembly statements.
import os
import sys
import torch
import nn.api as nn_api
import nn.simplest_gcn as simplest_gcn
import nn.transformer as transformer
import nn.lstm as lstm
from tasks.bvd.graph_class import *
from tasks.bvd.graph_constructor import json_constructor
import pickle
import json
from .dataset_class import TransformerDataset, GCNDataset, LSTMDataset
import numpy as np
from data.api import random_get_samples, init_log, get_all_samples
from torch_geometric.data import Data,Batch
import time
from .vis_class import asm_node
import matplotlib

# mod_type = 'GCN'
# if mod_type == 'GCN':
#     from torch_geometric.loader import DataLoader # only gcn or graph mod
# else :
#     from torch.utils.data import DataLoader


# 宏变量声明
# GCN
# dataset_save_pkl = r'preprocessed_data/vis/cwe121_rearranged_random.pkl'
# dataset_dir = r'preprocessed_data/cfg_transferred/CWE121_Stack_Based_Buffer_Overflow'
# nn_file = r'mods/simplest_v1.2.pt'
# vis_file = r'preprocessed_data/vis/simplest_vis.pkl'
# vis_output = r'preprocessed_data/vis/simplest_vis_output.pkl'
# pkl_random_savefile = r'preprocessed_data/vis/cfg_graph_rearranged_random.pkl'
# log_dir = r'logs/visual_log.txt'

# GCN newest
# dataset_save_pkl = r'preprocessed_data/vis/cwe121_v1.0_o0_newest.pkl'
# dataset_dir = r'preprocessed_data/CWE121_Stack_Based_Buffer_Overflow_O0_filtered_newest20241110'
# nn_file = r'mods/simplest_v1.0_newest.pt'
# vis_file = r'preprocessed_data/vis/simplest_vis_newest.pkl'
# vis_output = r'preprocessed_data/vis/simplest_vis_output_newest.pkl'
# pkl_savefile = r'preprocessed_data/vis/cfg_graph_o0_newest.pkl'
# log_dir = r'logs/visual_log_newest.txt'
# sample_file_list=r'preprocessed_data/vis/sample_file_list_newest.json'

# addon dataset
# NOTE(review): unlike the commented-out configs above (which are concrete file
# paths), these globals are *directory prefixes*. visual_multiple_mod() rebinds
# each of them to a per-model file path before invoking the legacy pipeline.
dataset_save_pkl = r'preprocessed_data/addon_test/addon_vis/vis_save_pkl/' # NOTE: NOT SAME AS RUN
dataset_dir = r'preprocessed_data/addon_test/addon_juliet_dataset/'
nn_file = r'mods/addon_test/'  # directory containing the trained model checkpoints (*.pt)
vis_file = r'preprocessed_data/addon_test/addon_vis/vis_file/'
vis_output = r'preprocessed_data/addon_test/addon_vis/vis_output/'
pkl_savefile = r'preprocessed_data/addon_test/addon_vis/pkl_save_file/'
log_dir = r'logs/addon_test/'
sample_file_list=r'preprocessed_data/addon_test/addon_vis/sample_file_list/'


# OLD FUNCTIONS
def visual_multiple_mod(if_alldataset=False):
    """Run the legacy visualization + analysis pipeline for every checkpoint.

    The module-level path globals are directory prefixes at this point; for
    each model ``<stem>.pt`` found under ``nn_file`` they are rebound to
    per-model file paths before calling ``visual_single_mod`` and
    ``analyse_single_pkl`` (both of which read the globals).

    Args:
        if_alldataset: currently unused; kept for interface compatibility.
    """
    global dataset_save_pkl, dataset_dir, nn_file, vis_file, vis_output, pkl_savefile, log_dir, sample_file_list

    # Skip backup checkpoints; everything else is treated as a model file.
    mod_ls = [mod for mod in os.listdir(nn_file) if 'backup' not in mod]

    # Hoist the common "<name>.pt" -> "<name>" stem computation.
    stems = [mod.split('.pt')[0] for mod in mod_ls]
    dataset_dir_ls = [dataset_dir + stem for stem in stems]
    dataset_save_pkl_ls = [dataset_save_pkl + stem + '.pkl' for stem in stems]
    nn_file_ls = [nn_file + stem + '.pt' for stem in stems]
    pkl_savefile_ls = [pkl_savefile + stem + '_cfg.pkl' for stem in stems]
    log_dir_ls = [log_dir + stem + '_log.txt' for stem in stems]
    vis_file_ls = [vis_file + stem + '.pkl' for stem in stems]
    vis_output_ls = [vis_output + stem + '_output.pkl' for stem in stems]
    sample_file_list_ls = [sample_file_list + stem + '.json' for stem in stems]

    for i in range(len(stems)):
        # Rebind the globals so the per-model helpers pick up the right paths.
        dataset_save_pkl = dataset_save_pkl_ls[i]
        dataset_dir = dataset_dir_ls[i]
        nn_file = nn_file_ls[i]
        vis_file = vis_file_ls[i]
        vis_output = vis_output_ls[i]
        pkl_savefile = pkl_savefile_ls[i]
        log_dir = log_dir_ls[i]
        sample_file_list = sample_file_list_ls[i]
        visual_single_mod()
        analyse_single_pkl()

def visual_single_mod(each_sample_num=3):
    """Sample files from ``dataset_dir``, build CFG graph objects (cached in
    ``pkl_savefile``) and run the GCN visualization model, which dumps per-node
    contribution info to ``vis_file``.

    Args:
        each_sample_num: number of samples drawn per category.
    """
    # init_log(log_dir)
    random_files = random_get_samples(dataset_dir,each_sample_num=each_sample_num,sample_file_list=sample_file_list)

    before_io = time.time()
    if os.path.exists(pkl_savefile):
        # Reuse the cached graph objects instead of re-parsing the JSON CFGs.
        with open(pkl_savefile, 'rb') as f:
            objects = pickle.load(f)
    else:
        objects = []
        for files, label in random_files:
            cfg = files[1]
            factory = json_constructor(cfg, 'cfg')
            objects.append((factory.construct_by_inst(), label))
        with open(pkl_savefile, 'wb') as f:
            pickle.dump(objects, f)
    after_io = time.time()
    print('load json total time (seconds):', after_io - before_io)

    # Assumes node order matches angr's output order — TODO confirm.
    dataset = GCNDataset(objects, dataset_save_pkl, renew=True)
    # BUGFIX: DataLoader was previously an unresolved name here (its top-level
    # import is commented out). Import the graph-aware loader locally, matching
    # what visual_gcn_mod does.
    from torch_geometric.loader import DataLoader
    dataloader = DataLoader(dataset, batch_size=1, shuffle=False)
    model = simplest_gcn.GCNSampleVisual(nn_file, vis_file)
    nn_api.eval_model(model, dataloader, device=torch.device("cuda:0" if torch.cuda.is_available() else "cpu"))

def analyse_single_pkl(target_dir=None):
    """Sort the per-node contribution weights stored in ``vis_file`` by class
    and merge the result into the pickle at ``vis_output``.

    Args:
        target_dir: currently unused; kept for interface compatibility.
    """
    # TODO: after per-node weights are generated, produce heatmaps or other
    # representations (only pkl storage is implemented for now).
    with open(vis_file, 'rb') as f:
        file_dict = pickle.load(f)
    for file_name, vis_info in file_dict.items():
        # Rank nodes by their contribution to class 0 / class 1 (descending).
        why_0 = [item[0][0] for item in vis_info]
        why_0_indices = sorted(range(len(why_0)), key=lambda i: why_0[i], reverse=True)
        why_0_data = [vis_info[i] for i in why_0_indices]
        why_1 = [item[0][1] for item in vis_info]
        why_1_indices = sorted(range(len(why_1)), key=lambda i: why_1[i], reverse=True)
        why_1_data = [vis_info[i] for i in why_1_indices]

        # Merge into the existing output pkl if present, otherwise create it.
        # (Previously the handles were never closed; ``with`` fixes the leak.)
        if os.path.exists(vis_output):
            with open(vis_output, 'rb') as output_f:
                dict_why = pickle.load(output_f)
        else:
            dict_why = dict()
        dict_why[file_name] = {'0': why_0_data, '1': why_1_data}
        with open(vis_output, 'wb') as output_f:
            pickle.dump(dict_why, output_f)

        # TODO topk

# NEW API FUNCTIONS
def visual_gcn_mod(dataset_dir, dataset_class_save_file, graph_save_file, model_dir, vis_info_dir, vis_node_con_dir, log_file, sample_num, sample_file_list, vis_json_output_dir):
    """End-to-end GCN visualization: sample files, build CFG graphs, run the
    visual model, sort per-node contributions, and export per-sample JSON.

    Args:
        dataset_dir: root directory of the JSON CFG dataset.
        dataset_class_save_file: cache file for the GCNDataset object.
        graph_save_file: cache pkl for the constructed graph objects.
        model_dir: trained model checkpoint path.
        vis_info_dir: directory where the visual model dumps raw per-node info.
        vis_node_con_dir: directory for the sorted node-contribution pkls.
        log_file: log file path.
        sample_num: samples per category, or None to use the whole dataset.
        sample_file_list: json file recording which samples were used.
        vis_json_output_dir: directory for the final per-sample JSON output.
    """

    # 1. sample + build graph objects (was visual_single_mod)
    init_log(log_file)
    if sample_num is not None:
        random_files = random_get_samples(dataset_dir, sample_num, sample_file_list)
    else:
        random_files = get_all_samples(dataset_dir, sample_file_list)

    before_io = time.time()
    if os.path.exists(graph_save_file):
        with open(graph_save_file, 'rb') as f:
            objects = pickle.load(f)
    else:
        objects = []
        for files, label in random_files:
            cfg = files[1]
            factory = json_constructor(cfg, 'cfg')
            objects.append((factory.construct_by_inst(), label))
        with open(graph_save_file, 'wb') as f:
            pickle.dump(objects, f)
    after_io = time.time()
    print('load json total time (seconds):', after_io - before_io)

    # Assumes node order matches angr's output order — TODO confirm.
    # NOTE: renew=True guards against a changing sample set; drop it for speed.
    dataset = GCNDataset(objects, dataset_class_save_file, renew=True)
    from torch_geometric.loader import DataLoader
    dataloader = DataLoader(dataset, batch_size=1, shuffle=False)
    model = simplest_gcn.GCNSampleVisual(model_dir, vis_info_dir)
    nn_api.eval_model(model, dataloader, device=torch.device("cuda:0" if torch.cuda.is_available() else "cpu"))

    # 2. sort per-node contributions (was analyse_single_pkl)
    for vis_info_file in os.listdir(vis_info_dir):
        vis_node_con_file = os.path.join(vis_node_con_dir, vis_info_file)
        if os.path.exists(vis_node_con_file):
            continue
        with open(os.path.join(vis_info_dir, vis_info_file), 'rb') as f:
            file_dict = pickle.load(f)
        for file_name, vis_info in file_dict.items():
            why_0 = [item[0][0] for item in vis_info]
            why_0_indices = sorted(range(len(why_0)), key=lambda i: why_0[i], reverse=True)
            why_0_data = [vis_info[i] for i in why_0_indices]
            why_1 = [item[0][1] for item in vis_info]
            why_1_indices = sorted(range(len(why_1)), key=lambda i: why_1[i], reverse=True)
            why_1_data = [vis_info[i] for i in why_1_indices]

            if os.path.exists(vis_node_con_file):
                # BUGFIX: this previously opened ``vis_output`` — a local name
                # only assigned later in step 3 — raising UnboundLocalError on
                # the second sample of a model. Open the per-model file instead.
                with open(vis_node_con_file, 'rb') as output_f:
                    dict_why = pickle.load(output_f)
            else:
                dict_why = dict()
            dict_why[file_name] = {'0': why_0_data, '1': why_1_data}
            with open(vis_node_con_file, 'wb') as output_f:
                pickle.dump(dict_why, output_f)

    # 3. join contributions with the disassembly JSON and export
    for vis_node_con_file in os.listdir(vis_node_con_dir):
        with open(os.path.join(vis_node_con_dir, vis_node_con_file), 'rb') as pkl_file:
            node_con_dict = pickle.load(pkl_file)
        # Each analysed sample is exported with two parts: class '0' and '1'.
        for filename in node_con_dict.keys():
            output_file_name = filename.replace('.pkl', '.json')
            # The sample name encodes its dataset-relative path with '@'.
            file_route = '/'.join(output_file_name.split('@')[1:])
            file_path = os.path.join(dataset_dir, file_route)
            print(file_path, output_file_name)
            output_file_path = os.path.join(vis_json_output_dir, output_file_name)
            if os.path.exists(output_file_path):
                continue

            visinfo = node_con_dict[filename]
            with open(file_path, 'r') as json_file:
                json_load = json.load(json_file)
            json_nodes = json_load["nodes"]
            # Map every assembly instruction address to its node description.
            dict_asmaddr_2_asminfo = {}
            for func in json_nodes:
                for idx, asm in enumerate(func["disasm"]):
                    node = asm_node(func["id"], func["addr"], func["function_name"], idx, asm["address"], asm["mnemonic"], asm["op_str"], 0, 0)
                    dict_asmaddr_2_asminfo[asm["address"]] = node

            # Attach the class-0/1 contributions to each instruction; the two
            # per-class loops were identical, so they are deduplicated here.
            output_dict = {}
            for cls in ('0', '1'):
                entries = []
                for vis in visinfo[cls]:
                    target_addr = vis[1]
                    if target_addr == -1:  # node with no real address
                        continue
                    node = dict_asmaddr_2_asminfo[target_addr]
                    node.ctb_0 = float(vis[0][0][0].item())
                    node.ctb_1 = float(vis[0][0][1].item())
                    entries.append(node.to_json())
                output_dict[cls] = entries
            with open(output_file_path, 'w') as output_f:
                json.dump(output_dict, output_f)


def visual_transformer_mod(dataset_dir, dataset_class_save_file, graph_save_file, model_dir, vis_info_dir, vis_node_con_dir, log_file, sample_num, sample_file_list, vis_json_output_dir):
    """End-to-end Transformer visualization: sample files, build CFG graphs,
    run the visual model, sort per-node contributions, and export JSON.

    Args mirror :func:`visual_gcn_mod`; the differences are the dataset class
    (TransformerDataset), the plain torch DataLoader with a padding collate_fn,
    and the layout of the raw contribution entries (tensor pairs).
    """
    # 1. sample + build graph objects (was visual_single_mod)
    # init_log(log_file)
    if sample_num is not None:
        random_files = random_get_samples(dataset_dir, sample_num, sample_file_list)
    else:
        random_files = get_all_samples(dataset_dir, sample_file_list)

    before_io = time.time()
    if os.path.exists(graph_save_file):
        with open(graph_save_file, 'rb') as f:
            objects = pickle.load(f)
    else:
        objects = []
        for files, label in random_files:
            cfg = files[1]
            factory = json_constructor(cfg, 'cfg')
            objects.append((factory.construct_by_inst(), label))
        with open(graph_save_file, 'wb') as f:
            pickle.dump(objects, f)
    after_io = time.time()
    print('load json total time (seconds):', after_io - before_io)

    dataset = TransformerDataset(objects, dataset_class_save_file, renew=True)
    from torch.utils.data import DataLoader

    def collate_fn(batch):
        # Pad every sample to a fixed (1000, 128) feature matrix.
        batch_size = len(batch)
        batch_data = Batch()
        batch_data.y = torch.tensor([data.y for data in batch])
        batch_data.x = torch.zeros((batch_size, 1000, 128), dtype = torch.float)
        for i, data in enumerate(batch):
            batch_data.x[i] = data.x
        batch_data.file_name = [data.file_name for data in batch]
        batch_data.addr_ls = [data.addr_ls for data in batch]
        return batch_data

    dataloader = DataLoader(dataset, batch_size=1, shuffle=False, collate_fn=collate_fn)
    model = transformer.TransformerSampleVisual(model_dir, vis_info_dir)
    nn_api.eval_model(model, dataloader, device=torch.device("cuda:0" if torch.cuda.is_available() else "cpu"))

    # 2. sort per-node contributions (was analyse_single_pkl)
    for vis_info_file in os.listdir(vis_info_dir):
        vis_node_con_file = os.path.join(vis_node_con_dir, vis_info_file)
        if os.path.exists(vis_node_con_file):
            continue
        with open(os.path.join(vis_info_dir, vis_info_file), 'rb') as f:
            file_dict = pickle.load(f)
        for file_name, vis_info in file_dict.items():
            # Both rankings read the same tensor pair; they differ only in
            # which component (class 0 vs class 1) drives the sort.
            why_0 = [item[0][0] for item in vis_info]
            why_0_indices = sorted(range(len(why_0)), key=lambda i: why_0[i][0].item(), reverse=True)
            why_0_data = [vis_info[i] for i in why_0_indices]
            why_1 = [item[0][0] for item in vis_info]
            why_1_indices = sorted(range(len(why_1)), key=lambda i: why_1[i][1].item(), reverse=True)
            why_1_data = [vis_info[i] for i in why_1_indices]

            if os.path.exists(vis_node_con_file):
                # BUGFIX: this previously opened ``vis_output`` — a local name
                # only assigned later in step 3 — raising UnboundLocalError on
                # the second sample of a model. Open the per-model file instead.
                with open(vis_node_con_file, 'rb') as output_f:
                    dict_why = pickle.load(output_f)
            else:
                dict_why = dict()
            dict_why[file_name] = {'0': why_0_data, '1': why_1_data}
            with open(vis_node_con_file, 'wb') as output_f:
                pickle.dump(dict_why, output_f)

    # 3. join contributions with the disassembly JSON and export
    for vis_node_con_file in os.listdir(vis_node_con_dir):
        with open(os.path.join(vis_node_con_dir, vis_node_con_file), 'rb') as pkl_file:
            node_con_dict = pickle.load(pkl_file)
        # Each analysed sample is exported with two parts: class '0' and '1'.
        for filename in node_con_dict.keys():
            output_file_name = filename.replace('.pkl', '.json')
            # The sample name encodes its dataset-relative path with '@'.
            file_route = '/'.join(output_file_name.split('@')[1:])
            file_path = os.path.join(dataset_dir, file_route)
            print(file_path, output_file_name)
            output_file_path = os.path.join(vis_json_output_dir, output_file_name)
            if os.path.exists(output_file_path):
                continue

            visinfo = node_con_dict[filename]
            with open(file_path, 'r') as json_file:
                json_load = json.load(json_file)
            json_nodes = json_load["nodes"]
            # Map every assembly instruction address to its node description.
            dict_asmaddr_2_asminfo = {}
            for func in json_nodes:
                for idx, asm in enumerate(func["disasm"]):
                    node = asm_node(func["id"], func["addr"], func["function_name"], idx, asm["address"], asm["mnemonic"], asm["op_str"], 0, 0)
                    dict_asmaddr_2_asminfo[asm["address"]] = node

            # Attach the class-0/1 contributions to each instruction; the two
            # per-class loops were identical, so they are deduplicated here.
            output_dict = {}
            for cls in ('0', '1'):
                entries = []
                for vis in visinfo[cls]:
                    target_addr = vis[1]
                    if target_addr == -1:  # node with no real address
                        continue
                    node = dict_asmaddr_2_asminfo[target_addr]
                    node.ctb_0 = float(vis[0][0][0].item())
                    node.ctb_1 = float(vis[0][0][1].item())
                    entries.append(node.to_json())
                output_dict[cls] = entries
            with open(output_file_path, 'w') as output_f:
                json.dump(output_dict, output_f)





def visual_lstm_mod(dataset_dir, dataset_class_save_file, graph_save_file, model_dir, vis_info_dir, vis_node_con_dir, log_file, sample_num, sample_file_list, vis_json_output_dir):
    """End-to-end LSTM visualization: sample files, build CFG graphs, run the
    visual model, sort per-node contributions, and export per-sample JSON.

    Args mirror :func:`visual_gcn_mod`; the differences are the dataset class
    (LSTMDataset, renew=False), a collate_fn that also carries ``asm_count``,
    and the layout of the raw contribution entries (tensor pairs).
    """

    # 1. sample + build graph objects (was visual_single_mod)
    init_log(log_file)
    if sample_num is not None:
        random_files = random_get_samples(dataset_dir, sample_num, sample_file_list)
    else:
        random_files = get_all_samples(dataset_dir, sample_file_list)

    before_io = time.time()
    if os.path.exists(graph_save_file):
        with open(graph_save_file, 'rb') as f:
            objects = pickle.load(f)
    else:
        objects = []
        for files, label in random_files:
            cfg = files[1]
            factory = json_constructor(cfg, 'cfg')
            objects.append((factory.construct_by_inst(), label))
        with open(graph_save_file, 'wb') as f:
            pickle.dump(objects, f)
    after_io = time.time()
    print('load json total time (seconds):', after_io - before_io)

    # Assumes node order matches angr's output order — TODO confirm.
    dataset = LSTMDataset(objects, dataset_class_save_file, renew=False) # NOTE:ZGH

    def collate_fn(batch):
        # Pad every sample to (1000, 128) and keep the true instruction count.
        batch_size = len(batch)
        batch_data = Batch()
        batch_data.y = torch.tensor([data.y for data in batch])
        batch_data.x = torch.zeros((batch_size, 1000, 128), dtype = torch.float)
        batch_data.asm_count = torch.tensor([data.asm_count for data in batch])
        batch_data.file_name = [data.file_name for data in batch]
        batch_data.addr_ls = [data.addr_ls for data in batch]
        for i, data in enumerate(batch):
            batch_data.x[i] = data.x
        return batch_data

    from torch.utils.data import DataLoader
    dataloader = DataLoader(dataset, batch_size=1, shuffle=False, collate_fn=collate_fn)
    model = lstm.LSTMVisual(model_dir, vis_info_dir)
    nn_api.eval_model(model, dataloader, device=torch.device("cuda:0" if torch.cuda.is_available() else "cpu"))

    # 2. sort per-node contributions (was analyse_single_pkl)
    for vis_info_file in os.listdir(vis_info_dir):
        vis_node_con_file = os.path.join(vis_node_con_dir, vis_info_file)
        if os.path.exists(vis_node_con_file):
            continue
        with open(os.path.join(vis_info_dir, vis_info_file), 'rb') as f:
            file_dict = pickle.load(f)
        for file_name, vis_info in file_dict.items():
            # Both rankings read the same tensor pair; they differ only in
            # which component (class 0 vs class 1) drives the sort.
            why_0 = [item[0][0] for item in vis_info]
            why_0_indices = sorted(range(len(why_0)), key=lambda i: why_0[i][0].item(), reverse=True)
            why_0_data = [vis_info[i] for i in why_0_indices]
            why_1 = [item[0][0] for item in vis_info]
            why_1_indices = sorted(range(len(why_1)), key=lambda i: why_1[i][1].item(), reverse=True)
            why_1_data = [vis_info[i] for i in why_1_indices]

            if os.path.exists(vis_node_con_file):
                # BUGFIX: this previously opened ``vis_output`` — a local name
                # only assigned later in step 3 — raising UnboundLocalError on
                # the second sample of a model. Open the per-model file instead.
                with open(vis_node_con_file, 'rb') as output_f:
                    dict_why = pickle.load(output_f)
            else:
                dict_why = dict()
            dict_why[file_name] = {'0': why_0_data, '1': why_1_data}
            with open(vis_node_con_file, 'wb') as output_f:
                pickle.dump(dict_why, output_f)

    # 3. join contributions with the disassembly JSON and export
    for vis_node_con_file in os.listdir(vis_node_con_dir):
        with open(os.path.join(vis_node_con_dir, vis_node_con_file), 'rb') as pkl_file:
            node_con_dict = pickle.load(pkl_file)
        # Each analysed sample is exported with two parts: class '0' and '1'.
        for filename in node_con_dict.keys():
            output_file_name = filename.replace('.pkl', '.json')
            # The sample name encodes its dataset-relative path with '@'.
            file_route = '/'.join(output_file_name.split('@')[1:])
            file_path = os.path.join(dataset_dir, file_route)
            print(file_path, output_file_name)
            output_file_path = os.path.join(vis_json_output_dir, output_file_name)
            if os.path.exists(output_file_path):
                continue

            visinfo = node_con_dict[filename]
            with open(file_path, 'r') as json_file:
                json_load = json.load(json_file)
            json_nodes = json_load["nodes"]
            # Map every assembly instruction address to its node description.
            dict_asmaddr_2_asminfo = {}
            for func in json_nodes:
                for idx, asm in enumerate(func["disasm"]):
                    node = asm_node(func["id"], func["addr"], func["function_name"], idx, asm["address"], asm["mnemonic"], asm["op_str"], 0, 0)
                    dict_asmaddr_2_asminfo[asm["address"]] = node

            # Attach the class-0/1 contributions to each instruction; the two
            # per-class loops were identical, so they are deduplicated here.
            output_dict = {}
            for cls in ('0', '1'):
                entries = []
                for vis in visinfo[cls]:
                    target_addr = vis[1]
                    if target_addr == -1:  # node with no real address
                        continue
                    node = dict_asmaddr_2_asminfo[target_addr]
                    node.ctb_0 = float(vis[0][0][0].item())
                    node.ctb_1 = float(vis[0][0][1].item())
                    entries.append(node.to_json())
                output_dict[cls] = entries
            with open(output_file_path, 'w') as output_f:
                json.dump(output_dict, output_f)
            
        

if __name__ == '__main__':
    # Entry point: run the legacy multi-model pipeline over every checkpoint
    # found under the module-level ``nn_file`` directory.
    visual_multiple_mod()
    # analyse_pkl()