from torch.utils.data import Dataset
import time
import pickle
import os
import torch
import numpy as np
import random
from torch_geometric.data import Data
from .annotation_parser import annotation_json_parser

# Used for the Transformer model
class TransformerDataset(Dataset):
    """Instruction-graph dataset used by the Transformer model.

    Each sample is a ``torch_geometric.data.Data`` holding a fixed
    1000x128 node-feature matrix and the instruction-graph edges.
    Vectorized samples are cached with pickle at ``samples_route`` so
    later runs skip the slow vectorization step.

    Parameters
    ----------
    objects : iterable of ``(graph_obj, label)`` pairs. ``graph_obj``
        exposes ``inst_nodes`` (each with ``address`` and ``features``),
        ``inst_edges`` (each with ``src``/``dst``) and ``file_name``.
    samples_route : path of the pickle cache file.
    annotation_dir : unused here; kept for interface parity with the
        other dataset classes in this module.
    renew : when True, ignore an existing cache and re-vectorize.
    """

    def __init__(self, objects, samples_route, annotation_dir=None, renew=False):
        # Parallel lists: element i of each list describes sample i.
        self.labels = []
        self.samples = []
        self.file_names = []
        self.node_addr = []
        self.samples_route = samples_route
        self.annotation_dir = annotation_dir

        before_io = time.time()
        if os.path.exists(self.samples_route) and not renew:
            # Fast path: reuse the pickle cache instead of re-vectorizing.
            # NOTE(review): pickle.load is unsafe on untrusted files; the
            # cache is assumed to have been written by this class itself.
            with open(self.samples_route, 'rb') as f:
                data = pickle.load(f)
                for sample in data:
                    self.labels.append(sample['label'])
                    self.samples.append(sample['vector'])
                    self.file_names.append(sample['file_name'])
                    self.node_addr.append(sample['node_addr'])
        else:
            sample_ls = []
            for obj in objects:
                self.labels.append(obj[1])
                vec = self.vectorize(obj[0])
                addr = self.getaddr(obj[0])
                self.samples.append(vec)
                self.node_addr.append(addr)
                self.file_names.append(obj[0].file_name)
                sample_ls.append({'label': obj[1], 'vector': vec,
                                  'file_name': obj[0].file_name,
                                  'node_addr': addr})
            with open(self.samples_route, 'wb') as f:
                pickle.dump(sample_ls, f)

        end_io = time.time()
        print('vectorize total time:', end_io-before_io) 

    def getaddr(self, obj):
        """Return the list of instruction addresses, one per node."""
        return [node.address for node in obj.inst_nodes]

    def vectorize(self, obj):
        """Encode *obj* as a ``Data`` graph with a 1000x128 feature matrix.

        Per-node layout of the 128-dim feature vector:
          * 0-43: one-hot of the mnemonic (``features[0]``; 43 = 'others'),
          * 44-85: character codes of operand string ``features[1]``,
          * 86-127: character codes of operand string ``features[2]``.

        Fix over the original: nodes beyond 1000 and operand strings
        longer than 42 characters are truncated instead of spilling into
        the neighbouring feature region or raising IndexError.
        """
        # NOTE predefined one-hot dictionary
        one_hot_dict = {"xor": 0, "mov": 1, "pop": 2, "and": 3, "push": 4, "lea": 5, "call": 6, "add": 7, "sub": 8, "jmp": 9, "cmp": 10, "je": 11, "nop": 12, "leave": 13, "ret": 14, "test": 15, "hlt": 16, "div": 17, "imul": 18, "shr": 19, "shl": 20, "sar": 21, "movabs": 22, "setb": 23, "movzx": 24, "setne": 25, "jae": 26, "jne": 27, "jle": 28, "jbe": 29, "pxor": 30, "movaps": 31, "movq": 32, "js": 33, "cdqe": 34, "not": 35, "sete": 36, "jg": 37, "enter": 38, "rol": 39, "fadd": 40, "jb": 41, "rep stosq": 42}
        one_hot_dict['others'] = 43

        x = torch.FloatTensor(np.zeros((1000, 128)))
        edge_index = torch.LongTensor(np.zeros((len(obj.inst_edges), 2)))

        # get node features (without positional embedding)
        for idx, node in enumerate(obj.inst_nodes):
            if idx >= 1000:
                # Feature matrix is fixed at 1000 rows; ignore extra nodes.
                break
            features = node.features
            # 0-43: one hot for feature[0]; unknown mnemonics map to 'others'
            x[idx][one_hot_dict.get(features[0], one_hot_dict['others'])] = 1
            # 44-85: feature[1] (truncated to 42 chars so it cannot
            # overwrite the feature[2] region)
            for i, ch in enumerate(features[1][:42]):
                x[idx][44 + i] = ord(ch)
            # 86-127: feature[2] (truncated to 42 chars to stay in bounds)
            for i, ch in enumerate(features[2][:42]):
                x[idx][86 + i] = ord(ch)
        # add edge_index
        for idx, edge in enumerate(obj.inst_edges):
            edge_index[idx][0] = edge.src
            edge_index[idx][1] = edge.dst

        return Data(x, edge_index.t().contiguous())

    def __getitem__(self, index):
        """Return the cached Data object enriched with label and metadata."""
        data = self.samples[index]
        data.y = torch.LongTensor([self.labels[index]])
        data.file_name = self.file_names[index]
        data.addr_ls = self.node_addr[index]
        return data

    def __len__(self):
        return len(self.samples)


# Used for the GCN model
class GCNDataset(Dataset):
    """Instruction-graph dataset used by the GCN model.

    Each sample is a ``torch_geometric.data.Data`` with a fixed 1000x128
    node-feature matrix and a deduplicated edge list.  When
    ``annotation_dir`` is supplied and ``annotation_usage`` is 'repeat'
    or 'all', samples with usable annotations are stored twice: once
    fully modelled and once restricted (mostly) to the nodes of
    sensitive functions, so the annotated portion gets extra weight
    during training.  Vectorized samples are cached with pickle at
    ``samples_route``.
    """

    def __init__(self,objects,samples_route,annotation_dir=None,renew=False,annotation_usage='repeat'):
        # Parallel lists: element i of each list describes sample i.
        self.labels=[]
        self.samples=[]
        self.file_names=[]
        self.node_addr=[]
        self.samples_route=samples_route
        self.annotation_dir=annotation_dir
        self.annotation_usage=annotation_usage

        before_io = time.time()
        if os.path.exists(self.samples_route) and renew == False:
            # Fast path: reuse the pickle cache instead of re-vectorizing.
            # NOTE(review): pickle.load is unsafe on untrusted files; the
            # cache is assumed to have been written by this class itself.
            with open(self.samples_route, 'rb') as f:
                data = pickle.load(f)
                for sample in data:
                    self.labels.append(sample['label'])
                    self.samples.append(sample['vector'])
                    self.file_names.append(sample['file_name'])
                    self.node_addr.append(sample['node_addr'])
        else:
            print('***STAGE: Constructing Dataset***')
            sample_ls=[]
            for obj in objects:
                self.labels.append(obj[1])
                
                # Derive sample metadata from the path layout of the graph JSON.
                graph_json_route = obj[0].file_name # preprocessed_data/addon_test/addon_juliet_dataset/CWE121_Stack_Based_Buffer_Overflow_O0_s/s06/good/CWE121_Stack_Based_Buffer_Overflow__CWE806_char_declare_loop_42/cfg_graph.json
                sample_name = graph_json_route.split(r'/')[-2] # CWE121_Stack_Based_Buffer_Overflow__CWE806_char_declare_loop_42
                sample_label = graph_json_route.split(r'/')[-3] # good
                s0i = graph_json_route.split(r'/')[-4] # s06
                print('Processing on sample:', sample_name)
                # NOTE 2025/1/2 annotation usage v1 (annotation-constrained modelling): when
                # an annotation dir is specified, use the annotation-aware vectorize function
                # and drop un-modelled nodes from the generated addr_ls.
                # v1 keeps the original node-vector encoding and only constrains which nodes
                # are generated, so only the asm annotation is needed.
                # NOTE 2025/3/29 revision: v1's constraint is realised as majority voting
                # (sensitive-function-related nodes vote) in the training stage; this part
                # mainly repeats the annotated samples for training.

                if annotation_dir is not None and (self.annotation_usage == 'repeat' or self.annotation_usage == 'all'):
                    print('[NOTE] Annotation Dir Specified, Try to Find Related Annotation files')
                    sample_annotation_dir_route = os.path.join(annotation_dir, s0i, sample_label, sample_name) # preprocessed_data/notation/O0/s06/good/CWE121_Stack_Based_Buffer_Overflow__CWE806_char_declare_loop_42
                    # print(sample_annotation_dir_route)
                    # exit()
                    asm_annotation_file = os.path.join(sample_annotation_dir_route, r'result.json')
                    decompile_annotation_file = os.path.join(sample_annotation_dir_route, r'decompile_result.json')
                    if os.path.exists(sample_annotation_dir_route) and os.path.exists(asm_annotation_file) and os.path.exists(decompile_annotation_file):
                        
                        parser = annotation_json_parser(asm_annotation_file, decompile_annotation_file)
                        annotation = parser.construct()
                        # Use the annotation only when it marks at least one sensitive
                        # call (inlined, direct, or PLT-based).
                        if {**annotation.inlined_func_dict, **annotation.direct_sensitive_dict, **annotation.plt_sensitive_addr_dict} != {}:
                            addr = self.getaddr(obj[0])
                            vec, addr = self.vecotrize_sensitive_func_with_annotation(obj[0], annotation, parser, addr)
                            
                            # The annotated sample is modelled twice (full graph and
                            # sensitive-function-only graph), hence the separate appends.
                            self.samples.append(vec[0])
                            self.node_addr.append(addr[0])
                            self.file_names.append(obj[0].file_name)
                            # self.labels.append(obj[1])
                            sample_ls.append({'label':obj[1], 'vector':vec[0], 'file_name':obj[0].file_name, 'node_addr':addr[0]})
                            
                            self.samples.append(vec[1])
                            self.node_addr.append(addr[1])  
                            self.file_names.append(obj[0].file_name) 
                            self.labels.append(obj[1])
                            sample_ls.append({'label':obj[1], 'vector':vec[1], 'file_name':obj[0].file_name, 'node_addr':addr[1]})
                            
                            continue
                        
                        else:
                            print('[NOTE] Annotation Have No inline/direct/plt Sensitive Func Call, Construct Dataset without annotation')
                            addr = self.getaddr(obj[0])
                            vec = self.vectorize(obj[0])
                    else:
                        print('[NOTE] Annotation Sample Dir Not Found, Construct Dataset without annotation')
                        addr = self.getaddr(obj[0])
                        vec = self.vectorize(obj[0])
                else:
                    print('[NOTE] Annotation Dir is None, Construct Dataset without annotation')  
                    addr = self.getaddr(obj[0])
                    vec = self.vectorize(obj[0])

                self.samples.append(vec)
                self.node_addr.append(addr)
                self.file_names.append(obj[0].file_name)
                sample_ls.append({'label':obj[1], 'vector':vec, 'file_name':obj[0].file_name, 'node_addr':addr})
            with open(self.samples_route, 'wb') as f:
                pickle.dump(sample_ls, f)

        end_io = time.time()
        print('vectorize total time:', end_io-before_io) # time:
        # self.labels = torch.LongTensor(self.labels)

    # NOTE 2025/1/2 annotation usage v1: with the annotation, model only the nodes
    # related to sensitive functions and skip the rest.
    # NOTE 2025/3/29 revision: the constraint is realised as majority voting
    # (sensitive-function-related nodes vote) during training; here we mainly build
    # the repeated annotated sample.
    # NOTE(review): the method name keeps the original "vecotrize" typo because
    # external callers may rely on it.
    def vecotrize_sensitive_func_with_annotation(self, obj, annotation, parser, addr):
        """Build two graphs for one annotated sample.

        Returns ``(return_data, return_addr)`` where index 0 is the fully
        modelled graph with the full address list, and index 1 is the
        sensitive-function-only graph whose non-sensitive addresses are
        zeroed out.
        """
        return_data, return_addr = [], []
        # Predefined one-hot vocabulary for instruction mnemonics; anything
        # unknown maps to 'others' (index 43).
        one_hot_dict = {"xor": 0, "mov": 1, "pop": 2, "and": 3, "push": 4, "lea": 5, "call": 6, "add": 7, "sub": 8, "jmp": 9, "cmp": 10, "je": 11, "nop": 12, "leave": 13, "ret": 14, "test": 15, "hlt": 16, "div": 17, "imul": 18, "shr": 19, "shl": 20, "sar": 21, "movabs": 22, "setb": 23, "movzx": 24, "setne": 25, "jae": 26, "jne": 27, "jle": 28, "jbe": 29, "pxor": 30, "movaps": 31, "movq": 32, "js": 33, "cdqe": 34, "not": 35, "sete": 36, "jg": 37, "enter": 38, "rol": 39, "fadd": 40, "jb": 41, "rep stosq": 42}
        one_hot_dict['others'] = 43

        x = torch.FloatTensor(np.zeros((1000, 128)))
        # edge_index = torch.LongTensor(np.zeros((len(obj.inst_edges), 2)))

        # --- Sample 1: fully modelled graph ---
        # get node features (without positional embedding)
        idx = 0
        for node in obj.inst_nodes:
            features = node.features
            # 0-43: one hot for feature[0]
            mnemonic = features[0]
            if mnemonic in one_hot_dict:
                x[idx][one_hot_dict[mnemonic]] = 1
            else:
                x[idx][one_hot_dict['others']] = 1
            # 44-85: feature[1]
            for i in range(44, 44+len(features[1])):
                x[idx][i] = ord(features[1][i-44])
            # 86-127: feature[2]
            for i in range(86, 86+len(features[2])):
                x[idx][i] = ord(features[2][i-86])            
            idx += 1
        # add edge_index
        edges_set = set((edge.src, edge.dst) for edge in obj.inst_edges)  # deduplicate
        edge_index = torch.LongTensor(np.zeros((len(edges_set), 2)))
        for idx, (src, dst) in enumerate(edges_set):
            edge_index[idx][0] = src
            edge_index[idx][1] = dst
        # idx = 0
        # for edge in obj.inst_edges:
        #     edge_index[idx][0] = edge.src
        #     edge_index[idx][1] = edge.dst
        #     idx += 1
        return_data.append(Data(x, edge_index.t().contiguous()))
        return_addr.append(addr)

        # --- Sample 2: sensitive-function-only graph ---
        # Collect all sensitive-function asm address ranges, and gather the
        # indices of the corresponding nodes in inst_nodes.
        # NOTE 2025/3/29 in the current version the features stay at their original
        # idx positions (no re-indexing) and so do the edges; x_sensitive shares
        # indices with x.
        x_sensitive = torch.FloatTensor(np.zeros((1000, 128)))
        sensitive_asm_addr_ls = parser.get_sensitive_asm_addrs(annotation)
        sensitive_node_idx_ls = []
        for (start_addr, end_addr) in sensitive_asm_addr_ls:
            for i in range(len(obj.inst_nodes)):
                # print(type(obj.inst_nodes[i].address),obj.inst_nodes[i].address)
                # exit()
                if int(obj.inst_nodes[i].address, 16) >= int(start_addr, 16) and int(obj.inst_nodes[i].address, 16) <= int(end_addr, 16):
                    sensitive_node_idx_ls.append(i)
                else:
                    # Also keep non-sensitive nodes at random.
                    # NOTE(review): the original comment said "90% probability", but
                    # the 0.5 threshold keeps only ~50% — confirm the intended rate.
                    if random.random() > 0.5:
                        sensitive_node_idx_ls.append(i)
        sensitive_node_idx_ls = list(set(sensitive_node_idx_ls))
        for i in sensitive_node_idx_ls:
            x_sensitive[i] = x[i]
        # Select the edges directly connected to sensitive nodes.
        # NOTE 2025/3/29 indirectly connected edges are not considered yet.
        sensitive_edge_idx_ls = []
        for edge in obj.inst_edges:
                
            # NOTE 2025/4/20 filter removed: model every edge.
            # if edge.src in sensitive_node_idx_ls and edge.dst in sensitive_node_idx_ls:
                sensitive_edge_idx_ls.append((edge.src, edge.dst))

        sensitive_edge_idx_ls = list(set(sensitive_edge_idx_ls))
        edge_index_sensitive = torch.LongTensor(np.zeros((len(sensitive_edge_idx_ls), 2)))
        for idx, (src, dst) in enumerate(sensitive_edge_idx_ls):
            edge_index_sensitive[idx][0] = src
            edge_index_sensitive[idx][1] = dst
        return_data.append(Data(x_sensitive, edge_index_sensitive.t().contiguous()))
        
        # Address list for the sensitive-only sample: positions of nodes that were
        # not kept are zeroed out.
        addr_sensitive = []
        for a in addr:
            addr_sensitive.append(a)
        for i in range(len(addr)):
            if i not in sensitive_node_idx_ls:
                addr_sensitive[i] = 0
        return_addr.append(addr_sensitive)
    
        # print(len(return_data[0].edge_index[0]))
        # print(len(return_data[1].edge_index[0]))
        # exit()

        return return_data, return_addr

    def getaddr(self,obj):
        """Return the list of instruction addresses, one per node."""
        addr_ls = []
        for node in obj.inst_nodes:
            addr = node.address
            addr_ls.append(addr)
        return addr_ls

    def vectorize(self,obj):
        """Encode *obj* as a ``Data`` graph (1000x128 node features,
        deduplicated edges).

        Per-node layout: 0-43 one-hot mnemonic (43 = 'others'), 44-85
        character codes of ``features[1]``, 86-127 character codes of
        ``features[2]``.
        """
        # result={}
        # result['x'] =torch.FloatTensor(np.zeros((5000,128))) # 5000*128
        # result['edge_index']=torch.LongTensor(np.zeros((2,100))) # 2*n
        # 0-80 81-91 91-101 101-110 16*3+80
        
        # NOTE predefined one-hot dictionary
        one_hot_dict = {"xor": 0, "mov": 1, "pop": 2, "and": 3, "push": 4, "lea": 5, "call": 6, "add": 7, "sub": 8, "jmp": 9, "cmp": 10, "je": 11, "nop": 12, "leave": 13, "ret": 14, "test": 15, "hlt": 16, "div": 17, "imul": 18, "shr": 19, "shl": 20, "sar": 21, "movabs": 22, "setb": 23, "movzx": 24, "setne": 25, "jae": 26, "jne": 27, "jle": 28, "jbe": 29, "pxor": 30, "movaps": 31, "movq": 32, "js": 33, "cdqe": 34, "not": 35, "sete": 36, "jg": 37, "enter": 38, "rol": 39, "fadd": 40, "jb": 41, "rep stosq": 42}
        one_hot_dict['others'] = 43

        x = torch.FloatTensor(np.zeros((1000, 128)))
        # edge_index = torch.LongTensor(np.zeros((len(obj.inst_edges), 2)))


        # get node features (without positional embedding)
        idx = 0
        for node in obj.inst_nodes:
            features = node.features
            # 0-43: one hot for feature[0]
            mnemonic = features[0]
            if mnemonic in one_hot_dict:
                x[idx][one_hot_dict[mnemonic]] = 1
            else:
                x[idx][one_hot_dict['others']] = 1
            # 44-85: feature[1]
            for i in range(44, 44+len(features[1])):
                x[idx][i] = ord(features[1][i-44])
            # 86-127: feature[2]
            for i in range(86, 86+len(features[2])):
                x[idx][i] = ord(features[2][i-86])            
            idx += 1
        # add edge_index
        edges_set = set((edge.src, edge.dst) for edge in obj.inst_edges)  # deduplicate
        edge_index = torch.LongTensor(np.zeros((len(edges_set), 2)))
        for idx, (src, dst) in enumerate(edges_set):
            edge_index[idx][0] = src
            edge_index[idx][1] = dst
        # with open(feature_pkl, 'wb') as f:
        #     pickle.dump((x, edge_index), f)

        return Data(x, edge_index.t().contiguous())
        # 5000*128 nodes, 5000*2 edges
        # return Data(x=torch.FloatTensor(np.zeros((5000,128))),edge_index=torch.LongTensor(np.zeros((100,2))).t().contiguous())
        
    
    def __getitem__(self, index):
        """Return the cached Data object enriched with label and metadata."""
        data=self.samples[index]
        data.y=torch.LongTensor([self.labels[index]])
        data.file_name=self.file_names[index]
        data.addr_ls=self.node_addr[index]
        # data.x=data.x[:,:44]
        return data

    def __len__(self):
        return len(self.samples)


# Placeholder for an RNN dataset (not implemented)
# class RNNDateset(Dataset):
#     def __init__(self):
#         super().__init__()

# Used for the LSTM model
class LSTMDataset(Dataset):
    """Instruction-sequence dataset used by the LSTM model.

    Like the other dataset classes, each sample is stored as a
    ``torch_geometric.data.Data`` with a fixed 1000x128 feature matrix;
    ``asm_count`` additionally records how many rows of the matrix are
    real instructions, so the model can ignore the zero padding.
    Vectorized samples are cached with pickle at ``samples_route``.

    Parameters
    ----------
    objects : iterable of ``(graph_obj, label)`` pairs (see
        ``TransformerDataset``).
    samples_route : path of the pickle cache file.
    annotation_dir, annotation_usage : unused here; kept for interface
        parity with ``GCNDataset``.
    renew : when True, ignore an existing cache and re-vectorize.
    """

    def __init__(self, objects, samples_route, annotation_dir=None, renew=False, annotation_usage='repeat'):
        # Parallel lists: element i of each list describes sample i.
        self.labels = []
        self.samples = []
        self.file_names = []
        self.node_addr = []
        # Number of instructions vectorized per sample; filled either from
        # the cache or, on a fresh build, as a side effect of vectorize().
        self.asm_count = []
        self.samples_route = samples_route
        self.annotation_dir = annotation_dir
        self.annotation_usage = annotation_usage

        before_io = time.time()
        if os.path.exists(self.samples_route) and not renew:
            # Fast path: reuse the pickle cache instead of re-vectorizing.
            # NOTE(review): pickle.load is unsafe on untrusted files; the
            # cache is assumed to have been written by this class itself.
            with open(self.samples_route, 'rb') as f:
                data = pickle.load(f)
                for sample in data:
                    self.labels.append(sample['label'])
                    self.samples.append(sample['vector'])
                    self.file_names.append(sample['file_name'])
                    self.node_addr.append(sample['node_addr'])
                    self.asm_count.append(sample['asm_count'])
        else: 
            sample_ls = []
            for obj in objects:
                self.labels.append(obj[1])
                # vectorize() also appends the processed-node count to
                # self.asm_count (kept from the original implementation).
                vec = self.vectorize(obj[0])
                self.samples.append(vec)
                addr = self.getaddr(obj[0])
                self.file_names.append(obj[0].file_name)
                self.node_addr.append(addr)
                sample_ls.append({'label': obj[1], 'vector': vec,
                                  'file_name': obj[0].file_name,
                                  'node_addr': addr,
                                  'asm_count': len(obj[0].inst_nodes)})
            with open(self.samples_route, 'wb') as f:
                pickle.dump(sample_ls, f)

        end_io = time.time()
        print('vectorize total time:', end_io-before_io)

    def getaddr(self, obj):
        """Return the list of instruction addresses, one per node."""
        return [node.address for node in obj.inst_nodes]

    def vectorize(self, obj):
        """Encode *obj* as a ``Data`` graph with a 1000x128 feature matrix.

        Per-node layout: 0-43 one-hot mnemonic (43 = 'others'), 44-85
        character codes of ``features[1]``, 86-127 character codes of
        ``features[2]``.

        Side effect: appends the number of vectorized instructions to
        ``self.asm_count``.  Fix over the original: nodes beyond 1000 and
        operand strings longer than 42 characters are truncated instead
        of spilling into the neighbouring feature region or raising
        IndexError.
        """
        # NOTE predefined one-hot dictionary
        one_hot_dict = {"xor": 0, "mov": 1, "pop": 2, "and": 3, "push": 4, "lea": 5, "call": 6, "add": 7, "sub": 8, "jmp": 9, "cmp": 10, "je": 11, "nop": 12, "leave": 13, "ret": 14, "test": 15, "hlt": 16, "div": 17, "imul": 18, "shr": 19, "shl": 20, "sar": 21, "movabs": 22, "setb": 23, "movzx": 24, "setne": 25, "jae": 26, "jne": 27, "jle": 28, "jbe": 29, "pxor": 30, "movaps": 31, "movq": 32, "js": 33, "cdqe": 34, "not": 35, "sete": 36, "jg": 37, "enter": 38, "rol": 39, "fadd": 40, "jb": 41, "rep stosq": 42}
        one_hot_dict['others'] = 43

        x = torch.FloatTensor(np.zeros((1000, 128)))
        edge_index = torch.LongTensor(np.zeros((len(obj.inst_edges), 2)))

        # get node features (without positional embedding)
        processed = 0
        for idx, node in enumerate(obj.inst_nodes):
            if idx >= 1000:
                # Feature matrix is fixed at 1000 rows; ignore extra nodes.
                break
            features = node.features
            # 0-43: one hot for feature[0]; unknown mnemonics map to 'others'
            x[idx][one_hot_dict.get(features[0], one_hot_dict['others'])] = 1
            # 44-85: feature[1] (truncated to 42 chars so it cannot
            # overwrite the feature[2] region)
            for i, ch in enumerate(features[1][:42]):
                x[idx][44 + i] = ord(ch)
            # 86-127: feature[2] (truncated to 42 chars to stay in bounds)
            for i, ch in enumerate(features[2][:42]):
                x[idx][86 + i] = ord(ch)
            processed += 1
        # Side effect kept from the original: record how many instructions
        # were actually vectorized (capped at the 1000 available rows).
        self.asm_count.append(processed)
        # add edge_index
        for idx, edge in enumerate(obj.inst_edges):
            edge_index[idx][0] = edge.src
            edge_index[idx][1] = edge.dst

        return Data(x, edge_index.t().contiguous())

    def __getitem__(self, index):
        """Return the cached Data object enriched with label and metadata."""
        data = self.samples[index]
        data.y = torch.LongTensor([self.labels[index]])
        data.file_name = self.file_names[index]
        data.addr_ls = self.node_addr[index]
        data.asm_count = self.asm_count[index]
        return data

    def __len__(self):
        return len(self.samples)