from typing import List
from torch.utils.data import Dataset
import pickle as pk
import gensim
import torch
import numpy as np
from collections import defaultdict
import tqdm
from loguru import logger

from my_utils.parsing_util import ACTIVATION_LABEL, CROPPING_SIZE, DEPTH_MULTIPLIER, DILATION_RATE_0_LABEL, FILTERS_LABEL, FILTERS_LABEL_LARGE, UNITS_LABEL
from my_utils.parsing_util import INTERPOLATION_LABEL, KERNEL_SIZE, KERNEL_SIZE_LARGE, PADDING_LABEL, PADDING_SIZE, POOLING_SIZE, SAMPLING_SIZE
from my_utils.parsing_util import SLOT_COUNT, SLOT_LENGTH, STRIDE_LABELS

import _thread

# Keras layer class names recognized by the layer-type classifier; a layer's
# integer class label is its index in this list (see TVMDataSet.__init__).
label_list = [
    'LSTM', 'UpSampling2D', 'UpSampling3D', 'Cropping2D', 'ZeroPadding2D', 'ZeroPadding3D', 'SeparableConv2D', 'Conv2D',
    'DepthwiseConv2D', 'ReLU', 'ThresholdedReLU', 'LeakyReLU', 'Softmax', 'ELU', 'Conv3D', 'Dense', 'Reshape',
    'Flatten', 'Concatenate', 'Average', 'Maximum', 'Minimum', 'Add', 'Subtract', 'Multiply', 'Dot', 'MaxPooling2D',
    'MaxPooling3D', 'AveragePooling2D', 'AveragePooling3D', 'GlobalMaxPooling2D', 'GlobalMaxPooling3D',
    'GlobalAveragePooling2D', 'GlobalAveragePooling3D', 'BatchNormalization'
]

# TVM "atom" operator names (cleaned compiled-function names, see
# get_clean_name); an atom's class index is its position in this list
# (used by get_super_label and friends).
atom_list = [
    'divide', 'abs', 'expand_dims', 'negative', 'upsampling', 'batch_matmul', 'global_max_pool2d', 'subtract',
    'minimum', 'concatenate', 'add', 'relu', 'bias_add', 'global_avg_pool2d', 'exp', 'cast', 'multiply', 'clip',
    'upsampling3d', 'tanh', 'max_pool3d', 'log', 'strided_slice', 'pad', 'avg_pool3d', 'max_pool2d', 'maximum', 'dense',
    'transpose', 'sqrt', 'sigmoid', 'adaptive_avg_pool3d', 'batch_flatten', 'conv3d', 'avg_pool2d', 'softmax',
    'greater', 'leaky_relu', 'adaptive_max_pool3d', 'conv2d'
]


def shape_embedding(shape, max_length=12, max_dim=5):
    """Encode up to *max_dim* entries of *shape* as one flat 0/1 vector.

    Each dimension extent is written MSB-first as a fixed-width binary field
    of *max_length* bits (zero-padded on the left; extents with more than
    *max_length* bits keep only their high bits).  Non-positive extents and
    missing trailing dimensions encode as all zeros.

    Returns a float64 torch tensor of length ``max_dim * max_length``.
    """
    embed = np.zeros(max_dim * max_length)
    for dim_idx, extent in enumerate(shape[:max_dim]):
        bits = format(extent, "b") if extent > 0 else "0"
        # Left-pad to the field width, then truncate overflow (keeps MSBs).
        bits = bits.zfill(max_length)[:max_length]
        start = dim_idx * max_length
        embed[start:start + max_length] = [int(c) for c in bits]
    return torch.from_numpy(embed)


def get_conv2d_shape(data):
    """Embed the recorded input/output shapes of one conv/dense/pool function.

    data: dict with "input_shape" and "output_shape" entries (a multi-input
        entry stores a list of shapes under "input_shape"; the first is used).

    Returns the concatenation of the two shape embeddings (length 120 with
    shape_embedding's defaults).
    """
    out_shape = data["output_shape"]
    in_shape = data["input_shape"]

    # Multi-input entries nest the shape list one level deeper; take the first.
    # Use the builtin `list` here: isinstance() against typing.List is
    # deprecated (behavior is identical).
    if len(in_shape) > 0 and isinstance(in_shape[0], list):
        in_shape = in_shape[0]

    shape_embedding_list = [shape_embedding(in_shape), shape_embedding(out_shape)]
    return torch.cat(shape_embedding_list)


def get_shape(type, config, weight_info=False):
    """Embed a layer's input/output (and optionally Conv2D weight) shapes.

    type: Keras layer class name (shadows the builtin; kept for callers).
    config: layer config dict; the output shape lives either at the top level
        ("outputShape") or under "extral_param".
    weight_info: when True and *type* is a Conv2D variant, also append one
        embedding per weight-tensor shape from config["paramsShape"].
    """
    if "outputShape" not in config:
        out_shape = config["extral_param"]["outputShape"]
    else:
        out_shape = config["outputShape"]

    in_shape = config["input_shape"]
    # Multi-input layers record a list of shapes; use the first one.
    if len(in_shape) > 0 and isinstance(in_shape[0], List):
        in_shape = in_shape[0]

    # Conv2D configs omit the leading batch dimension; prepend a batch of 1.
    if "Conv2D" in type:
        in_shape = [1] + in_shape

    embeddings = [shape_embedding(in_shape), shape_embedding(out_shape)]
    if weight_info and "Conv2D" in type:
        for weight_shape in config["paramsShape"]:
            embeddings.append(shape_embedding(weight_shape))

    return torch.cat(embeddings)


def get_clean_name(name):
    """Reduce a mangled compiled-function name to its atom operator name.

    Any name containing 'conv2d' maps straight to 'conv2d'; otherwise the
    TVM codegen prefixes are stripped and a trailing ``_<digits>`` counter
    is removed.
    """
    if 'conv2d' in name:
        return 'conv2d'

    # str.replace is a no-op when the marker is absent, so no guards needed.
    for marker in ('fused_', '_compute_', 'tvmgen_default_', 'nn_'):
        name = name.replace(marker, '')

    # Drop a numeric de-duplication suffix such as "_2".
    suffix = name.rsplit('_', 1)[-1]
    if suffix.isdigit():
        name = name[:-(len(suffix) + 1)]

    return name


def _axes_divisible(dims, strides):
    """Return True when every extent in *dims* divides evenly by the matching
    stride; a scalar stride is broadcast over all axes."""
    if isinstance(strides, (list, tuple)):
        per_axis = strides
    else:
        per_axis = [strides] * len(dims)
    return all(d % s == 0 for d, s in zip(dims, per_axis))


def get_label(type, attr_name, attr_idx, config):
    """Map one (layer `type`, attribute) pair of a Keras layer config to an
    integer class label for attribute prediction.

    type: Keras layer class name, e.g. "Conv2D" (shadows the builtin; the
        name is kept for caller compatibility).
    attr_name: attribute to label, e.g. "padding", "filters".
    attr_idx: index into multi-valued attributes (per-axis values).
    config: the layer's config dict from the model-extraction step.

    Returns an int label; unhandled combinations fall through and return
    None (implicitly) after logging an error for unknown types/attrs.
    """
    if type == "Cropping2D":
        if attr_name == "cropping":
            # attr_labels = [i + 1 for i in range(6)]
            # Flatten ((top, bottom), (left, right)) and label entry attr_idx.
            attr_list = []
            for t in config["cropping"]:
                attr_list.extend(list(t))
            return CROPPING_SIZE.index(attr_list[attr_idx])

        else:
            logger.error("No Implement for attr name {}".format(attr_name))
    elif type == "ZeroPadding2D":
        if attr_name == "padding":
            return PADDING_SIZE.index(config["padding"])
        else:
            logger.error("No Implement for attr name {}".format(attr_name))
    elif type == "Conv2D":
        if attr_name == "padding":
            # When the input divides evenly by the stride, 'same' and 'valid'
            # are indistinguishable from output shapes -> dedicated label 1.
            # BUGFIX: the original computed `input_shape[0] % strides` with a
            # *list* stride ([1, 1] default), which raises TypeError; compare
            # per axis instead.
            strides = config.get("strides", [1, 1])
            if _axes_divisible(config["input_shape"][:2], strides):
                return 1
            return PADDING_LABEL.index(config["padding"])
        elif attr_name == "activation":
            # Binary: fused non-linear activation (0) vs linear/none (1).
            return 0 if config["activation"] != 'linear' else 1
        elif attr_name == "filters":
            return FILTERS_LABEL.index(config["filters"])
        elif attr_name == "strides":
            if attr_name not in config:
                return 0
            return STRIDE_LABELS.index(config["strides"])
        elif attr_name == "kernel_size":
            return KERNEL_SIZE.index(config["kernel_size"])
        elif attr_name == "dilation_rate":
            attr_labels = [1, 3, 5, 7]
            if attr_name not in config:
                return 0
            return attr_labels.index(config[attr_name][attr_idx])
        elif attr_name == "depth_multiplier":
            # Plain Conv2D has no depth multiplier; fixed label 10.
            # NOTE(review): presumably an out-of-range sentinel w.r.t.
            # DEPTH_MULTIPLIER -- confirm.
            return 10
    elif type == "SeparableConv2D":
        if attr_name == "padding":
            # BUGFIX: same per-axis divisibility fix as the Conv2D branch.
            strides = config.get("strides", [1, 1])
            if _axes_divisible(config["input_shape"][:2], strides):
                return 1
            return PADDING_LABEL.index(config["padding"])
        elif attr_name == "activation":
            # return ACTIVATION_LABEL.index(config["activation"])
            return 0 if config["activation"] != 'linear' else 1
        elif attr_name == "filters":
            return FILTERS_LABEL.index(config["filters"])
        elif attr_name == "strides":
            if attr_name not in config:
                return 0
            return STRIDE_LABELS.index(config[attr_name])
        elif attr_name == "kernel_size":
            if attr_name not in config:
                return 0
            return KERNEL_SIZE.index(config["kernel_size"])
        elif attr_name == "depth_multiplier":
            return DEPTH_MULTIPLIER.index(config["depth_multiplier"])
        elif attr_name == "dilation_rate":
            if attr_name not in config:
                return 0
            return DILATION_RATE_0_LABEL.index(config[attr_name][attr_idx])
    elif type == "DepthwiseConv2D":
        if attr_name == "padding":
            # BUGFIX: same per-axis divisibility fix as the Conv2D branch.
            strides = config.get("strides", [1, 1])
            if _axes_divisible(config["input_shape"][:2], strides):
                return 1
            return PADDING_LABEL.index(config["padding"])
        elif attr_name == "activation":
            return 0 if config["activation"] != 'linear' else 1
        elif attr_name == "filters":
            # Depthwise conv has no independent filter count.
            return 0
        elif attr_name == "strides":
            if attr_name not in config:
                return 0
            return STRIDE_LABELS.index(config[attr_name])
        elif attr_name == "kernel_size":
            if attr_name not in config:
                return 0
            return KERNEL_SIZE.index(config["kernel_size"])
        elif attr_name == "dilation_rate":
            if attr_name not in config:
                return 0
            return DILATION_RATE_0_LABEL.index(config[attr_name][attr_idx])
        elif attr_name == "depth_multiplier":
            return DEPTH_MULTIPLIER.index(config["depth_multiplier"])

    elif type == "UpSampling2D":
        if attr_name == "size":
            return SAMPLING_SIZE.index(config[attr_name])
        elif attr_name == "interpolation":
            return INTERPOLATION_LABEL.index(config[attr_name])
    elif type == "UpSampling3D":
        if attr_name == "size":
            return SAMPLING_SIZE.index(config[attr_name])
    elif type == "ZeroPadding3D" or type == "ZeroPadding2D":
        # NOTE(review): the ZeroPadding2D half of this test is unreachable --
        # that type is already handled above.
        if attr_name == "padding":
            return PADDING_SIZE.index(config[attr_name])
    elif type == "MaxPooling2D" or type == "MaxPooling3D" or type == "AveragePooling2D" or type == "AveragePooling3D":
        if attr_name == "pool_size":
            return POOLING_SIZE.index(config[attr_name])
        elif attr_name == "padding":
            # The stride defaults to the pool size for these layers.
            # BUGFIX: handles list-valued pool_size (the original crashed).
            if _axes_divisible(config["input_shape"][:2], config["pool_size"]):
                return 1
            return PADDING_LABEL.index(config["padding"])
    elif type == "Dense":
        if attr_name == "activation":
            return ACTIVATION_LABEL.index(config["activation"])
        elif attr_name == "units":
            return UNITS_LABEL.index(config["units"])
    elif type == "Conv3D":
        if attr_name == "padding":
            # BUGFIX: per-axis divisibility over the three spatial dims; the
            # original crashed when config["strides"] was a list.
            strides = config.get("strides", 1)
            if _axes_divisible(config["input_shape"][0][:3], strides):
                return 1
            return PADDING_LABEL.index(config["padding"])
        elif attr_name == "activation":
            return 0
        elif attr_name == "filters":
            return FILTERS_LABEL_LARGE.index(config["filters"])
        elif attr_name == "strides":
            if attr_name not in config:
                return 0
            return STRIDE_LABELS.index(config["strides"])
        elif attr_name == "kernel_size":
            return KERNEL_SIZE_LARGE.index(config["kernel_size"])
        elif attr_name == "dilation_rate":
            if attr_name not in config:
                return 0
            return DILATION_RATE_0_LABEL.index(config[attr_name][attr_idx])
    else:
        logger.error("No Implement for type {}".format(type))


def to_binary(x: int):
    """Write *x* as a 15-character binary string into the head of a
    length-SLOT_LENGTH 0/1 tensor (remaining entries stay zero)."""
    vector = torch.zeros(SLOT_LENGTH)
    for idx, ch in enumerate(format(x, "0>15b")):
        if ch == '1':
            vector[idx] = 1
    return vector


def get_super_label_with_onehot(functype_atom, config, functype):
    """Super label whose first entry is the atom-class index, followed by the
    binary attribute slots of get_super_label (classification slot dropped)."""
    out = torch.zeros(1 + SLOT_LENGTH * SLOT_COUNT)
    out[0] = atom_list.index(functype_atom)
    full_label = get_super_label(functype_atom, config, functype)
    out[1:SLOT_LENGTH * (SLOT_COUNT - 1) + 1] = full_label[SLOT_LENGTH:]
    return out


def get_super_label_with_onehot_all(functype_atom, config, functype):
    """Like get_super_label_with_onehot, but attribute labels are stored as
    raw per-position values (is_attr_one_hot) instead of binary slots."""
    out = torch.zeros(1 + SLOT_LENGTH * SLOT_COUNT)
    out[0] = atom_list.index(functype_atom)
    full_label = get_super_label(functype_atom, config, functype, is_attr_one_hot=True)
    out[1:SLOT_LENGTH * (SLOT_COUNT - 1) + 1] = full_label[SLOT_LENGTH:]
    return out


def get_super_label(functype_atom, config, functype, is_attr_one_hot=False):
    """Build the multi-slot label vector for one atom function.

    Slot 0 holds the atom-operator index encoded in binary; each following
    slot holds one attribute label, either binary-encoded per slot (default)
    or as raw values packed one per position with a trailing -1 sentinel
    (is_attr_one_hot=True).  With config=None only the class slot is filled.
    """
    label_vector = torch.zeros(SLOT_LENGTH * SLOT_COUNT)  # The first slot is for the classification slot
    label_vector[:SLOT_LENGTH] = to_binary(atom_list.index(functype_atom))

    if config is None:
        return label_vector

    # Attributes encoded for each atom operator (order defines slot order).
    pool_attrs = [('pool_size', 0), ('padding', 0)]
    attrs_by_atom = {
        "conv2d": [('padding', 0), ('filters', 0), ('strides', 0), ('strides', 1), ('kernel_size', 0),
                   ('depth_multiplier', 0)],
        "conv3d": [('padding', 0), ('filters', 0), ('strides', 0), ('strides', 1), ('strides', 2),
                   ('kernel_size', 0)],
        "upsampling": [('size', 0), ('interpolation', 0)],
        "upsampling3d": [('size', 0)],
        "pad": [('padding', 0)],
        "max_pool2d": pool_attrs,
        "max_pool3d": pool_attrs,
        "avg_pool2d": pool_attrs,
        "avg_pool3d": pool_attrs,
        "strided_slice": [('cropping', 0), ('cropping', 1), ('cropping', 2), ('cropping', 3)],
    }
    attr_list = attrs_by_atom.get(functype_atom, [])

    for slot, (attr, pos) in enumerate(attr_list):
        attr_label = get_label(functype, attr, pos, config)
        if is_attr_one_hot:
            label_vector[slot + SLOT_LENGTH] = attr_label
        else:
            label_vector[(slot + 1) * SLOT_LENGTH:(slot + 2) * SLOT_LENGTH] = to_binary(attr_label)

    if is_attr_one_hot:
        # -1 terminates the packed attribute values.
        label_vector[SLOT_LENGTH + len(attr_list)] = -1
    return label_vector


class TVMDataSet(Dataset):
    """Layer-classification dataset.

    Each sample is one model layer: ``data[i]`` is a nested list
    [function][block] of (1, n_tokens, 200) word2vec embedding tensors, and
    ``label[i]`` is the layer-type index in ``label_list``.
    """

    def __init__(self, path, train_ratio=0.7, mode='train', w2v_model="w2v-new.model") -> None:
        """Load the pickled token blocks at *path* and embed every token.

        path: pickle of {sample_key: {func_name: {'blocks': [[token, ...], ...]}}}
        train_ratio: per-layer-type fraction assigned to the 'train' split
        mode: 'train' or 'test' -- selects which side of the split to keep
        w2v_model: path of the gensim Word2Vec model for token embeddings
        """
        super().__init__()
        self.data = []
        self.label = []
        model = gensim.models.Word2Vec.load(w2v_model)

        # BUGFIX: context manager closes the file even if unpickling or the
        # embedding loop below raises (the original leaked the handle).
        with open(path, "rb") as f:
            data_dict = pk.load(f)

        # Per layer type totals; used to place the train/test boundary.
        length_dict = defaultdict(lambda: 0)
        for k in data_dict:
            func_type = k.split('-')[0]
            if func_type == "auto_schedule":
                func_type = k.split('-')[1]
            length_dict[func_type] += 1

        count_dict = defaultdict(lambda: 0)

        logger.info("Start loading data ... [mode: {}]".format(mode))

        for k in tqdm.tqdm(data_dict):
            func_type = k.split('-')[0]
            if func_type == "auto_schedule":
                func_type = k.split('-')[1]
            # NOTE(review): a sample falling exactly on count == length*ratio
            # is skipped by both modes ('>=' here, '<=' below) -- confirm the
            # boundary is intended before changing it.
            if mode == 'train' and count_dict[func_type] >= length_dict[func_type] * train_ratio:
                continue
            if mode == 'test' and count_dict[func_type] <= length_dict[func_type] * train_ratio:
                count_dict[func_type] += 1
                continue

            count_dict[func_type] += 1

            self.data.append([])
            target = label_list.index(func_type)
            self.label.append(torch.from_numpy(np.array([target])))
            for func in data_dict[k]:
                self.data[-1].append([])
                for b in data_dict[k][func]['blocks']:
                    # One (1, n_tokens, 200) tensor per basic block; tokens
                    # missing from the vocabulary stay all-zero.
                    self.data[-1][-1].append(torch.zeros(1, len(b), 200))
                    for pos, t in enumerate(b):
                        if t not in model.wv:
                            continue
                        vector = torch.from_numpy(np.copy(model.wv[t]))
                        self.data[-1][-1][-1][:, pos, :] = vector

        logger.info("Finished!")

    def __getitem__(self, index):
        """Return (nested block-embedding tensors, layer-type label)."""
        return self.data[index], self.label[index]

    def __len__(self):
        return len(self.label)


class TVMAttrDataSet(Dataset):
    """Attribute-prediction dataset for a single layer type.

    Each sample is one layer of the requested *type*: embedded token blocks
    per function, a label for the (attr_name, attr_idx) attribute from
    get_label, plus optional shape embeddings (shape_info) and numeric-operand
    embeddings (value_trace).
    """

    def __init__(self,
                 path,
                 train_ratio=0.7,
                 mode='train',
                 w2v_model="w2v-new.model",
                 type="Cropping2D",
                 attr_name="cropping",
                 attr_idx=0,
                 config_path="",
                 deep_walk_path="",
                 shape_info=False,
                 value_trace=False,
                 weight_info=False):
        """Load, split, label and embed the samples of one layer type.

        path: pickle of {sample_key: {func_name: {'blocks': ..., 'addrs': ...}}}
        type: Keras layer class to keep (shadows the builtin; kept for callers)
        attr_name/attr_idx: attribute to predict (see get_label)
        config_path: directory containing layers_output.txt
        deep_walk_path: optional pickle of per-block CFG embeddings; when set,
            each token vector is word2vec (first 200) + block embedding (last 200)
        """
        super().__init__()
        self.data = []
        self.shape_data = []
        self.label = []
        self.value_data = []

        embedding_dim = 200
        self.shape_info = shape_info
        self.value_trace = value_trace
        self.weight_info = weight_info

        length_dict = defaultdict(lambda: 0)

        # Load the word2vec model
        model = gensim.models.Word2Vec.load(w2v_model)

        # Load the data config.
        # NOTE(review): eval() executes arbitrary code from each config line --
        # only use with trusted files (ast.literal_eval is safer if the
        # configs are plain literals).
        # BUGFIX: context managers close the files even when parsing raises.
        config_dict = {}
        with open(config_path + "/layers_output.txt", mode="r") as f:
            for line in f.readlines():
                filename = line.split("\\")[0]
                config = eval(line.split("\\")[1])
                config_dict[filename] = config

        # Load data
        with open(path, "rb") as f:
            data_dict = pk.load(f)

        # Load deep walk (block-level CFG embeddings)
        deep_walk_data = None
        if deep_walk_path != "":
            with open(deep_walk_path, "rb") as f:
                deep_walk_data = pk.load(f)
            embedding_dim = 400

        # Count samples of the requested layer type (split bookkeeping)
        for k in data_dict:
            func_type = k.split('-')[0]
            if func_type == "auto_schedule":
                func_type = k.split('-')[1]
            if not func_type == type:
                continue
            length_dict[func_type] += 1

        count_dict = defaultdict(lambda: 0)

        logger.info("Start loading data ... [mode: {}]".format(mode))
        total_count = sum([int(length_dict[k] * train_ratio) + 1 for k in length_dict])
        pbar = tqdm.tqdm(total=total_count)
        if mode == 'test':
            pbar = tqdm.tqdm(total=(sum([length_dict[k] for k in length_dict]) - total_count))

        for k in data_dict:
            func_type = k.split('-')[0]
            if func_type == "auto_schedule":
                func_type = k.split('-')[1]
            if not func_type == type:
                continue

            # Train/test split by per-type running count.
            if mode == 'train' and count_dict[func_type] >= length_dict[func_type] * train_ratio:
                continue
            if mode == 'test' and count_dict[func_type] <= length_dict[func_type] * train_ratio:
                count_dict[func_type] += 1
                continue

            count_dict[func_type] += 1
            pbar.update(1)

            self.data.append([])

            if value_trace:
                self.value_data.append([])

            if shape_info:
                shape = get_shape(type, config_dict[k], weight_info=self.weight_info)
                self.shape_data.append(shape)

            target = get_label(type, attr_name, attr_idx, config_dict[k])
            self.label.append(torch.from_numpy(np.array([target])))
            for func in data_dict[k]:
                self.data[-1].append([])
                if value_trace:
                    self.value_data[-1].append([])

                # logger.debug(k)
                if deep_walk_path != "":
                    # Token vector = word2vec half + deep-walk block half.
                    for b, addr in zip(data_dict[k][func]['blocks'], data_dict[k][func]['addrs']):
                        self.data[-1][-1].append(torch.zeros(1, len(b), embedding_dim))
                        for pos, t in enumerate(b):
                            if t not in model.wv:
                                continue
                            vector = torch.from_numpy(np.copy(model.wv[t]))
                            self.data[-1][-1][-1][:, pos, :embedding_dim // 2] = vector
                            if addr in deep_walk_data[k][func]:
                                self.data[-1][-1][-1][:, pos, embedding_dim // 2:] = torch.from_numpy(
                                    deep_walk_data[k][func][addr])
                else:
                    for b in data_dict[k][func]['blocks']:
                        self.data[-1][-1].append(torch.zeros(1, len(b), embedding_dim))

                        # Numeric tokens (decimal or hex) in this block.
                        value_count = sum([t.isdigit() or "0x" in t for t in b])

                        if value_trace:
                            if value_count == 0:
                                self.value_data[-1][-1].append(torch.zeros(1, 1, embedding_dim))
                            else:
                                self.value_data[-1][-1].append(torch.zeros(1, value_count, embedding_dim))
                        value_pos = 0
                        for pos, t in enumerate(b):

                            if value_trace:
                                # Numeric tokens get a binary magnitude
                                # embedding and are normalized to "num".
                                if t.isdigit():
                                    vector = torch.zeros(embedding_dim) if int(t) >= 0 else torch.ones(embedding_dim)
                                    vector[:15] = shape_embedding([abs(int(t))], max_length=15, max_dim=1)
                                    self.value_data[-1][-1][-1][:, value_pos, :] = vector
                                    t = "num"
                                    value_pos += 1
                                elif "0x" in t:
                                    tmp = int(t, 16)
                                    vector = torch.zeros(embedding_dim) if tmp >= 0 else torch.ones(embedding_dim)
                                    vector[:15] = shape_embedding([abs(tmp)], max_length=15, max_dim=1)
                                    self.value_data[-1][-1][-1][:, value_pos, :] = vector
                                    t = "num"
                                    value_pos += 1
                            else:
                                if t.isdigit() or "0x" in t:
                                    t = "num"
                            if t not in model.wv:
                                continue
                            vector = torch.from_numpy(np.copy(model.wv[t]))
                            self.data[-1][-1][-1][:, pos, :] = vector

        # BUGFIX: removed the stray trailing f.close() -- every file is now
        # closed by its context manager above.
        pbar.close()
        logger.info("Finished!")

    def __getitem__(self, index):
        """Return (data, label[, shape_data][, value_data]) per the flags.

        NOTE(review): in the value_trace-without-shape_info path the label is
        returned twice as a placeholder -- presumably so consumers always see
        a fixed tuple arity; confirm against the training loop.
        """
        if self.shape_info:
            if self.value_trace:
                return self.data[index], self.label[index], self.shape_data[index], self.value_data[index]
            return self.data[index], self.label[index], self.shape_data[index]
        elif self.value_trace:
            return self.data[index], self.label[index], self.label[index], self.value_data[index]

        return self.data[index], self.label[index], self.label[index]

    def __len__(self):
        return len(self.label)


class TVMDataSetV2(Dataset):
    """Atom-function-level dataset with a threaded prefetch queue.

    One sample per compiled atom function: ``self.data[i]`` holds that
    function's raw token blocks (embedded lazily), ``self.label[i]`` the
    multi-slot super label from get_super_label.  ``__getitem__`` pops an
    already-embedded item from ``self.tmp_data`` and starts a background
    thread (via ``_thread``) to refill the queue.

    NOTE(review): the prefetch queue relies on list append/pop from multiple
    threads being safe under the GIL and on consumers never outpacing the
    refill threads -- confirm against the training loop.
    """

    def __init__(self,
                 path,
                 train_ratio=0.7,
                 mode='train',
                 w2v_model="w2v-new.model",
                 config_path="",
                 deep_walk_path="",
                 shape_info=False,
                 value_trace=False,
                 weight_info=False,
                 is_onehot=False,
                 is_attr_onehot=False):

        super().__init__()
        self.data = []
        self.shape_data = []
        self.label = []
        self.value_data = []

        embedding_dim = 200
        self.shape_info = shape_info
        self.value_trace = value_trace
        self.weight_info = weight_info

        # Disassembler data-segment markers; a token containing one of these
        # is normalized to the bare marker before embedding.
        cs_words = [
            "csqword", "csdword", "cspword", "csxmmword", "csymmword", "csunk", "csoff", "csbyte", "aAssertFail",
            "csasc"
        ]
        self.cs_words = cs_words
        self.embedding_dim = embedding_dim

        length_dict = defaultdict(lambda: 0)

        # Load the word2vec model
        model = gensim.models.Word2Vec.load(w2v_model)

        self.model = model

        # Load the data config.
        # NOTE(review): eval() executes arbitrary code from each config line --
        # only use with trusted config files.
        config_dict = {}

        f = open(config_path + "/layers_output.txt", mode="r")

        for line in f.readlines():
            filename = line.split("\\")[0]
            config = eval(line.split("\\")[1])
            config_dict[filename] = config
        f.close()

        # Load data
        f = open(path, "rb")
        data_dict = pk.load(f)
        f.close()

        # Load conv2d label (recorded input/output shapes per function)
        f = open(config_path + "/conv2d_dense_pool_label.pkl", "rb")
        conv2d_shape_dict = pk.load(f)
        f.close()

        # Count samples per atom operator name (split bookkeeping)
        for k in data_dict:
            func_type = k.split('-')[0]
            if func_type == "auto_schedule":
                continue
            # func_type = k.split('-')[1]
            for func_name in data_dict[k]:
                atom_name = get_clean_name(func_name)
                length_dict[atom_name] += 1

        count_dict = defaultdict(int)

        logger.info("Start loading data ... [mode: {}]".format(mode))
        total_count = sum([int(length_dict[k] * train_ratio) + 1 for k in length_dict])
        pbar_count = total_count
        if mode == 'test':
            pbar_count = sum([length_dict[k] for k in length_dict]) - total_count

        pbar = tqdm.tqdm(total=pbar_count)
        for k in data_dict:
            func_type = k.split('-')[0]
            if func_type == "auto_schedule":
                # func_type = k.split('-')[1]
                continue

            if func_type == 'LSTM':
                continue

            # target = get_label(func_type, attr_name, attr_idx, config_dict[k])

            for func in data_dict[k]:
                atom_func_name = get_clean_name(func)
                # Per-atom train/test split by running count.
                if mode == 'train' and count_dict[atom_func_name] >= (length_dict[atom_func_name] * train_ratio):
                    continue
                if mode == 'test' and count_dict[atom_func_name] <= (length_dict[atom_func_name] * train_ratio):
                    count_dict[atom_func_name] += 1
                    continue

                count_dict[atom_func_name] += 1
                pbar.update(1)

                # self.data.append([])
                if value_trace:
                    self.value_data.append([])

                if shape_info:
                    # Add single function shape info
                    shape = []
                    if "Conv2D" in func_type or "Activation" in k or "Dense" in k or "Global" in k or "Conv3D" in k:
                        shape = get_conv2d_shape(conv2d_shape_dict[k][func])
                    else:
                        shape = get_shape(func_type, config_dict[k], weight_info=self.weight_info)
                    self.shape_data.append(shape)

                # self.data[-1].append([])
                self.data.append(data_dict[k][func]["blocks"])  # raw assembly token blocks of this function

                if value_trace:
                    self.value_data[-1].append([])

                target = get_super_label(atom_func_name, config_dict[k] if k in config_dict else None, func_type)
                if is_onehot:
                    target = get_super_label_with_onehot(atom_func_name, config_dict[k] if k in config_dict else None,
                                                         func_type)
                if is_attr_onehot:
                    target = get_super_label_with_onehot_all(atom_func_name,
                                                             config_dict[k] if k in config_dict else None, func_type)
                # if mode == 'test':
                # logger.debug(atom_func_name)
                if k not in config_dict:
                    logger.debug("No Config: {}".format(k))

                self.label.append(target)

                # Build the value sequence; b is one block's token sequence.
                # Word-vector writes are left commented out here: token
                # embedding is deferred to the prefetch path below.
                for b in data_dict[k][func]['blocks']:
                    if len(b) == 0:
                        continue
                    # self.data[-1][-1].append(torch.zeros(1, len(b), embedding_dim))
                    value_count = sum([t.isdigit() or "0x" in t for t in b])  # number of numeric tokens in this block

                    if value_trace:
                        if value_count == 0:
                            self.value_data[-1][-1].append(torch.zeros(1, 1, embedding_dim))  # placeholder 1 x 1 x 200
                        else:
                            self.value_data[-1][-1].append(torch.zeros(1, value_count, embedding_dim))  # 1 x n x 200
                    value_pos = 0
                    for pos, t in enumerate(b):
                        for cs_word in cs_words:
                            if cs_word in t:
                                t = cs_word
                        if value_trace:
                            # Numeric tokens get a binary magnitude embedding
                            # and are normalized to "num".
                            if t.isdigit():
                                vector = torch.zeros(embedding_dim) if int(t) >= 0 else torch.ones(embedding_dim)
                                vector[:15] = shape_embedding([abs(int(t))], max_length=15, max_dim=1)
                                self.value_data[-1][-1][-1][:, value_pos, :] = vector
                                t = "num"
                                value_pos += 1
                            elif "0x" in t:
                                tmp = int(t, 16)
                                vector = torch.zeros(embedding_dim) if tmp >= 0 else torch.ones(embedding_dim)
                                vector[:15] = shape_embedding([abs(tmp)], max_length=15, max_dim=1)
                                self.value_data[-1][-1][-1][:, value_pos, :] = vector
                                t = "num"
                                value_pos += 1
                        else:
                            if t.isdigit() or "0x" in t:
                                t = "num"
                        if t not in model.wv:
                            continue
                        # vector = torch.from_numpy(np.copy(model.wv[t]))
                        # self.data[-1][-1][-1][:, pos, :] = vector

        f.close()  # NOTE(review): no-op -- this file was already closed above
        pbar.close()
        logger.info("Finished!")

        self.order = torch.randperm(len(self.label))
        # Prefetch: pre-embed the first few samples so __getitem__ never
        # starts empty.
        prefetch_num = 10
        self.tmp_data = []
        self.fetch_idx = 10
        for i in self.order[:prefetch_num]:
            # data, label, label/shape, value_data; data is function-level.
            # NOTE(review): the 4-way unpack assumes a flag combination where
            # get_data returns 4 values (value_trace enabled) -- confirm.
            x, label_1, label_2, label_3 = self.get_data(i)
            tmp = []
            for b in x:  # per basic block
                if len(b) == 0:
                    continue
                tmp.append(torch.zeros(1, len(b), embedding_dim))
                for pos, t in enumerate(b):  # per token
                    for cs_word in cs_words:
                        if cs_word in t:
                            t = cs_word
                    if value_trace:
                        if t.isdigit():
                            t = "num"
                        elif "0x" in t:
                            t = "num"
                    else:
                        if t.isdigit() or "0x" in t:
                            t = "num"
                    if t not in model.wv:
                        continue
                    vector = torch.from_numpy(np.copy(model.wv[t]))
                    tmp[-1][:, pos, :] = vector
            self.tmp_data.append([[tmp], label_1, label_2, label_3])

    def update_tmp_data(self):
        """Embed the next sample in the shuffled order and append it to the
        prefetch queue; reshuffles when one pass over the data completes.
        Runs on a background thread started by __getitem__."""
        if self.fetch_idx == len(self.label):
            self.order = torch.randperm(len(self.label))
            self.fetch_idx = 0
        i = self.order[self.fetch_idx]
        self.fetch_idx += 1
        x, label_1, label_2, label_3 = self.get_data(i)
        tmp = []
        for b in x:
            if len(b) == 0:
                continue
            tmp.append(torch.zeros(1, len(b), self.embedding_dim))
            for pos, t in enumerate(b):
                for cs_word in self.cs_words:
                    if cs_word in t:
                        t = cs_word
                if self.value_trace:
                    if t.isdigit():
                        t = "num"
                    elif "0x" in t:
                        t = "num"
                else:
                    if t.isdigit() or "0x" in t:
                        t = "num"
                if t not in self.model.wv:
                    continue
                vector = torch.from_numpy(np.copy(self.model.wv[t]))
                tmp[-1][:, pos, :] = vector
        if len(tmp) > 0:
            self.tmp_data.append([[tmp], label_1, label_2, label_3])
        else:
            # Empty sample: retry with the next index on a fresh thread.
            _thread.start_new_thread(self.update_tmp_data, ())

    def __getitem__(self, index):
        """Pop the oldest prefetched item (ignores *index*: samples follow the
        internal shuffled order) and kick off a background refill."""
        item = self.tmp_data.pop(0)
        _thread.start_new_thread(self.update_tmp_data, ())
        return item

    def get_data(self, index):
        """Return the raw (unembedded) sample tuple per the configured flags.

        NOTE(review): in the value_trace-without-shape_info path the label is
        returned twice as a placeholder -- confirm against the caller."""
        if self.shape_info:
            if self.value_trace:
                return self.data[index], self.label[index], self.shape_data[index], self.value_data[index]
            return self.data[index], self.label[index], self.shape_data[index]
        elif self.value_trace:
            return self.data[index], self.label[index], self.label[index], self.value_data[index]

        return self.data[index], self.label[index], self.label[index]

    def __len__(self):
        return len(self.label)


class TVMDataSetGNN(Dataset):
    """Dataset over TVM-compiled binary functions for graph neural networks.

    Loads disassembled basic-block token sequences and inter-block edge lists
    from a pickled dump, embeds tokens with a pre-trained gensim word2vec
    model, and serves samples through a small, thread-refilled prefetch
    buffer.  Samples are drawn in a fresh random order each epoch.
    """
    def __init__(self,
                 path,
                 train_ratio=0.7,
                 mode='train',
                 w2v_model="w2v-new.model",
                 config_path="",
                 deep_walk_path="",
                 shape_info=False,
                 value_trace=False,
                 weight_info=False,
                 is_onehot=False):
        """Build the train or test split of the dataset.

        Args:
            path: pickle file mapping .so-file keys to per-function dicts
                holding "blocks" (lists of token lists) and "edges".
            train_ratio: fraction of each atom-function's occurrences kept for
                training; the remainder forms the test split.
            mode: 'train' or 'test' — which side of the split to keep.
            w2v_model: path of the gensim Word2Vec model for token embeddings.
            config_path: directory containing "layers_output.txt" and
                "conv2d_dense_pool_label.pkl".
            deep_walk_path: not used in this constructor — TODO confirm
                whether any caller still relies on it.
            shape_info: if True, attach a per-sample shape vector.
            value_trace: if True, build per-block embeddings of the numeric
                literals found in each basic block.
            weight_info: forwarded to get_shape when deriving shape vectors.
            is_onehot: if True, use the one-hot variant of the super label.
        """

        super().__init__()
        self.data = []
        self.shape_data = []
        self.label = []
        self.value_data = []
        self.edge_data = []

        embedding_dim = 200
        self.shape_info = shape_info
        self.value_trace = value_trace
        self.weight_info = weight_info

        # Marker substrings: any token containing one of these is collapsed to
        # the marker itself before the word2vec lookup.
        cs_words = [
            "csqword", "csdword", "cspword", "csxmmword", "csymmword", "csunk", "csoff", "csbyte", "aAssertFail",
            "csasc"
        ]
        self.cs_words = cs_words
        self.embedding_dim = embedding_dim

        # Per-atom-function occurrence counts, used to size the train split.
        length_dict = defaultdict(lambda: 0)

        # Load the word2vec model
        model = gensim.models.Word2Vec.load(w2v_model)

        self.model = model

        # Load the data config: each line is "<filename>\<python-literal-config>".
        config_dict = {}

        f = open(config_path + "/layers_output.txt", mode="r")

        for line in f.readlines():
            filename = line.split("\\")[0]
            # NOTE(review): eval() executes arbitrary expressions from the
            # config file — safe only if layers_output.txt is trusted;
            # consider ast.literal_eval.
            config = eval(line.split("\\")[1])
            config_dict[filename] = config
        f.close()

        # Load data
        f = open(path, "rb")
        data_dict = pk.load(f)
        f.close()

        # Load conv2d label
        f = open(config_path + "/conv2d_dense_pool_label.pkl", "rb")
        conv2d_shape_dict = pk.load(f)
        f.close()

        # Count the length: occurrences of each cleaned atom-function name.
        for k in data_dict:
            func_type = k.split('-')[0]
            if func_type == "auto_schedule":
                continue
            # func_type = k.split('-')[1]
            for func_name in data_dict[k]:
                atom_name = get_clean_name(func_name)
                length_dict[atom_name] += 1

        count_dict = defaultdict(int)

        logger.info("Start loading data ... [mode: {}]".format(mode))
        total_count = sum([int(length_dict[k] * train_ratio) + 1 for k in length_dict])
        pbar = tqdm.tqdm(total=total_count)
        if mode == 'test':
            pbar = tqdm.tqdm(total=(sum([length_dict[k] for k in length_dict]) - total_count))

        for k in data_dict:
            func_type = k.split('-')[0]
            if func_type == "auto_schedule":
                # func_type = k.split('-')[1]
                continue

            if func_type == 'LSTM':
                continue

            # For each .so file, there may be multi functions and we collect all functions
            for func in data_dict[k]:
                atom_func_name = get_clean_name(func)

                # Counting for training or test samples: the first
                # length*ratio occurrences of each atom function go to the
                # train split, the rest to test.
                if mode == 'train' and count_dict[atom_func_name] >= (length_dict[atom_func_name] * train_ratio):
                    continue
                if mode == 'test' and count_dict[atom_func_name] <= (length_dict[atom_func_name] * train_ratio):
                    count_dict[atom_func_name] += 1
                    continue

                count_dict[atom_func_name] += 1
                pbar.update(1)

                # self.data.append([])
                if value_trace:
                    self.value_data.append([])

                if shape_info:
                    # Add single function shape info.  NOTE(review): the
                    # condition mixes func_type and the raw key k — looks
                    # intentional but worth confirming.
                    shape = []
                    if "Conv2D" in func_type or "Activation" in k or "Dense" in k or "Global" in k or "Conv3D" in k:
                        shape = get_conv2d_shape(conv2d_shape_dict[k][func])
                    else:
                        shape = get_shape(func_type, config_dict[k], weight_info=self.weight_info)
                    self.shape_data.append(shape)

                # Add basic block data (raw token lists; embedding happens lazily
                # in the prefetch path).
                self.data.append(data_dict[k][func]["blocks"])

                if value_trace:
                    self.value_data[-1].append([])

                # Generate the label vector
                target = get_super_label(atom_func_name, config_dict[k] if k in config_dict else None, func_type)
                if is_onehot:
                    target = get_super_label_with_onehot(atom_func_name, config_dict[k], func_type)
                self.label.append(target)

                # Add edge data for GCN/GNN
                self.edge_data.append(data_dict[k][func]["edges"])

                for b in data_dict[k][func]['blocks']:
                    if len(b) == 0:
                        continue
                    # self.data[-1][-1].append(torch.zeros(1, len(b), embedding_dim))
                    # Number of numeric tokens (decimal or hex) in this block.
                    value_count = sum([t.isdigit() or "0x" in t for t in b])

                    if value_trace:
                        if value_count == 0:
                            self.value_data[-1][-1].append(torch.zeros(1, 1, embedding_dim))
                        else:
                            self.value_data[-1][-1].append(torch.zeros(1, value_count, embedding_dim))
                    value_pos = 0
                    for pos, t in enumerate(b):
                        for cs_word in cs_words:
                            if cs_word in t:
                                t = cs_word
                        if value_trace:
                            if t.isdigit():
                                # Sign is encoded by the base vector (zeros for
                                # non-negative, ones for negative); the first 15
                                # dims hold the binary magnitude encoding.
                                vector = torch.zeros(embedding_dim) if int(t) >= 0 else torch.ones(embedding_dim)
                                vector[:15] = shape_embedding([abs(int(t))], max_length=15, max_dim=1)
                                self.value_data[-1][-1][-1][:, value_pos, :] = vector
                                t = "num"
                                value_pos += 1
                            elif "0x" in t:
                                tmp = int(t, 16)
                                vector = torch.zeros(embedding_dim) if tmp >= 0 else torch.ones(embedding_dim)
                                vector[:15] = shape_embedding([abs(tmp)], max_length=15, max_dim=1)
                                self.value_data[-1][-1][-1][:, value_pos, :] = vector
                                t = "num"
                                value_pos += 1
                        else:
                            if t.isdigit() or "0x" in t:
                                t = "num"
                        if t not in model.wv:
                            continue
                        # vector = torch.from_numpy(np.copy(model.wv[t]))
                        # self.data[-1][-1][-1][:, pos, :] = vector

        # NOTE(review): f was already closed above (conv2d label file); this
        # second close() is a harmless no-op but looks like a leftover.
        f.close()
        pbar.close()
        logger.info("Finished!")

        self.order = torch.randperm(len(self.label))
        # Prefetch: eagerly embed the first few samples so __getitem__ never
        # starts with an empty buffer.
        prefetch_num = 10
        self.tmp_data = []
        # NOTE(review): hard-coded 10 must stay in sync with prefetch_num —
        # consider `self.fetch_idx = prefetch_num`.
        self.fetch_idx = 10
        for i in self.order[:prefetch_num]:
            x, label, tmp_shape_data, tmp_value_data, tmp_edge_data = self.get_data(i)
            tmp = []
            for b in x:
                if len(b) == 0:
                    continue
                tmp.append(torch.zeros(1, len(b), embedding_dim))
                for pos, t in enumerate(b):
                    for cs_word in cs_words:
                        if cs_word in t:
                            t = cs_word
                    if value_trace:
                        if t.isdigit():
                            t = "num"
                        elif "0x" in t:
                            t = "num"
                    else:
                        if t.isdigit() or "0x" in t:
                            t = "num"
                    # Out-of-vocabulary tokens keep their zero-vector slot.
                    if t not in model.wv:
                        continue
                    vector = torch.from_numpy(np.copy(model.wv[t]))
                    tmp[-1][:, pos, :] = vector
            self.tmp_data.append([[tmp], label, tmp_shape_data, tmp_value_data, tmp_edge_data])

    def update_tmp_data(self):
        """Embed one sample (in shuffled order) and append it to the prefetch
        buffer; re-draws the random order once an epoch is exhausted.  If the
        sample has only empty blocks, retry asynchronously with the next one.
        """
        if self.fetch_idx == len(self.label):
            self.order = torch.randperm(len(self.label))
            self.fetch_idx = 0
        i = self.order[self.fetch_idx]
        self.fetch_idx += 1
        x, label, shape_data, value_data, label_edge = self.get_data(i)
        tmp = []
        for b in x:
            if len(b) == 0:
                continue
            tmp.append(torch.zeros(1, len(b), self.embedding_dim))
            for pos, t in enumerate(b):
                # Collapse marker-containing tokens, then fold numeric
                # literals into the single vocabulary token "num".
                for cs_word in self.cs_words:
                    if cs_word in t:
                        t = cs_word
                if self.value_trace:
                    if t.isdigit():
                        t = "num"
                    elif "0x" in t:
                        t = "num"
                else:
                    if t.isdigit() or "0x" in t:
                        t = "num"
                if t not in self.model.wv:
                    continue
                vector = torch.from_numpy(np.copy(self.model.wv[t]))
                tmp[-1][:, pos, :] = vector
        if len(tmp) > 0:
            self.tmp_data.append([[tmp], label, shape_data, value_data, label_edge])
        else:
            # All blocks empty: fetch another sample in the background.
            _thread.start_new_thread(self.update_tmp_data, ())

    def __getitem__(self, index):
        """Serve the oldest prefetched sample (``index`` is effectively
        ignored — ordering comes from the shuffled prefetch queue) and refill
        the buffer in the background."""
        item = self.tmp_data.pop(0)
        _thread.start_new_thread(self.update_tmp_data, ())
        return item

    def get_data(self, index):
        """Return ``(blocks, label, shape-or-label, value_data, edge_data)``;
        the label is used as a placeholder when shape info is disabled."""
        if self.shape_info:
            return self.data[index], self.label[index], self.shape_data[index], self.value_data[index], self.edge_data[
                index]
        else:
            return self.data[index], self.label[index], self.label[index], self.value_data[index], self.edge_data[index]

    def __len__(self):
        """Number of labeled samples in the dataset."""
        return len(self.label)
