from collections import defaultdict
import torch
from torch.utils.data.dataset import Dataset
import gensim
import pickle as pk
from .tvm_dataset import get_clean_name, atom_list
from loguru import logger
import tqdm
import _thread
import numpy as np

# Substring placeholders for disassembler-generated tokens — presumably
# IDA-style "cs*" data/offset markers plus the assert-failure string symbol
# (TODO confirm against the tokenizer that produced the pickle). Any token
# containing one of these substrings is collapsed to the bare placeholder
# before the word2vec lookup (see ins2torch).
cs_words = [
    "csqword", "csdword", "cspword", "csxmmword", "csymmword", "csunk", "csoff", "csbyte", "aAssertFail", "csasc"
]


def ins2torch(x, embedding_dim, model):
    """Embed a list of token blocks with a word2vec model.

    Each non-empty block in ``x`` (a list of token lists) becomes a
    ``(1, len(block), embedding_dim)`` float tensor; empty blocks are
    dropped. Tokens are normalized first: any token containing one of the
    ``cs_words`` placeholders is collapsed to that placeholder, and numeric
    tokens (all digits, or containing "0x") become the literal "num".
    Tokens absent from ``model.wv`` keep their zero rows.

    Returns the list of per-block tensors.
    """
    embedded = []
    for block in x:
        if not block:
            continue
        block_tensor = torch.zeros(1, len(block), embedding_dim)
        embedded.append(block_tensor)
        for pos, token in enumerate(block):
            # Collapse disassembler placeholder substrings to canonical tokens.
            for placeholder in cs_words:
                if placeholder in token:
                    token = placeholder
            # Fold all numeric literals into a single "num" token.
            if token.isdigit() or "0x" in token:
                token = "num"
            if token in model.wv:
                block_tensor[:, pos, :] = torch.from_numpy(np.copy(model.wv[token]))
    return embedded


def get_onehot_label(functype_atom):
    """Map an atom function name to its class index.

    Despite the name, this returns a 1-element LongTensor holding the
    position of ``functype_atom`` in ``atom_list`` (a class index, not a
    one-hot vector). Raises ValueError if the name is not in ``atom_list``.
    """
    return torch.LongTensor([atom_list.index(functype_atom)])


class TVMDataSetARM(Dataset):
    """ARM disassembly dataset with a per-atom-name train/test split.

    Loads a pickled ``{category: {func_name: {"blocks": [...]}}}`` dump,
    splits each atom function name's samples so the first ``train_ratio``
    fraction goes to 'train' and the remainder to 'test', and serves items
    from a small prefetch queue that is refilled by background threads.
    """

    def __init__(self, path, train_ratio=0.7, mode='train', w2v_model="w2v-new.model"):
        """Load the pickle at ``path`` and build the ``mode`` split.

        :param path: pickle file with the disassembly dump.
        :param train_ratio: fraction of each atom's samples used for training.
        :param mode: 'train' or 'test' — which side of the split to keep.
        :param w2v_model: path of the gensim Word2Vec model for embeddings.
        """
        super().__init__()
        self.data = []
        self.shape_data = []
        self.label = []
        self.value_data = []

        embedding_dim = 200

        self.cs_words = cs_words
        self.embedding_dim = embedding_dim

        # Per-atom-name sample counts; used to locate each name's split point.
        length_dict = defaultdict(int)

        # Load the word2vec model used for token embeddings.
        model = gensim.models.Word2Vec.load(w2v_model)
        self.model = model

        # Load data (was opened without a context manager and closed twice).
        with open(path, "rb") as f:
            data_dict = pk.load(f)

        # First pass: count samples per atom function name, skipping the
        # excluded categories.
        for k in data_dict:
            func_type = k.split('-')[0]
            if func_type == "auto_schedule" or func_type == "LSTM":
                continue
            for func_name in data_dict[k]:
                atom_name = get_clean_name(func_name)
                length_dict[atom_name] += 1

        logger.info("Atom Func Name Set: {}".format(list(length_dict.keys())))
        count_dict = defaultdict(int)

        logger.info("Start loading data ... [mode: {}]".format(mode))
        # NOTE(review): this total is approximate — it counts names that are
        # not in atom_list and the +1 over-counts when length * train_ratio is
        # integral. It only sizes the progress bar, so it is left as-is.
        total_count = sum([int(length_dict[k] * train_ratio) + 1 for k in length_dict])
        pbar_count = total_count
        if mode == 'test':
            pbar_count = sum([length_dict[k] for k in length_dict]) - total_count

        pbar = tqdm.tqdm(total=pbar_count)
        for k in data_dict:
            logger.debug(k)
            func_type = k.split('-')[0]
            if func_type == "auto_schedule" or func_type == 'LSTM':
                continue

            for func in data_dict[k]:
                atom_func_name = get_clean_name(func)
                if atom_func_name not in atom_list:
                    continue

                # Train keeps samples while count < length * ratio ...
                if mode == 'train' and count_dict[atom_func_name] >= (length_dict[atom_func_name] * train_ratio):
                    continue
                # ... and test keeps the rest. BUGFIX: this was `<=`, which
                # dropped the boundary sample from BOTH splits whenever
                # length * train_ratio was an exact integer.
                if mode == 'test' and count_dict[atom_func_name] < (length_dict[atom_func_name] * train_ratio):
                    count_dict[atom_func_name] += 1
                    continue
                pbar.update(1)

                count_dict[atom_func_name] += 1

                self.data.append(data_dict[k][func]["blocks"])
                self.label.append(get_onehot_label(atom_func_name))
                # (A dead per-token loop that normalized tokens but produced
                # no values was removed here; embedding happens lazily in
                # ins2torch when items are fetched.)

        pbar.close()
        logger.info("Finished!")

        # Prefetch: pre-embed a handful of samples so the first __getitem__
        # calls do not block on ins2torch.
        self.order = torch.randperm(len(self.label))
        prefetch_num = 10
        self.tmp_data = []
        # BUGFIX: was a hard-coded 10, silently desynchronizing from
        # prefetch_num if the prefetch size was ever changed.
        self.fetch_idx = prefetch_num
        for i in self.order[:prefetch_num]:
            x, label_1, label_2, label_3 = self.get_data(i)
            tmp = ins2torch(x, self.embedding_dim, self.model)
            # NOTE(review): unlike update_tmp_data, empty embeddings are not
            # filtered out here — confirm whether all-empty samples can occur.
            self.tmp_data.append([[tmp], label_1, label_2, label_3])

    def update_tmp_data(self):
        """Embed the next sample in the shuffled order and append it to the
        prefetch queue; reshuffles when an epoch's worth has been consumed and
        retries in a new thread if the sample produced no embeddings."""
        if self.fetch_idx == len(self.label):
            self.order = torch.randperm(len(self.label))
            self.fetch_idx = 0
        i = self.order[self.fetch_idx]
        self.fetch_idx += 1
        x, label_1, label_2, label_3 = self.get_data(i)
        tmp = ins2torch(x, self.embedding_dim, self.model)
        if len(tmp) > 0:
            self.tmp_data.append([[tmp], label_1, label_2, label_3])
        else:
            # Sample had no usable blocks: fetch a replacement asynchronously.
            _thread.start_new_thread(self.update_tmp_data, ())

    def __getitem__(self, index):
        # NOTE(review): `index` is ignored — items come from the shuffled
        # prefetch queue, and a background thread refills it after each pop.
        item = self.tmp_data.pop(0)
        _thread.start_new_thread(self.update_tmp_data, ())
        return item

    def get_data(self, index):
        """Return (blocks, label, label, label) for ``index``; the label is
        triplicated to match a three-label consumer interface."""
        return self.data[index], self.label[index], self.label[index], self.label[index]

    def __len__(self):
        return len(self.label)


if __name__ == "__main__":
    # Smoke test: build the ARM dataset and report the field count of one item.
    arm_dataset = TVMDataSetARM(
        "arm_ida_data.pkl", w2v_model="word2vec-train-ida-arm.model"
    )
    for sample in arm_dataset:
        print(len(sample))
        exit(0)