import os
import torch
import random
import pickle
import numpy as np


class DataGenerator():
    """Shuffling mini-batch generator over pre-indexed examples.

    Each item of `data` is expected to be
    [sentence, true length, slot labels, intent label] (as produced by
    `data2index`); `get_batch()` yields those four fields as parallel
    lists, batch by batch, reshuffling and looping forever.
    """

    def __init__(self, data, batch_size):
        print('DataGenerator: Data Length', len(data))
        self.data = data
        self.batch_size = batch_size
        self.steps = len(self.data) // self.batch_size
        # Count the trailing partial batch.
        # Bug fix: the original tested len(self.data[0]) % batch_size —
        # the field count of a single example — so `steps` was wrong
        # whenever len(data) was not a multiple of batch_size.
        if len(self.data) % self.batch_size != 0:
            self.steps += 1

    def get_step(self):
        """Return the number of batches per epoch (incl. the partial one)."""
        return self.steps

    def get_batch(self):
        """Infinite generator yielding (sentence, real_len, slot_label, intent_label)."""
        while True:
            index = list(range(len(self.data)))
            random.shuffle(index)

            sentence = []
            real_len = []
            slot_label = []
            intent_label = []
            for i in index:
                sentence.append(self.data[i][0])
                real_len.append(self.data[i][1])
                slot_label.append(self.data[i][2])
                intent_label.append(self.data[i][3])

                # Emit a full batch, or whatever remains at epoch end.
                if len(sentence) == self.batch_size or i == index[-1]:
                    yield (sentence, real_len, slot_label, intent_label)
                    sentence = []
                    real_len = []
                    slot_label = []
                    intent_label = []


def data2index(sentence_path, intent_path, slot_path,
               tag_dict_path='./tag_dict.pkl', max_len=30):
    """Convert raw sentence/intent/slot files into indexed training examples.

    :param sentence_path: .npy file of index-encoded sentences
    :param intent_path: .npy file of intent labels
    :param slot_path: text file, one space-separated slot-tag sequence per line
    :param tag_dict_path: pickle file holding {'tag2index': {...}} (was a
        hard-coded './tag_dict.pkl'; default preserved for compatibility)
    :param max_len: pad slot sequences with <PAD> up to this length (was a
        hard-coded 30)
    :return: list of [sentence, true length (incl. leading <PAD>), slots, intent]
    """
    # Load the tag -> index mapping and hoist the lookups used per line.
    with open(tag_dict_path, 'rb') as f:
        tag_dict = pickle.load(f)
    tag2index = tag_dict['tag2index']
    pad_idx = tag2index['<PAD>']
    o_idx = tag2index['O']

    sentence = np.load(sentence_path, allow_pickle=True)
    intent = np.load(intent_path, allow_pickle=True)

    actual_length = []  # true length of each slot sequence
    slot = []
    num_unknown = 0  # tags missing from the mapping, mapped to 'O'
    with open(slot_path, 'r', encoding='utf-8') as f:
        for line in f:
            # Sequence starts with a <PAD> token (original inserted it after
            # the loop; prepending here is equivalent).
            temp_slot = [pad_idx]
            for label in line.strip().split(' '):
                if label in tag2index:
                    temp_slot.append(tag2index[label])
                else:
                    temp_slot.append(o_idx)
                    num_unknown += 1

            actual_length.append(len(temp_slot))
            # NOTE(review): sequences longer than max_len are NOT truncated,
            # so actual_length may exceed max_len — confirm downstream
            # (e.g. make_mask) tolerates this.
            if len(temp_slot) < max_len:  # padding with <PAD>
                temp_slot.extend([pad_idx] * (max_len - len(temp_slot)))
            slot.append(temp_slot)

    data = []
    for i in range(len(sentence)):
        # Format: sentence, true sentence length, slots, intent label
        data.append([sentence[i], actual_length[i], slot[i], intent[i]])

    return data


def load_training_data(num_fold, data_path):
    """Load one cross-validation fold of training and validation data.

    Each returned item: [sentence indices, true length, slot labels, intent label].

    :param num_fold: which fold's files to read
    :param data_path: directory containing the fold files
    :return: (train_data, dev_data)
    """
    def fold_paths(split):
        # Build the (sentence, intent, slot) file triplet for one split.
        return (
            f'{data_path}/{split}_x{num_fold}.npy',
            f'{data_path}/{split}_intent_y{num_fold}.npy',
            f'{data_path}/{split}_slot_y{num_fold}.txt',
        )

    train_data = data2index(*fold_paths('train'))
    dev_data = data2index(*fold_paths('dev'))
    return train_data, dev_data


def load_testing_data(data_path):
    """Load the held-out test split via data2index.

    :param data_path: directory holding test_x.npy / test_intent.npy / test_slot.txt
    :return: list of [sentence, true length, slot labels, intent label]
    """
    return data2index(
        f'{data_path}/test_x.npy',
        f'{data_path}/test_intent.npy',
        f'{data_path}/test_slot.txt',
    )


def make_mask(batch, real_len, max_len, label_size):
    """Build the (batch, max_len, label_size) mask used when computing the loss.

    Timesteps before each sequence's true length are set to 1.0; padded
    timesteps stay 0.0 so they do not contribute to the loss.
    """
    mask = torch.zeros(batch, max_len, label_size)
    for row, length in enumerate(real_len):
        mask[row, :length] = 1.0
    return mask


def masked_log_softmax(vector: torch.Tensor, mask: torch.Tensor, dim: int = -1) -> torch.Tensor:
    """log_softmax over `vector` with masked positions driven to ~-inf.

    A `None` mask degrades to a plain log_softmax. Otherwise the mask is
    broadcast up to `vector`'s rank by inserting dimensions at axis 1.
    """
    if mask is None:
        return torch.nn.functional.log_softmax(vector, dim=dim)

    mask = mask.float()
    while mask.dim() < vector.dim():
        mask = mask.unsqueeze(1)

    # 1e-45 is the smallest positive float32, so log(mask + 1e-45) is ~0 where
    # mask == 1 and a very large negative number where mask == 0, effectively
    # removing masked entries from the softmax normalization.
    return torch.nn.functional.log_softmax(vector + (mask + 1e-45).log(), dim=dim)


def one_hot(array, num, maxlen):
    """One-hot encode a 1-D label tensor or a 2-D (batch, maxlen) label tensor.

    For 2-D input, entries equal to `num` are skipped and left as all-zero
    rows (such a value would be out of range for `num` classes — presumably
    it is the padding index).
    """
    shape = array.size()
    batch = shape[0]

    if len(shape) == 1:
        encoded = torch.zeros(batch, num)
        for row in range(batch):
            encoded[row, array[row]] = 1
        return encoded

    encoded = torch.zeros(batch, maxlen, num)
    for row in range(batch):
        for col in range(maxlen):
            label = array[row, col]
            if label != num:
                encoded[row, col, label] = 1
    return encoded


def get_chunks(labels):
    """Extract (start_idx, end_idx, type) entity chunks from a tag sequence.

    `labels` is a list of string tags. Non-special tags are split
    positionally: tag = label[:1] (e.g. 'B', 'I', 'E') and
    type = label[2:] (the text after the separator), so tags are assumed
    to look like 'B-city' / 'I-city' / 'E-city' (BIOES-style scheme).
    'O' and the special tokens ('<PAD>', '<unk>', '<s>', '</s>',
    '<STOP>', '<START>') are all treated as tag 'O' with type 'O'.

    NOTE(review): the loop covers range(1, len(labels) - 1), so the first
    and last positions can never belong to a chunk — presumably they hold
    sentence-boundary sentinels; confirm against the caller.
    """
    chunks = []
    start_idx, end_idx = 0, 0
    for idx in range(1, len(labels) - 1):
        chunkStart, chunkEnd = False, False
        # Normalize the previous, current, and next label into (tag, type).
        if labels[idx - 1] not in ('O', '<PAD>', '<unk>', '<s>', '</s>', '<STOP>', '<START>'):
            prevTag, prevType = labels[idx - 1][:1], labels[idx - 1][2:]
        else:
            prevTag, prevType = 'O', 'O'
        if labels[idx] not in ('O', '<PAD>', '<unk>', '<s>', '</s>', '<STOP>', '<START>'):
            Tag, Type = labels[idx][:1], labels[idx][2:]
        else:
            Tag, Type = 'O', 'O'
        if labels[idx + 1] not in ('O', '<PAD>', '<unk>', '<s>', '</s>', '<STOP>', '<START>'):
            nextTag, nextType = labels[idx + 1][:1], labels[idx + 1][2:]
        else:
            nextTag, nextType = 'O', 'O'

        # A chunk starts on a 'B' tag, or on transitions that imply a
        # malformed/implicit boundary (e.g. O->I, O->E, E->I, E->E).
        if (Tag == 'B' and prevTag in ('B', 'I', 'O')) or (prevTag, Tag) in [('O', 'I'), ('E', 'E'), ('E', 'I'),
                                                                             ('O', 'E')]:
            chunkStart = True
        # A change of entity type also starts a new chunk.
        if Tag != 'O' and prevType != Type:
            chunkStart = True

        # A chunk ends before a 'B'/'O' (for B/I tags) or on an 'E' tag.
        if (Tag in ('B', 'I') and nextTag in ('B', 'O')) or (Tag == 'E' and nextTag in ('E', 'I', 'O')):
            chunkEnd = True
        # A change of entity type also ends the current chunk.
        if Tag != 'O' and Type != nextType:
            chunkEnd = True

        if chunkStart:
            start_idx = idx
        if chunkEnd:
            end_idx = idx
            chunks.append((start_idx, end_idx, Type))
            start_idx, end_idx = 0, 0
    return chunks
