import json

import pandas as pd
import config
import tensorflow as tf


def get_vocab_id():
    """Load the vocabulary file.

    Returns:
        A pair ``(id2word, word2id)``: the words in file order, and a
        mapping from word to its integer id.
    """
    frame = pd.read_csv(config.VOCAB_PATH, names=['word', 'id'])
    id2word = frame['word'].tolist()
    word2id = dict(frame.values)
    return id2word, word2id


def get_label_id():
    """Load the label set.

    Returns:
        A pair ``(id2label, label2id)``: the labels in file order, and a
        mapping from label string to its integer id.
    """
    frame = pd.read_csv(config.LABEL_PATH, names=['label', 'id'])
    id2label = frame['label'].tolist()
    label2id = dict(frame.values)
    return id2label, label2id


def data_generator(train=True, batch_size=config.BATCH_SIZE):
    """Yield padded batches of (word_ids, label_ids, mask) tensors.

    Args:
        train: If True, read from config.TRAIN_PATH, else config.TEST_PATH.
        batch_size: Number of sentences per batch; the final batch may be
            smaller.

    Yields:
        The 3-tuple produced by ``collate_fn`` for each batch.
    """
    dataset = DataSet(train=train)
    batch = []
    for sample in dataset:
        batch.append(sample)
        if len(batch) == batch_size:
            # BUGFIX: collate_fn expects a list of (word_ids, label_ids)
            # samples. The previous code transposed the batch with
            # list(zip(*batch)) first, which handed collate_fn exactly two
            # "samples" (all word sequences, all label sequences) and made
            # it pair one sentence's words with another sentence's words.
            yield collate_fn(batch)
            batch = []

    # Flush the final partial batch, if any.
    if batch:
        yield collate_fn(batch)


class DataSet():
    """Sentence-level dataset over a space-separated "word label" file.

    Sentences in the source file are separated by blank lines. The whole
    file is loaded into a DataFrame once, and ``cut_point`` records the
    DataFrame row offset where each sentence starts, so ``__getitem__``
    can slice out one sentence at a time.

    Iteration relies on the legacy ``__getitem__``/IndexError protocol:
    ``for sample in dataset`` (as used by ``data_generator``) calls
    ``__getitem__(0), __getitem__(1), ...`` until ``self.cut_point[index + 1]``
    raises IndexError.
    """
    def __init__(self, train: bool = True, base_len=50):
        super(DataSet, self).__init__()

        # NOTE(review): base_len is stored but not read by any method
        # visible here — confirm whether it is still needed.
        self.base_len = base_len
        self.path = config.TRAIN_PATH if train else config.TEST_PATH

        # One row per non-blank line: a word and its label. pandas skips
        # blank lines by default (skip_blank_lines=True), which
        # get_address_point_size compensates for below.
        self.data = pd.read_csv(self.path, sep=' ', names=['word', 'label'])

        _, self.vocab2id = get_vocab_id()
        _, self.label2id = get_label_id()

        # cut_point[i] is the DataFrame row where sentence i starts;
        # cut_point[i + 1] is where it ends (exclusive). Starts with the
        # implicit first boundary at row 0.
        self.cut_point = [0]
        self.count = 0  # number of complete sentences found
        self.get_address_point_size()

    def __getitem__(self, index):
        """Return (word_ids, label_ids) for the index-th sentence.

        Unknown words map to config.WORD_UNK_ID; unknown labels fall back
        to the id of the 'O' label.
        """
        address = self.data.iloc[self.cut_point[index]:self.cut_point[index + 1], :]
        address_record = [self.vocab2id.get(word, config.WORD_UNK_ID) for word in address['word']]
        target = [self.label2id.get(label, self.label2id['O']) for label in address['label']]
        return address_record, target

    def __len__(self):
        # Number of sentences (blank-line-terminated blocks) in the file.
        return self.count

    def get_address_point_size(self):
        """Scan the raw file for blank lines and record sentence boundaries.

        Blank lines separate sentences in the file but are dropped by
        pd.read_csv, so the DataFrame row of a line sitting after k blank
        lines is its file line index minus k. ``index - self.count``
        performs exactly that correction, turning each blank line's file
        position into the DataFrame row where the next sentence starts.
        """
        with open(self.path, encoding='utf-8') as file:
            file_iter = file.readlines()

        for index, word in enumerate(file_iter):
            if word == '\n':
                self.cut_point.append(index - self.count)
                self.count += 1


def collate_fn(batch):
    """Pad a batch of (word_ids, label_ids) samples to a common length.

    Sorts the batch in place, longest sentence first, then right-pads every
    sample to the longest length. Returns a 3-tuple of tensors:
    word ids, label ids, and a boolean mask marking real (non-pad) tokens,
    as consumed by the CRF layer.
    """
    # Longest sample first; after the sort, batch[0] fixes the pad width.
    batch.sort(key=lambda sample: len(sample[0]), reverse=True)
    longest = max(len(batch[0][0]), len(batch[0][1]))

    padded_words = []
    padded_labels = []
    masks = []
    for sample in batch:
        words, labels = sample[0], sample[1]
        pad = longest - len(words)
        padded_words.append(words + [config.WORD_PAD_ID] * pad)
        padded_labels.append(labels + [config.LABEL_O_ID] * pad)
        # True over real tokens, False over padding — for crf calc.
        masks.append([True] * len(words) + [False] * pad)

    return tf.convert_to_tensor(padded_words), tf.convert_to_tensor(padded_labels), tf.convert_to_tensor(masks, dtype=tf.bool)


if __name__ == '__main__':
    # Smoke test: pull one batch and echo its shapes plus the first
    # sentence decoded back into words.
    id2vocab, _ = get_vocab_id()
    id2label, _ = get_label_id()
    for batch_no, (inputs, labels, batch_mask) in enumerate(data_generator()):
        print(inputs.shape, labels.shape, batch_mask.shape)
        print([id2vocab[token_id] for token_id in inputs[0]])
        break
