from torchtext.data import Dataset, Field, LabelField, BucketIterator, Example
from torchtext.vocab import Vectors

import os
import re


class QADataset(Dataset):
    """Dataset of (context, question, label) triples read from a directory.

    Every regular file under *path* contributes one example; within a file,
    0-based line 2 holds the context, line 4 the question and line 6 the
    label (other lines are ignored).
    """

    def __init__(self, path, need_sep_token, text_field, label_field):
        """
        :param path: directory whose files each hold one QA example
        :param need_sep_token: whether Question and Answer are merged or kept
            separate — currently unused, kept for interface compatibility
        :param text_field: torchtext Field shared by context and question
        :param label_field: torchtext Field for the label
        :raises ValueError: if any file is missing one of the expected lines,
            so the three parallel lists would be silently mispaired
        """
        fields = [('context', text_field), ('question', text_field), ('label', label_field)]
        files = [os.path.join(path, f) for f in os.listdir(path)
                 if os.path.isfile(os.path.join(path, f))]

        context_list, question_list, label_list = [], [], []
        for file in files:
            with open(file, 'r') as rf:
                for idx, line in enumerate(rf):
                    # The three indices are mutually exclusive, so elif
                    # avoids redundant comparisons per line.
                    if idx == 2:
                        context_list.append(line.strip())
                    elif idx == 4:
                        question_list.append(line.strip())
                    elif idx == 6:
                        label_list.append(line.strip())

        # Fail loudly on malformed input instead of mispairing examples
        # (the original indexed by range(len(context_list)) and could raise
        # a bare IndexError or pair fields from different files).
        if not (len(context_list) == len(question_list) == len(label_list)):
            raise ValueError(
                'Malformed data files in %r: %d contexts, %d questions, %d labels'
                % (path, len(context_list), len(question_list), len(label_list)))

        # zip() pairs the parallel lists without manual index bookkeeping.
        examples = [Example.fromlist([ctx, q, lbl], fields)
                    for ctx, q, lbl in zip(context_list, question_list, label_list)]

        super(QADataset, self).__init__(examples, fields)


def get_iterator(config):
    """Build train/dev BucketIterators and the pretrained embedding matrix.

    :param config: object exposing train_file, dev_file, vector_file,
        cache_file, vocab_file, n_entities, batch_size and device
    :return: (train_iterator, dev_iterator, embedding_vectors) where
        embedding_vectors is text_field.vocab.vectors
    """
    # Compile the pattern once instead of re-parsing it on every
    # tokenize() call during dataset construction.
    token_pattern = re.compile(r'[: ]')

    def tokenize(text):
        return token_pattern.split(text)

    text_field = Field(sequential=True, use_vocab=True, tokenize=tokenize, batch_first=True)
    # A truly numeric label would instead use:
    #   LABEL = Field(sequential=False, use_vocab=False)
    label_field = LabelField(use_vocab=True)

    # Build the two datasets (fields are shared so one vocab covers both).
    train_dataset = QADataset(config.train_file, False, text_field, label_field)
    dev_dataset = QADataset(config.dev_file, False, text_field, label_field)

    # Load pretrained word vectors.
    vectors = Vectors(name=config.vector_file, cache=config.cache_file)
    with open(config.vocab_file, 'r') as rf:
        vocabs = [line.strip() for line in rf]

    entities = ['@entity%d' % i for i in range(config.n_entities)]
    vocabs = vocabs + entities + ['<UNK>', '@placeholder']

    # build_vocab accepts either a dataset or a plain list; tokens outside
    # the vocabulary raise an error at lookup time.
    text_field.build_vocab(vocabs, vectors=vectors)
    label_field.build_vocab(entities)

    train_iterator = BucketIterator(train_dataset, batch_size=config.batch_size,
                                    device=config.device,
                                    sort_key=lambda x: len(x.context),
                                    sort_within_batch=True, shuffle=True)
    # NOTE(review): the dev iterator is shuffled too; this does not change
    # evaluation metrics but consider shuffle=False for reproducible batches.
    dev_iterator = BucketIterator(dev_dataset, batch_size=config.batch_size,
                                  device=config.device,
                                  sort_key=lambda x: len(x.context),
                                  sort_within_batch=True, shuffle=True)
    return train_iterator, dev_iterator, text_field.vocab.vectors
