from torchtext.legacy.data import BucketIterator, Example, Field, LabelField, Dataset, RawField, NestedField, \
    TabularDataset
from torchtext.vocab import Vectors

import os
import re
import nltk
import json
import torch


def word_tokenize(text):
    """Tokenize *text* with NLTK, normalizing PTB-style quotes (`` and '') to '"'."""
    normalized = []
    for token in nltk.word_tokenize(text):
        token = token.replace("''", '"')
        token = token.replace("``", '"')
        normalized.append(token)
    return normalized


class SQuAD():
    """
    Handles preprocessing of the raw SQuAD JSON data.
    """

    def process(self, path):
        """
        Process the raw SQuAD file at *path* and write the result to ``path + 'l'``
        in JSON-lines format (one record per line).

        Each record holds the question id, the raw context and question strings,
        the answer text, and the answer span converted from character offsets
        (as given in the dataset) to token indices under ``word_tokenize``.

        :param path: path to a SQuAD-format JSON file.
        :return: None (output is written to ``path + 'l'``).
        """
        dump = []
        # Characters the tokenizer drops but that still occupy positions
        # in the raw context string (must be counted when converting offsets).
        skip_chars = [' ', '\n']

        with open(path, 'r', encoding='utf-8') as f:
            data = json.load(f)
            data = data['data']  # list of articles

            for article in data:  # each article is a dict
                for paragraph in article['paragraphs']:
                    context = paragraph['context']
                    tokens = word_tokenize(context)
                    for qa in paragraph['qas']:  # each question of the paragraph
                        qa_id = qa['id']
                        question = qa['question']
                        for ans in qa['answers']:  # each answer of the question
                            answer = ans['text']
                            # Character-level start offset of the answer.
                            s_idx = ans['answer_start']
                            # Character-level end offset of the answer.
                            e_idx = s_idx + len(answer)

                            # Convert s_idx / e_idx from character offsets to
                            # token indices by walking the context alongside
                            # the token list.
                            char_pos = 0
                            # Set once the start token has been located.
                            found_start = False
                            for i, token in enumerate(tokens):
                                # Skip whitespace/newlines the tokenizer removed
                                # but which still count toward character offsets.
                                while char_pos < len(context):
                                    if context[char_pos] in skip_chars:
                                        char_pos += 1
                                    else:
                                        break

                                char_pos += len(token)
                                if char_pos > s_idx and not found_start:
                                    s_idx = i
                                    found_start = True
                                if char_pos >= e_idx:
                                    e_idx = i
                                    break

                            dump.append({'id': qa_id,
                                         'context': context,
                                         'question': question,
                                         'answer': answer,
                                         's_idx': s_idx,
                                         'e_idx': e_idx})

        # Write one JSON object per line (JSON-lines format).
        with open('{}l'.format(path), 'w', encoding='utf-8') as f:
            for record in dump:
                json.dump(record, f)
                print('', file=f)


def get_iterator(config):
    """
    Build train/dev BucketIterators over the SQuAD data.

    Preprocesses the raw JSON files into JSON-lines form if needed, builds
    (or loads a cached copy of) the torchtext datasets, builds the word and
    character vocabularies, and returns the iterators plus both vocabs.

    :param config: object with attributes ``train_file``, ``dev_file``,
        ``context_threshold``, ``batch_size`` and ``device``.
    :return: (train_iterator, dev_iterator, WORD.vocab, CHAR.vocab)
    """
    path = 'data/squad'
    dataset_path = path + '/torchtext/'
    # BUG FIX: train_examples_path previously pointed at 'dev_examples.pt',
    # so loading from cache silently used the dev split for training.
    train_examples_path = dataset_path + 'train_examples.pt'
    dev_examples_path = dataset_path + 'dev_examples.pt'

    data_process = SQuAD()

    # Preprocess raw files into '<file>l' (JSON lines) if not already done.
    print("preprocessing data files...")
    if not os.path.exists('{}l'.format(config.train_file)):
        data_process.process('{}'.format(config.train_file))
    if not os.path.exists('{}l'.format(config.dev_file)):
        data_process.process('{}'.format(config.dev_file))

    # Field definitions.
    RAW = RawField()
    # explicit declaration for torchtext compatibility
    RAW.is_target = False
    CHAR_NESTING = Field(batch_first=True, tokenize=list, lower=True)
    CHAR = NestedField(CHAR_NESTING, tokenize=word_tokenize)
    # include_lengths=True: batches come back as (padded minibatch, lengths)
    # instead of just the padded minibatch.
    WORD = Field(batch_first=True, tokenize=word_tokenize, lower=True, include_lengths=True)
    LABEL = Field(sequential=False, unk_token=None, use_vocab=False)

    dict_fields = {'id': ('id', RAW),
                   's_idx': ('s_idx', LABEL),
                   'e_idx': ('e_idx', LABEL),
                   'context': [('c_word', WORD), ('c_char', CHAR)],
                   'question': [('q_word', WORD), ('q_char', CHAR)]}

    list_fields = [('id', RAW), ('s_idx', LABEL), ('e_idx', LABEL),
                   ('c_word', WORD), ('c_char', CHAR),
                   ('q_word', WORD), ('q_char', CHAR)]

    # Check for the actual cache files rather than just the directory, so a
    # stale or partially-written cache directory does not break loading.
    if os.path.exists(train_examples_path) and os.path.exists(dev_examples_path):
        print("loading splits...")
        train_examples = torch.load(train_examples_path)
        dev_examples = torch.load(dev_examples_path)

        train = Dataset(examples=train_examples, fields=list_fields)
        dev = Dataset(examples=dev_examples, fields=list_fields)
    # First run: build the datasets from the preprocessed files and cache them.
    else:
        print("building splits...")
        train, dev = TabularDataset.splits(
            path=path,
            train='{}l'.format(config.train_file),
            validation='{}l'.format(config.dev_file),
            format='json',
            fields=dict_fields)

        os.makedirs(dataset_path, exist_ok=True)
        torch.save(train.examples, train_examples_path)
        torch.save(dev.examples, dev_examples_path)

    # Cut too-long contexts from the training set for efficiency.
    if config.context_threshold > 0:
        train.examples = [e for e in train.examples if len(e.c_word) <= config.context_threshold]

    print("building vocab...")
    CHAR.build_vocab(train, dev)
    WORD.build_vocab(train, dev)

    print("building iterators...")
    train_iterator, dev_iterator = BucketIterator.splits((train, dev),
                                                         batch_sizes=[config.batch_size, config.batch_size],
                                                         device=config.device,
                                                         sort_key=lambda x: len(x.c_word))
    return train_iterator, dev_iterator, WORD.vocab, CHAR.vocab
