import numpy as np
from torch.utils.data import Dataset
from transformers import BertTokenizer

def fill(sequence, maxlen, dtype="int64"):
    """Pad or truncate *sequence* to exactly *maxlen* entries.

    Entries beyond ``maxlen`` are dropped; shorter sequences are
    right-padded with zeros.

    Args:
        sequence: 1-D sequence of numbers (list or array-like).
        maxlen: target length of the returned array.
        dtype: numpy dtype name for the result (default ``"int64"``).

    Returns:
        ``np.ndarray`` of shape ``(maxlen,)`` with the requested dtype.
    """
    # Allocate with the target dtype up front instead of the original
    # zeros().astype(), which built a float64 array and then cast it.
    padded = np.zeros(maxlen, dtype=dtype)
    trunc = np.asarray(sequence[:maxlen], dtype=dtype)
    padded[:len(trunc)] = trunc
    return padded


class Tokenizer(object):
    """Thin wrapper around a pretrained ``BertTokenizer`` that converts raw
    text into fixed-length id arrays of size ``max_len``."""

    def __init__(self, max_len, pretrained_bert_name):
        # TODO: can pretrained bert name restore models trained by us?
        self.tokenizer = BertTokenizer.from_pretrained(pretrained_bert_name)
        self.max_len = max_len

    def sentence_to_id(self, text, reverse=False):
        """Tokenize ``text`` and return its token ids padded to ``max_len``.

        An empty tokenization is mapped to the single id ``[0]`` so callers
        always receive at least one token. When ``reverse`` is True, the id
        sequence is reversed before padding.
        """
        tokens = self.tokenizer.tokenize(text)
        ids = self.tokenizer.convert_tokens_to_ids(tokens)
        if not ids:
            ids = [0]
        ordered = ids[::-1] if reverse else ids
        return fill(ordered, self.max_len)

class BertDataset(Dataset):
    """Aspect-based sentiment dataset for BERT.

    Each raw example spans consecutive lines of the input file: the sentence
    containing the literal ``$T$`` aspect placeholder, the aspect term, and —
    when ``is_test`` is False — an integer sentiment label. Labels are stored
    shifted by +1 (so an on-disk label of -1/0/1 becomes 0/1/2).

    Every stored entry is ``[bert_input1, bert_input2]`` (plus the label for
    training data): the paired-sequence token ids and their segment-id mask.
    """

    def __init__(self, path, tokenizer: Tokenizer, is_test=False):
        with open(path, "r", encoding="utf-8", newline="\n", errors="ignore") as fin:
            lines = fin.readlines()
        # readlines() keeps the trailing "\n" on every line, so the original
        # check `lines[-1] == ""` could never match (and raised IndexError on
        # an empty file). Strip before comparing so a trailing blank line is
        # actually dropped and cannot misalign the interval-based grouping.
        if lines and lines[-1].strip() == "":
            lines.pop()

        # test files have no label line, so examples span 2 lines instead of 3
        interval = 3 if not is_test else 2
        data = []

        for i in range(0, len(lines), interval):
            sentence = lines[i].lower().strip()
            aspect = lines[i + 1].lower().strip()
            label = None
            if not is_test:
                # shift on-disk labels by +1 so classes are non-negative
                label = int(lines[i + 2].strip()) + 1

            fill_in_sentence = sentence.replace("$T$", aspect)
            # paired-sequence input: "[CLS] sentence [SEP] aspect [SEP]"
            bert_input1 = tokenizer.sentence_to_id("[CLS] " + fill_in_sentence + " [SEP] " + aspect + " [SEP]")
            # segment ids: 0 over "[CLS] sentence [SEP]", 1 over "aspect [SEP]"
            # (non-zero id counts recover the unpadded lengths; +2/+1 account
            # for the special tokens added in bert_input1)
            sentence_ids = tokenizer.sentence_to_id(fill_in_sentence)
            aspect_ids = tokenizer.sentence_to_id(aspect)
            bert_input2 = np.append(np.zeros(np.sum(sentence_ids != 0) + 2), np.ones(np.sum(aspect_ids != 0) + 1))
            bert_input2 = fill(bert_input2, tokenizer.max_len)

            # single-sequence encodings; computed but not included in `entry`
            bert_input3 = tokenizer.sentence_to_id("[CLS] " + fill_in_sentence + " [SEP]")
            # fixed typo: trailing special token was "[ESP]" instead of "[SEP]"
            bert_input4 = tokenizer.sentence_to_id("[CLS] " + aspect + " [SEP]")

            entry = [bert_input1, bert_input2]

            if not is_test:
                entry.append(label)
            data.append(entry)
        self.data = data

    def __getitem__(self, index):
        return self.data[index]

    def __len__(self):
        return len(self.data)