import torch
from torch.utils.data import Dataset
from transformers import BertTokenizer
import numpy as np
import re

def process_string(s):
    """Lowercase *s*, strip HTML line breaks, pad punctuation with spaces,
    and expand common English contractions ('s, 'd, 'll, 're, 've, n't).

    Returns the normalized string (it may keep a trailing space when the
    input ends with punctuation).
    """
    s = re.sub(r"<br />", r" ", s.lower())  # drop HTML line breaks
    s = re.sub(r'""', r'"', s)              # collapse doubled quotes
    s = re.sub(r"([^'\w ])", r" \1 ", s)    # pad non-apostrophe punctuation with spaces
    s = re.sub(r"(^[^\w]+)", r" \1 ", s)    # separate leading punctuation
    s = re.sub(r"('$)", r" \1", s)          # separate a trailing apostrophe
    # Contraction expansion. NOTE: "'s" is also the English possessive,
    # which gets expanded to " is" too — accepted noise for sentiment data.
    s = re.sub(r"(?<=\w)'s", r" is", s)
    s = re.sub(r"(?<=\w)'d", r" would", s)
    s = re.sub(r"(?<=\w)'ll", r" will", s)
    s = re.sub(r"(?<=\w)'re", r" are", s)
    s = re.sub(r"(?<=\w)'ve", r" have", s)
    # Irregular negations must be handled BEFORE the generic n't rule,
    # which would otherwise produce "wo not" / "ca not".
    s = re.sub(r"won't", r"will not", s)
    s = re.sub(r"can't", r"can not", s)
    s = re.sub(r"n't", r" not", s)
    s = re.sub(r" +", r" ", s)  # collapse runs of spaces

    return s


def load_data(path, is_test=False):
    """Read one review record per line from *path*.

    Each line is expected to look like ``"<review text>",<label>``: the
    review is taken as ``line[1:-4]`` (leading quote and trailing quote,
    comma and label stripped) and, for labeled data, the label is the
    line's final character.

    Args:
        path: text file with one record per line.
        is_test: when True, no label is parsed and -1 is stored instead.

    Returns:
        List of ``(normalized_review, label)`` tuples; the label is a
        float for training data and -1 for test data.
    """
    lines = []
    with open(path, encoding="utf-8") as fp:
        for raw in fp:
            line = raw.strip()
            if not line:
                # The original readline loop used the first empty line as an
                # EOF sentinel, which truncated files containing blank lines;
                # skipping keeps the later records.
                continue
            try:
                review = process_string(line[1:-4])
                label = -1 if is_test else float(line[-1])
                lines.append((review, label))
            except ValueError:
                # Malformed label character — skip this record rather than
                # crash (was a bare `except: pass` that hid every error).
                continue
    return lines


class Tokenizer:
    """Thin wrapper around a BERT tokenizer that produces fixed-length
    token-id sequences (truncated or padded to ``max_seq_len``)."""

    def __init__(self, max_seq_len, pretrained_bert_name):
        # max_seq_len: target length of every returned id sequence.
        # pretrained_bert_name: model name/path given to BertTokenizer.
        self.tokenizer = BertTokenizer.from_pretrained(pretrained_bert_name)
        self.max_seq_len = max_seq_len

    def edit_len(self, text, drop="right"):
        """Force *text* (a list of token ids) to ``max_seq_len`` items.

        Over-long sequences are truncated; ``drop`` selects which side is
        discarded ("right" keeps the head, "left" keeps the tail).  Short
        sequences are right-padded with the tokenizer's pad id.
        """
        if len(text) > self.max_seq_len:
            # Bug fix: `drop` used to be ignored, so "left" silently
            # behaved like "right".
            if drop == "left":
                return text[-self.max_seq_len:]
            return text[:self.max_seq_len]
        return text + [self.tokenizer.pad_token_id] * (self.max_seq_len - len(text))

    def text_to_ids(self, text):
        """Tokenize *text* (prefixed with [CLS]) into a fixed-length id list."""
        if not text.startswith("[CLS]"):
            # NOTE(review): no [SEP] is appended; presumably the downstream
            # model only reads the [CLS] position — confirm.
            text = "[CLS]" + text
        sequence = self.tokenizer.convert_tokens_to_ids(self.tokenizer.tokenize(text))
        if len(sequence) == 0:
            sequence = [0]  # guard: never return an empty sequence
        return self.edit_len(sequence)


class SentiDataset(Dataset):
    """Torch dataset of pre-tokenized reviews with attention masks and labels."""

    def __init__(self, file_path, tokenizer, is_test=False):
        """Load and tokenize every record of *file_path* up front.

        Args:
            file_path: path understood by ``load_data``.
            tokenizer: ``Tokenizer`` instance (wraps a BertTokenizer).
            is_test: forwarded to ``load_data``; when True, labels are -1.
                (Previously hard-coded to False, so test files could not
                be loaded through this class.)
        """
        records = load_data(path=file_path, is_test=is_test)
        # records: [(string review, numeric label)]
        dataset = []
        for review, gold in records:
            text_ids = tokenizer.text_to_ids(review)
            # real tokens get mask 1, pad positions mask 0
            att_mask = [0 if tid == tokenizer.tokenizer.pad_token_id else 1 for tid in text_ids]
            dataset.append({
                "text_ids": torch.tensor(text_ids, dtype=torch.long),
                "attention_mask": torch.tensor(att_mask, dtype=torch.long),
                "label_ids": torch.tensor(gold, dtype=torch.float),
            })
        self.dataset = dataset

    def __getitem__(self, index):
        return self.dataset[index]

    def __len__(self):
        return len(self.dataset)