import re
from torchtext.vocab import vocab
from torchtext.data import get_tokenizer
from collections import Counter
from tqdm import tqdm
from torch.utils.data import DataLoader
import torch

# Module-level word tokenizer built from torchtext's 'basic_english' rule;
# intended as the default tokenizer callers pass into the loader below.
tokenizer_en = get_tokenizer('basic_english')


def clean_str(string):
    """Lower-case *string*, expand common English contractions, and replace
    every character outside ``[a-z0-9-?!.,]`` with a single space.

    Two defects fixed relative to the original:
      * a dead nested ``def clean_str`` (body ``2``) has been removed;
      * contraction expansion now runs BEFORE the regex pass — the regex
        strips apostrophes, so running the ``.replace()`` calls afterwards
        (as the original did) meant they could never match anything.
    """
    # contraction -> expansion, applied on the lower-cased text
    contractions = {
        "that's": "that is",
        "isn't": "is not",
        "don't": "do not",
        "didn't": "did not",
        "won't": "will not",
        "can't": "can not",
        "you're": "you are",
        "they're": "they are",
        "you'll": "you will",
        "we'll": "we will",
        "what's": "what is",
        "i'm": "i am",
        "let's": "let us",
    }
    string = string.lower()
    for contraction, expansion in contractions.items():
        string = string.replace(contraction, expansion)
    # Keep letters, digits and - ? ! . , ; everything else becomes a space.
    return re.sub(r"[^a-z0-9\-\?\!\.\,]", " ", string)


def build_vocab(tokenizer, filepath, min_freq, specials=None):
    """Build a torchtext vocabulary from a CSV-style data file.

    Each line is split on ',' and the last field (minus its trailing
    character, presumably a closing quote — TODO confirm against the data
    format) is cleaned, tokenized and counted.

    :param tokenizer: callable mapping a cleaned sentence to a token list
    :param filepath: path to the data file (utf-8)
    :param min_freq: tokens rarer than this are dropped from the vocab
    :param specials: special tokens; defaults to ['<unk>', '<pad>']
    :return: a Vocab whose default index points at '<unk>'
    """
    specials = ['<unk>', '<pad>'] if specials is None else specials
    token_counts = Counter()
    with open(filepath, encoding='utf8') as f:
        for row in tqdm(list(f), ncols=100):
            text = row.strip().split(",")[-1][:-1]
            token_counts.update(tokenizer(clean_str(text)))
    v = vocab(token_counts, min_freq=min_freq, specials=specials)
    # Unknown tokens map to '<unk>' instead of raising.
    v.set_default_index(v['<unk>'])
    return v


class LoadSentenceClassificationDataset:
    """Load a CSV-style sentence-classification dataset and expose it as
    padded, batched DataLoaders.

    Lines are expected in the form ``"label","...","sentence"`` with 1-based
    integer labels (shifted to 0-based class ids here).
    """

    def __init__(self, train_file_path=None, min_freq=1, tokenizer=None, batch_size=20, max_sen_len='same'):
        """
        :param train_file_path: path to the training data file
        :param min_freq: minimum token frequency; rarer tokens are dropped
        :param tokenizer: callable splitting a cleaned sentence into tokens
        :param batch_size: number of samples per batch
        :param max_sen_len: padding policy — 'same' (default) pads every batch
            to the longest sentence in the whole training set; None pads each
            batch only to its own longest sentence; an int pads to at least
            that length.

        BUGFIX: the original default was the typo 'sam', which never matched
        the ``== "same"`` check in load_train_val_test_data, so the string
        'sam' leaked into pad_sequence's max_len and crashed in max().
        """
        self.tokenizer = tokenizer
        self.min_freq = min_freq
        self.specials = ['<unk>', '<pad>']
        self.vocab = build_vocab(filepath=train_file_path,
                                 tokenizer=tokenizer,
                                 min_freq=min_freq,
                                 specials=self.specials)
        self.PAD_IDX = self.vocab['<pad>']
        self.UNK_IDX = self.vocab['<unk>']
        self.batch_size = batch_size
        self.max_sen_len = max_sen_len

    def data_process(self, filepath):
        """
        Convert each sample into index form via the vocabulary and track the
        longest sentence seen.

        :param filepath: path of the dataset to process
        :return: (data, max_len) where data is a list of
            (token_index_tensor, label_tensor) pairs and max_len is the
            longest token sequence length in the file.
        """
        data = []
        max_len = 0
        # Use a context manager so the handle is closed; the original leaked
        # it via iter(open(...)).
        with open(filepath, encoding="utf8") as f:
            for raw in tqdm(list(f), ncols=100):
                line = raw.rstrip("\n").split('","')
                # Last field minus the trailing quote is the sentence;
                # first field minus the leading quote is the label.
                sen, label = line[-1][:-1], line[0][1:]
                sen = clean_str(sen)
                tensor_ = torch.tensor([self.vocab[token] for token in self.tokenizer(sen)], dtype=torch.long)
                # Labels in the file are 1-based; shift to 0-based class ids.
                label = torch.tensor(int(label) - 1, dtype=torch.long)
                max_len = max(max_len, tensor_.size(0))
                data.append((tensor_, label))
        return data, max_len

    def load_train_val_test_data(self, train_file_paths, test_file_paths):
        """Build train/test DataLoaders, resolving the 'same' padding policy
        to the training set's longest sequence length."""
        train_data, max_sen_len = self.data_process(train_file_paths)
        if self.max_sen_len == "same":
            self.max_sen_len = max_sen_len
        test_data = self.data_process(test_file_paths)[0]
        train_iter = DataLoader(train_data, batch_size=self.batch_size, shuffle=True, collate_fn=self.generate_batch)
        # NOTE(review): shuffling the test split is unusual but kept to
        # preserve the original behavior.
        test_iter = DataLoader(test_data, batch_size=self.batch_size, shuffle=True, collate_fn=self.generate_batch)
        return train_iter, test_iter

    def generate_batch(self, data_batch):
        """collate_fn: pad sentences of a batch to a common length and stack
        labels into one tensor. Output is (seq_len, batch) since
        batch_first=False."""
        batch_sentence, batch_label = [], []
        for (sen, label) in data_batch:
            batch_sentence.append(sen)
            batch_label.append(label)
        batch_sentence = pad_sequence(batch_sentence,
                                      padding_value=self.PAD_IDX,
                                      batch_first=False,
                                      max_len=self.max_sen_len)
        batch_label = torch.tensor(batch_label, dtype=torch.long)
        return batch_sentence, batch_label


def pad_sequence(sequences, batch_first=False, max_len=None, padding_value=0):
    """Pad a list of variable-length tensors into one stacked tensor.

    Unlike torch's built-in ``pad_sequence``, a target ``max_len`` may be
    supplied; the effective length is the larger of ``max_len`` and the
    longest sequence (sequences are never truncated).

    :param sequences: non-empty list of tensors sharing trailing dimensions
    :param batch_first: output (batch, len, ...) if True, else (len, batch, ...)
    :param max_len: minimum padded length, or None for the batch maximum
    :param padding_value: fill value for the padded positions
    :return: the padded, stacked tensor
    """
    trailing = sequences[0].size()[1:]
    longest = max(seq.size(0) for seq in sequences)
    if max_len is not None:
        longest = max(max_len, longest)
    if batch_first:
        shape = (len(sequences), longest) + trailing
    else:
        shape = (longest, len(sequences)) + trailing
    padded = sequences[0].data.new(*shape).fill_(padding_value)
    for idx, seq in enumerate(sequences):
        n = seq.size(0)
        if batch_first:
            padded[idx, :n, ...] = seq
        else:
            padded[:n, idx, ...] = seq
    return padded
