# 循环神经网络
# http://ai.stanford.edu/~amaas/data/sentiment/
import os.path
import re
import rnn.imdb.lib as lib
import torch
from torch.utils.data import DataLoader, Dataset


def tokenize(content):
    """Split a raw IMDB review into lowercase tokens.

    HTML tags are stripped first, then every punctuation/control character
    in ``punctuation`` is replaced by a space, and the remainder is split
    on whitespace.

    :param content: raw review text (str)
    :return: list of lowercase tokens (list[str]); empty list for empty input
    """
    # Strip HTML tags such as <br /> that appear in the raw reviews.
    content = re.sub(r"<.*?>", " ", content)
    # Characters to treat as token separators. Includes backslash, '|' and '^'.
    # NOTE: the previous alternation-based pattern ("|".join(filters)) was
    # buggy — the lone '\\' element escaped the following '|' separator, so a
    # literal '|' was matched instead of a backslash, and the unescaped '^'
    # acted as a start-of-string anchor. A re.escape'd character class matches
    # every character literally in a single pass.
    punctuation = '!"#$%&()*+,-./:;<=>?@[\\]^_`{|}~\t\n\x97\x96”“'
    content = re.sub("[" + re.escape(punctuation) + "]", " ", content)
    return [token.strip().lower() for token in content.split()]


class ImdbDataset(Dataset):
    """Dataset over the aclImdb review files.

    Each item is one review file; the label is derived from the parent
    directory name ("neg" -> 0, anything else, i.e. "pos" -> 1).
    """

    def __init__(self, train=True):
        """Collect the paths of every review file in the pos/ and neg/ dirs.

        :param train: use the train split when True, the test split otherwise
        """
        self.train_data_path = r"imdb/aclImdb/train"
        self.test_data_path = r"imdb/aclImdb/test"
        data_path = self.train_data_path if train else self.test_data_path
        # Both class folders, so a single loop can walk them below.
        temp_data_path = [os.path.join(data_path, "pos"), os.path.join(data_path, "neg")]
        self.total_file_path = []  # flat list of every review file path
        for path in temp_data_path:
            self.total_file_path.extend([os.path.join(path, i) for i in os.listdir(path)])

    def __getitem__(self, index):
        """Return (tokens, label) for the review file at *index*.

        :param index: position in self.total_file_path
        :return: (list[str] tokens, int label) — 0 for neg, 1 for pos
        """
        file_path = self.total_file_path[index]
        # Parent directory name carries the class. Using os.path instead of
        # splitting on "/" keeps this correct on Windows, where os.path.join
        # produces backslash separators.
        label = 0 if os.path.basename(os.path.dirname(file_path)) == "neg" else 1
        # Context manager closes the handle; the IMDB corpus is UTF-8, and an
        # explicit encoding avoids locale-dependent decode errors.
        with open(file_path, encoding="utf-8") as f:
            content = f.read()
        tokens = tokenize(content)
        return tokens, label

    def __len__(self):
        """Number of review files found."""
        return len(self.total_file_path)


# Custom collate_fn so a batch is reassembled into the tensor form we need.
def collate_fn(batch):
    """Turn a batch of (tokens, label) pairs into a pair of LongTensors.

    *batch* is a list of tuples, each being one ``__getitem__`` result from
    the dataset; its length is determined by batch_size. Token lists are
    encoded to fixed-length index sequences via the shared word-to-sequence
    mapper before being stacked into a tensor.
    """
    token_lists = [sample[0] for sample in batch]
    labels = [sample[1] for sample in batch]
    encoded = [lib.ws.transform(tokens, length=lib.length) for tokens in token_lists]
    return torch.LongTensor(encoded), torch.LongTensor(labels)


def get_data_loader(train=True, batch_size=lib.batch_size):
    """Build a shuffled DataLoader over the chosen IMDB split.

    :param train: load the train split when True, the test split otherwise
    :param batch_size: samples per batch (defaults to the project-wide value)
    :return: DataLoader yielding (contents, labels) LongTensor pairs
    """
    dataset = ImdbDataset(train)
    return DataLoader(
        dataset,
        batch_size=batch_size,
        shuffle=True,
        collate_fn=collate_fn,
    )
