""""
完成数据集准备
"""
from torch.utils.data import Dataset,DataLoader

import lib
from lib import ws
import os
import re
import torch


def tokenlize(content):
    """Tokenize raw review text into a list of lowercase tokens.

    Strips HTML tags (e.g. ``<br />``), removes punctuation and a few
    control/garbage characters, then splits on whitespace.

    :param content: raw text of one review
    :return: list of lowercase token strings
    """
    # Drop HTML tags by replacing them with a space.
    content = re.sub(r"<.*?>", " ", content)
    # Characters to strip out. Escape each one so regex metacharacters
    # like '$' and '.' match literally — the original joined them raw,
    # so '$' acted as an end-of-string anchor and literal dollar signs
    # were never removed.
    filters = [".", ":", "!", '"', "#", "$", "%", "&", "\t", "\n", "\x97", "\x96"]
    content = re.sub("|".join(re.escape(ch) for ch in filters), " ", content)
    tokens = [token.strip().lower() for token in content.split()]
    return tokens


class ImdbDataset(Dataset):
    """Dataset over the aclImdb review files on disk.

    Each item is a ``(tokens, label)`` pair where ``label`` is 0 for a
    negative review and 1 for a positive one.
    """

    def __init__(self, train=True):
        """Collect the paths of every review ``.txt`` file in one split.

        :param train: use the train split when True, else the test split
        """
        super().__init__()
        # NOTE(review): hard-coded Windows paths — consider making these
        # configurable instead of baked into the class.
        self.train_data_path = "E:\python\deep learning\code\文本情感分类\data\\aclImdb\\train"
        self.test_data__path = "E:\python\deep learning\code\文本情感分类\data\\aclImdb\\test"

        data_path = self.train_data_path if train else self.test_data__path

        # pos/ and neg/ sub-directories hold the individual review files.
        temp_data_path = [os.path.join(data_path, "pos"), os.path.join(data_path, "neg")]

        self.total_file_path = []  # full path of every review file in this split
        for path in temp_data_path:
            file_name_list = os.listdir(path)
            file_path_list = [os.path.join(path, i) for i in file_name_list if i.endswith(".txt")]
            self.total_file_path.extend(file_path_list)

    def __getitem__(self, index):
        """Return ``(tokens, label)`` for the review at *index*."""
        file_path = self.total_file_path[index]
        # The parent directory name ("pos"/"neg") encodes the label.
        # Assumes Windows path separators — TODO confirm if run elsewhere.
        label_str = file_path.split("\\")[-2]
        label = 0 if label_str == "neg" else 1
        # Use a context manager so the file handle is closed promptly;
        # the original leaked the handle to the garbage collector.
        with open(file_path, encoding="utf-8") as f:
            content = f.read()
        tokens = tokenlize(content)
        return tokens, label

    def __len__(self):
        """Number of review files in the selected split."""
        return len(self.total_file_path)

def collate_fn(batch):
    """Custom collate function: encode token lists and stack labels.

    Overriding the default collate keeps token lists intact instead of
    letting DataLoader zip them element-wise.

    :param batch: sequence of (tokens, label) pairs from ImdbDataset
    :return: (content, label) as LongTensors
    """
    tokens_batch, labels = zip(*batch)
    # Map every token list to a fixed-length index sequence.
    encoded = [ws.transform(tokens, max_len=lib.max_len) for tokens in tokens_batch]
    return torch.LongTensor(encoded), torch.LongTensor(labels)



def get_dataloder(train=True):
    """Build a DataLoader over the IMDB dataset.

    :param train: select the train split when True, else the test split
    :return: DataLoader yielding (content, label) batches of size 128
    """
    # Bug fix: forward `train` to the dataset — it was previously
    # ignored, so the test split could never be loaded.
    imdb_dataset = ImdbDataset(train)
    data_loader = DataLoader(imdb_dataset, batch_size=128, shuffle=True, collate_fn=collate_fn)
    return data_loader




if __name__=="__main__":
    for idx,(input,target) in enumerate(get_dataloder()):
        print(idx)
        print(input)
        print(target)
        break
