from  torch.utils.data import DataLoader,Dataset
import pandas as pd
import re
import torch
from torch.utils.data import random_split

"""-----------------------参数设置-----------------------"""
max_len=40   # 句子最大长度
batch_size=64   # 每次训练的样本数量
epochs=10       # 训练轮数
lr=0.001     # 学习率
encoder_embedding_dim=512   # 编码器词向量维度
encoder_input_size=512        # 编码器输入层维度
encoder_hidden_size=256      # 编码器隐藏层维度


# ----------------------------数据预处理------------------------------
class Data(Dataset):
    """Dataset over the waimai_10k sentiment corpus.

    Reads the 'review' (text) and 'label' (0/1) columns from
    ./waimai_10k.csv, tokenizes each review into ASCII-letter runs,
    single Chinese characters, and digit runs, and builds a token-to-id
    vocabulary on top of the four special tokens
    <pad>=0, <sos>=1, <eos>=2, <unk>=3.
    """

    # Tokenizer: one run of ASCII letters | one CJK character | one run of digits.
    _TOKEN_RE = re.compile(r'[a-zA-Z]+|[\u4e00-\u9fa5]|[0-9]+')

    def __init__(self):
        super().__init__()
        self.data = pd.read_csv('./waimai_10k.csv', encoding='utf-8')
        self.char2id = {'<pad>': 0, '<sos>': 1, '<eos>': 2, '<unk>': 3}
        self.words = []   # flat list of every token (for vocabulary building)
        self.xy = []      # list of (token_list, int_label) pairs
        for sentence, label in zip(self.data['review'], self.data['label']):
            tokens = self._TOKEN_RE.findall(sentence)
            self.words += tokens
            self.xy.append((tokens, int(label)))
        # sorted() makes id assignment deterministic across runs; iterating a
        # bare set would depend on string-hash randomization, producing a
        # different vocabulary mapping every process start.
        for word in sorted(set(self.words)):
            self.char2id[word] = len(self.char2id)
        self.id2char = {i: w for w, i in self.char2id.items()}

    def tranform(self, x, max_len=20, eos=False):
        """Encode a token list to ids, truncated/padded to max_len.

        Unknown tokens map to <unk>. With eos=True the result has
        max_len + 1 ids (<eos> is appended after truncation, before
        padding) — this matches the original behavior, so decoder-style
        callers are unaffected.

        Works on a copy: the original implementation padded the caller's
        list in place, which permanently corrupted the token lists cached
        in self.xy (and hence the lengths reported by __getitem__) after
        the first epoch.
        """
        x = list(x[:max_len])  # copy — never mutate the caller's list
        length = len(x)
        if eos:
            x.append('<eos>')
        if length < max_len:
            x += ['<pad>'] * (max_len - length)
        return [self.char2id.get(c, self.char2id['<unk>']) for c in x]

    def inverseTransform(self, ids):
        """Map a sequence of ids back to their tokens (inverse of tranform)."""
        return [self.id2char[i] for i in ids]

    def __getitem__(self, item):
        """Return (token_ids [max_len], label [1], true_length [1]) tensors."""
        tokens, label = self.xy[item]
        # True pre-padding length, capped at max_len so it never exceeds the
        # padded tensor width (the original reported the raw length, which
        # could be > max_len for long reviews).
        x_lengths = torch.LongTensor([min(len(tokens), max_len)])
        x = torch.LongTensor(self.tranform(tokens, max_len))
        y = torch.LongTensor([label])
        return x, y, x_lengths

    def __len__(self):
        return len(self.xy)

# Build the dataset once at import time (reads ./waimai_10k.csv from the CWD).
data=Data()
# Vocabulary size — used elsewhere to size the encoder's embedding table.
encoder_number_embedding=len(data.char2id)
# 80/20 train/test split. NOTE(review): no generator/seed is passed, so the
# split differs on every run — consider seeding for reproducibility.
trainSet,testSet=random_split(data,lengths=[0.8,0.2])
# drop_last=True discards the final partial batch so every batch is full-size.
trainloader=DataLoader(trainSet,batch_size=batch_size,shuffle=True,drop_last=True)
# NOTE(review): shuffle=True on the test loader is unusual (evaluation does not
# need shuffling); harmless for aggregate metrics, but confirm intent.
testloader=DataLoader(testSet,batch_size=batch_size,shuffle=True,drop_last=True)