from torchtext.data import Field
from torchtext import data
from torchtext.vocab import Vectors
from tqdm import tqdm

from one_spacy_split_word import tokenize_en
import torch
import yaml
import csv


# Dataset definition
class MyDataset(data.Dataset):
    """A torchtext Dataset built from an in-memory sequence of rows.

    Each row is expected to look like ('this movie is great', 1):
    the text first, then the label.
    """

    def __init__(self, datatuple, text_field, label_field, test=False):
        """Build Examples from *datatuple*.

        Args:
            datatuple: iterable of rows, e.g. ('this movie is great', 1).
                When ``test`` is True a row may also contain only the text.
            text_field: torchtext Field applied to the text column.
            label_field: torchtext Field applied to the label column.
            test: if True, labels are not loaded (stored as None).
        """
        fields = [("text", text_field), ("label", label_field)]
        examples = []
        if test:
            # Test set: the label is not loaded.  Index row[0] instead of
            # unpacking (content, label) so that rows without a label
            # column are accepted too.
            for row in tqdm(datatuple):
                examples.append(data.Example.fromlist([row[0], None], fields))
        else:
            for content, label in tqdm(datatuple):
                # Example: a single training/test example; stores each
                # column of the row as an attribute (.text / .label).
                examples.append(data.Example.fromlist([content, label], fields))
        # Preprocessing done above; delegate to the parent class to
        # finish constructing the Dataset.
        super().__init__(examples, fields)


if __name__ == "__main__":
    # Load the experiment configuration (safe_load refuses arbitrary
    # object construction from the YAML file).
    with open("../config.yaml", "r", encoding="utf-8") as ymlfile:
        config = yaml.safe_load(ymlfile)
    print(config)

    # Text field: spaCy tokenization, start/end tokens from the config,
    # lowercased, padded or truncated to a fixed length.
    SRC = Field(tokenize=tokenize_en, init_token=config["init_token"],
                eos_token=config["eos_token"], lower=True,
                fix_length=config["fix_length"])

    # Label field: already numeric, so no vocabulary and no tokenization.
    LABEL = Field(sequential=False, use_vocab=False)

    # newline='' is required by the csv module so it controls newline
    # translation itself.
    with open('../data/train.csv', encoding='utf-8', newline='') as f:
        list_train_data = list(csv.reader(f))

    with open('../data/test.csv', encoding='utf-8', newline='') as f:
        list_valid_data = list(csv.reader(f))

    # Build small datasets (first 10 rows) as a quick smoke test.
    train_dataset = MyDataset(list_train_data[:10], SRC, LABEL)

    # Validation dataset
    valid_dataset = MyDataset(list_valid_data[:10], SRC, LABEL)

    # Pre-trained GloVe embeddings; out-of-vocabulary words are
    # initialized from a normal distribution.
    vectors = Vectors(name='../data/glove.6B.300d.txt', unk_init=torch.Tensor.normal_)

    for example in train_dataset:
        print(example.label)
        print(example.text)

    SRC.build_vocab(train_dataset, vectors=vectors)

    print(SRC.vocab.itos)
    print(SRC.vocab.stoi)