import pandas as pd
import torch
from torch.utils.data import Dataset, DataLoader
# from vocab import Vocab
from dataset.vocab import Vocab  # 外部文件调用
from torch.nn.utils.rnn import pad_sequence
import numpy as np


# ../data/train.csv ==> relative path when running this file directly
# ./data/train.csv  ==> relative path when this module is invoked from an external caller
def read_csv(path="./data/train.csv"):
    """Load the training CSV and return every column except the last.

    Args:
        path: location of the CSV file, relative to the caller's CWD.

    Returns:
        2-D numpy object array of the remaining columns.
    """
    # lineterminator="\n" guards against stray line breaks inside records.
    rows = pd.read_csv(path, lineterminator="\n").to_numpy()
    # The final column carries no meaningful information, so drop it.
    return rows[:, :-1]


def generate_vocab():
    """Build a character-level Vocab from the training sentences.

    Reads ./data/train.csv, splits each sentence (column 0) into single
    characters, and constructs a Vocab with the reserved special tokens
    <PAD>, <CLS>, and <SEP>.
    """
    raw = read_csv()
    # Character-level tokenization: one token per character of the sentence.
    char_tokens = [list(str(text)) for text in list(raw[:, 0])]
    return Vocab(char_tokens, retired_tokens=['<PAD>', '<CLS>', '<SEP>'])


def sentences2idx(sentences, vocab=None):
    """Convert raw sentences into lists of token-index tensors.

    Each sentence is split into characters and wrapped with <CLS>/<SEP>
    markers before being mapped to vocabulary indices.

    Args:
        sentences: iterable of raw sentence values (coerced via ``str``).
        vocab: optional Vocab-like object exposing ``to_idx``. When None
            (the original behavior), a vocabulary is rebuilt from the
            training CSV — expensive, so pass a prebuilt vocab to reuse it.

    Returns:
        list of 1-D ``torch.Tensor`` index sequences, one per sentence.
    """
    if vocab is None:
        # Backward compatible default: rebuild vocab from ./data/train.csv.
        vocab = generate_vocab()
    return [
        torch.tensor(vocab.to_idx(['<CLS>'] + list(str(sentence)) + ['<SEP>']))
        for sentence in list(sentences)
    ]


class TextClassifierDataset(Dataset):
    """Map-style dataset pairing token-index tensors with their labels.

    Expects ``data`` with raw sentence text in column 0 and the label in
    column 1; sentences are converted to index tensors up front.
    """

    def __init__(self, data):
        super().__init__()
        # Eagerly convert all sentences so __getitem__ is a cheap lookup.
        self.sentences = sentences2idx(data[:, 0])
        self.labels = data[:, 1]

    def __len__(self):
        """Number of samples in the dataset."""
        return len(self.sentences)

    def __getitem__(self, index):
        """Return the (token_tensor, label) pair at ``index``."""
        sample = (self.sentences[index], self.labels[index])
        return sample


# 句子对齐
# Batch alignment: pad every sentence in a batch to the same length.
def collate_fn(batch):
    """Collate (token_tensor, label) pairs into padded batch tensors.

    Args:
        batch: list of (1-D token tensor, label) tuples from the Dataset.

    Returns:
        Tuple of (padded sentence tensor with batch dim first, list of
        per-sentence valid lengths, 1-D float label tensor). The valid
        lengths allow <PAD> positions to be masked out of softmax.
    """
    sentences, labels = zip(*batch)
    # Record each sentence's true length before padding (padding mask info).
    valid_lens = list(map(len, sentences))
    # Shorter sentences are filled with <PAD> (index 0) up to the batch max.
    padded = pad_sequence(sentences, batch_first=True, padding_value=0)
    label_tensor = torch.tensor([float(y) for y in labels])
    return padded, valid_lens, label_tensor


def generate_loader(batch_size=20):
    """Build a shuffled DataLoader over the training CSV.

    Args:
        batch_size: number of samples per batch (default 20).

    Returns:
        DataLoader yielding (padded_sentences, valid_lens, labels) batches
        via ``collate_fn``.
    """
    dataset = TextClassifierDataset(read_csv())
    return DataLoader(
        dataset,
        batch_size=batch_size,
        shuffle=True,
        collate_fn=collate_fn,
    )

# if __name__ == '__main__':
#     data = read_csv()
#     for i, item in enumerate(data):
#         print(item)
#         print(int(item[1]))
# str = ""
# indices = []
# for i, item in enumerate(data):
#     if item[1] == -1:
#         str += item[0]
#         indices.append(i)
#     else:
#         str = ""
#         print(str)
