import pandas as pd
import os
import torch
from torch.utils.data import Dataset, DataLoader
import numpy as np
from config import Config
from pytorch_bert import BertModel, BertTokenizer


class DatasetIter(Dataset):
    """Map-style Dataset over pre-tokenized samples.

    Each element of ``data`` is a 4-item sequence:
    ``[token_ids, label, actual_length, attention_mask]``.
    ``__getitem__`` converts each field to a tensor on ``device``.
    """

    def __init__(self, data, device):
        self.data = data
        self.device = device

    def __len__(self):
        return len(self.data)

    def __getitem__(self, index):
        # Unpack the four fields of one sample and tensorize each on the
        # target device.
        token_ids, label, act_len, att_mask = self.data[index][:4]
        fields = (token_ids, label, act_len, att_mask)
        return tuple(torch.tensor(f).to(self.device) for f in fields)


def build_dataset(config):
    """Build train/validation/test splits for the DBpedia classification task.

    Reads the DBpedia CSV files, tokenizes each document with the BERT
    tokenizer found under ``config.bert_path``, pads/truncates every
    sequence to a fixed length, and returns three NumPy object arrays
    whose rows are ``[token_ids, label, actual_length, attention_mask]``.

    Returns:
        (train, val, test) — each an ``np.ndarray`` of dtype object.
    """
    PAD, CLS = '[PAD]', '[CLS]'  # padding token, BERT aggregate-information token

    def load_dataset(path, seq_length, n_classes=14, val_per_class=1000):
        """Load one CSV; return (all, train_split, val_split) row arrays."""
        contents = []
        tokenizer = BertTokenizer.from_pretrained(config.bert_path + 'bert-base-uncased-vocab.txt')
        data = pd.read_csv(path, header=None, names=['class', 'title', 'content'])
        x = data['content'].values
        y = data['class'].values - 1  # labels in the file are 1-based

        # Per-class split: the last `val_per_class` samples of each class
        # form the validation set, the rest form the training set.
        train_idx = []
        val_idx = []
        for label in range(n_classes):
            label_positions = np.where(y == label)[0]  # hoisted: computed once per class
            train_idx.extend(label_positions[:-val_per_class])
            val_idx.extend(label_positions[-val_per_class:])

        for i in range(len(y)):
            token = [CLS] + tokenizer.tokenize(x[i])
            act_length = len(token)
            token_ids = tokenizer.convert_tokens_to_ids(token)

            if act_length < seq_length:
                # Pad: mask is 1 over real tokens, 0 over padding.
                mask = [1] * act_length + [0] * (seq_length - act_length)
                token_ids += [0] * (seq_length - act_length)
            else:
                # Truncate; note act_length keeps the pre-truncation length.
                mask = [1] * seq_length
                token_ids = token_ids[:seq_length]
            contents.append([token_ids, y[i], act_length, mask])
        # dtype=object is required: rows mix lists and scalars, and NumPy
        # >= 1.24 raises ValueError on such ragged input without it.
        contents = np.asarray(contents, dtype=object)
        return contents, contents[train_idx], contents[val_idx]

    _, train, val = load_dataset('dbpedia/dbpedia_csv/train.csv', 32)
    test, _, _ = load_dataset('dbpedia/dbpedia_csv/test.csv', 32)
    return train, val, test





