#!/usr/bin/env python  
#-*- coding:utf-8 _*-  
""" 
@author:hello_life 
@license: Apache Licence 
@file: data.py 
@time: 2022/04/21
@software: PyCharm 
description:
"""
import sys,os
# Make the project root (parent of the current working directory) importable
# so that the ``parameters.albert_config`` import below resolves when this
# script is run from inside a subdirectory.
sys.path.insert(0,os.path.dirname(os.getcwd()))

import pandas as pd
import torch
from torch.utils.data import Dataset, DataLoader
from transformers import AlbertTokenizer, DataCollatorWithPadding

from parameters.albert_config import Config

# Module-level singletons shared by IMDBDataset.x_transform and collate_fn.
# NOTE(review): both are built at import time, so importing this module loads
# the pretrained tokenizer from ``config.model_path`` as a side effect.
config=Config()
tokenizer=AlbertTokenizer.from_pretrained(config.model_path)

class IMDBDataset(Dataset):
    """IMDB movie-review dataset.

    Loads a CSV with a ``review`` column (raw text) and a ``sentiment``
    column ("positive"/"negative"). ``__getitem__`` yields the raw
    (review, sentiment) pair; tokenization and label encoding are done in
    ``collate_fn`` (batched) or via ``x_transform``/``y_transform``
    (per-sample).
    """

    def __init__(self, data_path):
        """Read the CSV at ``data_path`` into ``self.data`` (DataFrame)."""
        self.data = pd.read_csv(data_path)

    def __len__(self):
        """Return the number of samples.

        Raises:
            ValueError: if the two columns are misaligned. (Was an
            ``assert``, which is silently stripped under ``python -O``.)
        """
        if len(self.data["review"]) != len(self.data["sentiment"]):
            raise ValueError("review/sentiment columns differ in length")
        return len(self.data["review"])

    def __getitem__(self, idx):
        """Return the raw ``(review_text, sentiment_string)`` pair at ``idx``."""
        return self.data["review"].iloc[idx], self.data["sentiment"].iloc[idx]

    def x_transform(self, x):
        """Tokenize one review into padded/truncated ``pt`` tensors.

        Uses ``config.max_length`` so standalone tokenization agrees with
        ``collate_fn`` (this previously hard-coded ``max_length=300``).

        :param x: raw review text
        :return: transformers ``BatchEncoding`` of tensors
        """
        return tokenizer(
            x,
            padding="max_length",
            max_length=config.max_length,
            truncation=True,
            return_tensors="pt",
        )

    def y_transform(self, y):
        """Encode a sentiment string: "positive" -> 0, anything else -> 1.

        :param y: sentiment label string
        :return: scalar ``torch.int64`` tensor
        """
        label = 0 if y == "positive" else 1
        return torch.tensor(label).to(torch.int64)

def collate_fn(batch):
    """Collate raw (review, sentiment) pairs into model-ready tensors.

    Tokenizes the whole batch in one call (padded/truncated to
    ``config.max_length``) and maps sentiment strings to integer labels
    ("positive" -> 0, anything else -> 1), moving both to ``config.device``.

    :param batch: iterable of ``(review_text, sentiment_string)`` pairs
    :return: ``(BatchEncoding, int64 label tensor)`` on ``config.device``
    """
    texts = [text for text, _ in batch]
    labels = [0 if sentiment == "positive" else 1 for _, sentiment in batch]
    encoded = tokenizer(
        texts,
        padding="max_length",
        max_length=config.max_length,
        truncation=True,
        return_tensors="pt",
    )
    label_tensor = torch.tensor(labels).to(torch.int64)
    return encoded.to(config.device), label_tensor.to(config.device)

if __name__ == '__main__':
    # Smoke-test the data pipeline end to end: build the dataset, wrap it in
    # a DataLoader with the custom collate_fn, and pull one batch.
    data_path="../../torch-frame/Data/IMDB Dataset.csv"
    model_path="../from_pretrained"
    data_collator=DataCollatorWithPadding(tokenizer=tokenizer)
    # Bug fix: IMDBDataset.__init__ requires the CSV path; the original
    # called IMDBDataset() with no arguments, which raises TypeError.
    dataset=IMDBDataset(data_path)
    train_dataloader=DataLoader(dataset,shuffle=True,batch_size=16,collate_fn=collate_fn)

    for x,y in train_dataloader:
        # One batch is enough to verify tokenization and label encoding run.
        break