from typing import Optional

import pandas as pd
import torch

from torch.utils.data import DataLoader
# from transformers import AutoTokenizer
# from modelscope import AutoTokenizer

from config import Config, STRING_MASS_MODELSCOPE

class DataGenerator:
    """Minimal map-style dataset wrapping a list of samples for a DataLoader."""

    def __init__(self, data: Optional[list] = None):
        # Fall back to an empty list: the previous default of None made
        # __len__/__getitem__ raise TypeError on a default-constructed instance.
        self.data = data if data is not None else []

    def __len__(self):
        return len(self.data)

    def __getitem__(self, index):
        return self.data[index]

class XFileLoader:
    """Load a labeled-text CSV and expose it as a shuffled DataLoader.

    ``config`` must provide ``model_dir``, ``batch_size`` and ``max_length``;
    the optional ``mass`` key selects which hub the tokenizer is pulled from.
    """

    def __init__(self, config):
        self.config = config
        self.model_dir = self.config["model_dir"]
        self.data = []
        # Import lazily so only the configured hub client (modelscope vs.
        # transformers) needs to be installed.
        if self.config.get("mass") == STRING_MASS_MODELSCOPE:
            from modelscope import AutoTokenizer
        else:
            from transformers import AutoTokenizer

        self.tokenizer = AutoTokenizer.from_pretrained(
            self.model_dir, clean_up_tokenization_spaces=True
        )

    def load_file_data(self, file_path: str) -> DataLoader:
        """Read ``file_path`` (CSV: text in column 0, label in column 1) and
        return a shuffled DataLoader over the encoded samples."""
        # Reset so repeated calls do not accumulate samples from earlier files
        # (previously every call appended onto the same list).
        self.data = []
        csv = pd.read_csv(file_path)
        for row in csv.values:
            input_ids = torch.LongTensor(self.encode_sentence(row[0]))
            target = torch.LongTensor([row[1]])
            # NOTE(review): the extra nesting makes each sample
            # [[input_ids, target]] rather than [input_ids, target]; downstream
            # consumers are not visible here — confirm before flattening.
            self.data.append([[input_ids, target]])
        return self.get_data_loader()

    def get_data_loader(self):
        """Wrap the collected samples in a shuffled DataLoader."""
        dg = DataGenerator(self.data)
        return DataLoader(dg, batch_size=self.config["batch_size"], shuffle=True)

    def encode_sentence(self, text):
        """Tokenize ``text``, padded/truncated to config["max_length"]."""
        return self.tokenizer.encode(
            text,
            padding='max_length',
            max_length=self.config["max_length"],
            truncation=True,
        )

    # Pad or truncate a token-id sequence so a whole batch shares one length.
    # (Currently unused: encode_sentence already pads via the tokenizer.)
    def padding(self, input_id):
        input_id = input_id[:self.config["max_length"]]
        input_id += [0] * (self.config["max_length"] - len(input_id))
        return input_id


if __name__ == '__main__':
    # Smoke test: build the loader from the shared Config and dump each batch.
    loader = XFileLoader(Config)
    data_loader = loader.load_file_data("D:\\dataset\\大众点评评价数据\\dev.csv")
    for batch in data_loader:
        print(batch)
