import os
from functools import lru_cache

import torch
from torch.utils.data import DataLoader, random_split
from transformers import AutoTokenizer
from datasets import load_dataset

def get_dataset_by_dir(data_dir="data/"):
    """Load every CSV file under *data_dir* as a single HF dataset.

    Args:
        data_dir: Directory scanned for CSV files (default: ``"data/"``).

    Returns:
        The ``"train"`` split produced by ``datasets.load_dataset``.
    """
    dataset = load_dataset("csv", data_dir=data_dir, split="train")
    return dataset

def get_splited_dataset(lengths=(0.9, 0.1)):
    """Split the CSV dataset into train/validation subsets.

    Args:
        lengths: Train/validation split fractions (default: 90%/10%).

    Returns:
        Tuple ``(trainset, validset)`` of ``torch.utils.data.Subset``.
    """
    dataset = get_dataset_by_dir()
    print(f"num_rows of dataset:{dataset.num_rows}")
    # random_split accepts fractional lengths (torch >= 1.13).
    trainset, validset = random_split(dataset=dataset, lengths=list(lengths))
    return trainset, validset


@lru_cache(maxsize=1)
def get_tokenizer():
    """Return the pretrained tokenizer, cached after the first load.

    ``collate_fn`` calls this once per batch; instantiating a tokenizer
    from the hub/disk each time is expensive, so cache the single instance.
    """
    tokenizer = AutoTokenizer.from_pretrained('uer/roberta-base-finetuned-dianping-chinese')
    return tokenizer

def collate_fn(batch):
    """Collate a list of dataset rows into one tokenized model batch.

    Args:
        batch: List of dict-like rows with ``"review"`` (text) and
               ``"label"`` keys — the row shape produced by
               ``datasets.load_dataset``.

    Returns:
        A tokenizer ``BatchEncoding`` with tensors padded/truncated to
        length 128, plus a ``"labels"`` tensor.
    """
    tokenizer = get_tokenizer()
    # Rows loaded via load_dataset expose columns as dict keys.
    texts = [item["review"] for item in batch]
    labels = [item["label"] for item in batch]

    inputs = tokenizer(texts,
                       max_length=128,
                       padding="max_length",
                       truncation=True,
                       return_tensors="pt")
    inputs["labels"] = torch.tensor(labels)
    return inputs

def get_dataloader():
    """Build the train/validation DataLoaders over the dataset split.

    Returns:
        Tuple ``(train_dataloader, valid_dataloader)``; training batches
        of 32 are shuffled, validation batches of 64 are not.
    """
    trainset, validset = get_splited_dataset()
    print(f"trainset:{trainset}")
    print(f"validset:{validset}")

    train_dataloader = DataLoader(
        trainset,
        batch_size=32,
        shuffle=True,
        collate_fn=collate_fn,
    )
    valid_dataloader = DataLoader(
        validset,
        batch_size=64,
        shuffle=False,
        collate_fn=collate_fn,
    )
    return train_dataloader, valid_dataloader

def test_dataloader():
    """Smoke-test the pipeline: fetch and print one validation batch."""
    train_dataloader, valid_dataloader = get_dataloader()
    # next(iter(...)) fetches the first batch directly; the original
    # next(enumerate(...))[1] built a throwaway (index, batch) pair.
    # Also renamed the local from `re`, which shadowed the stdlib module.
    batch = next(iter(valid_dataloader))
    print("-----------------------------------")
    print(batch)
    print("-----------------------------------")

if __name__ == "__main__":
    # Route HF hub downloads through the local proxy before any network call.
    os.environ.update(
        HTTP_PROXY='http://127.0.0.1:10792',
        HTTPS_PROXY='http://127.0.0.1:10792',
    )

    test_dataloader()