import os

# HF_ENDPOINT must be exported BEFORE any huggingface library is imported:
# huggingface_hub reads the endpoint into a module-level constant at import
# time, so setting it after `from transformers import ...` has no effect.
os.environ["HF_ENDPOINT"] = "https://hf-mirror.com"

import pandas as pd
import torch
from torch.utils.data import DataLoader
from transformers import AutoTokenizer, DataCollatorWithPadding
from datasets import load_dataset, Dataset, load_from_disk

# Guard CUDA calls so the script also runs on CPU-only machines
# (the original called these unconditionally and crashed without a GPU).
if torch.cuda.is_available():
    torch.cuda.empty_cache()
    torch.cuda.set_device(0)


# Load a dataset from the Hugging Face Hub (downloaded once, then cached).
datasets = load_dataset("madao33/new-title-chinese")
print(datasets)
print("------------------------------1------------------------------------------")
# Load one specific task (config) out of a dataset collection.
boolq_dataset = load_dataset("super_glue", "boolq", trust_remote_code=True)
# Inspect the dataset structure (splits, features, sizes).
print(boolq_dataset)
# Inspect a single sample.
print(boolq_dataset["train"][0])
print("-------------------------------2-----------------------------------------")
# Load by split; the commented lines show the different slicing syntaxes.
# dataset = load_dataset("madao33/new-title-chinese", split="train")
# dataset = load_dataset("madao33/new-title-chinese", split="train[10:100]")
# dataset = load_dataset("madao33/new-title-chinese", split="train[:50%]")
dataset = load_dataset("madao33/new-title-chinese", split=["train[:50%]", "train[50%:]"])
print(dataset)
# print("-------------------------------3-----------------------------------------")
# Inspect the dataset: single row, row slice, one column, schema.
print(datasets["train"][0])
print(datasets["train"][:2])
print(datasets["train"]["title"][:5])
print(datasets["train"].column_names)
print(datasets["train"].features)
print("-------------------------------4-----------------------------------------")
# Dataset splitting.
# NOTE(review): the original first assigned `dataset = datasets["train"]` and
# immediately overwrote it with the boolq train split; the dead assignment is
# dropped here.
dataset = boolq_dataset["train"]
# train_test_split returns a new DatasetDict and does not modify `dataset` in
# place, so compute the stratified 90/10 split once instead of calling it
# twice (the original discarded the first call's result and recomputed it).
# Classification datasets can be split proportionally per class via
# stratify_by_column.
split = dataset.train_test_split(test_size=0.1, stratify_by_column="label")
print(split)
print("--------------------------------5----------------------------------------")
# Row selection and filtering.
print(datasets["train"].select([0, 1]))
# Keep only examples whose title contains "中国" (China).
filter_dataset = datasets["train"].filter(lambda example: "中国" in example["title"])
print(filter_dataset["title"][:5])
print("---------------------------------6---------------------------------------")
# Dataset mapping
def add_prefix(example):
    """Mapping function: tag the example's title with a 'Prefix: ' marker.

    Mutates ``example`` in place and returns it, as ``Dataset.map`` expects.
    """
    example["title"] = f'Prefix: {example["title"]}'
    return example


# Apply the mapping function to every split of the DatasetDict.
prefix_dataset = datasets.map(add_prefix)
print(prefix_dataset["train"][:10]["title"])

tokenizer = AutoTokenizer.from_pretrained("bert-base-chinese")


def preprocess_function(example, tokenizer=tokenizer):
    """Tokenize article bodies and titles for title-generation training.

    The content is encoded as the model input (truncated to 512 tokens); the
    encoded title's input_ids become the labels (truncated to 32 tokens).
    The module-level tokenizer is bound as a default so ``Dataset.map`` can
    call this with a single argument.
    """
    encoded = tokenizer(example["content"], max_length=512, truncation=True)
    # The labels are simply the token ids of the encoded title.
    encoded["labels"] = tokenizer(example["title"], max_length=32, truncation=True)["input_ids"]
    return encoded


# Variants of map shown below: plain, multi-process, and batched.
# processed_datasets = datasets.map(preprocess_function)
# print(processed_datasets)
# processed_datasets = datasets.map(preprocess_function, num_proc=2)
# print(processed_datasets)
# processed_datasets = datasets.map(preprocess_function, batched=True)
# print(processed_datasets)
# Batched map, dropping the original raw-text columns from the result.
processed_datasets = datasets.map(preprocess_function, batched=True, remove_columns=datasets["train"].column_names)
print(processed_datasets)
print("---------------------------------7---------------------------------------")
# Save to / load back from local disk.
processed_datasets.save_to_disk("./processed_data")
processed_datasets = load_from_disk("./processed_data")
print(processed_datasets)
print("---------------------------------8---------------------------------------")
# Load a local file directly as a dataset.
dataset = load_dataset("csv", data_files="./ChnSentiCorp_htl_all.csv", split="train")
print(dataset)
dataset = Dataset.from_csv("./ChnSentiCorp_htl_all.csv")
print(dataset)
print("---------------------------------9---------------------------------------")
# Load several files (e.g. a whole folder's contents) as one dataset.
dataset = load_dataset("csv", data_files=["./all_data/ChnSentiCorp_htl_all.csv", "./all_data/ChnSentiCorp_htl_all copy.csv"], split='train')
print(dataset)
print("---------------------------------10---------------------------------------")
# Build a dataset by converting data already loaded in another format.
data = pd.read_csv("./ChnSentiCorp_htl_all.csv")
print(data.head())
dataset = Dataset.from_pandas(data)
print(dataset)
# List data must be a list of dicts ({} per item) so field names are explicit.
data = [{"text": "abc"}, {"text": "def"}]
# data = ["abc", "def"]
print(Dataset.from_list(data))
print("---------------------------------11---------------------------------------")
# Load via the generic json loader / a custom loading script.
dataset = load_dataset("json", data_files="./cmrc2018_trial.json", field="data")
print(dataset)
dataset = load_dataset("./load_script.py", split="train", trust_remote_code=True)
print(dataset[0])
print("---------------------------------12---------------------------------------")
# Dataset with DataCollator
dataset = load_dataset("csv", data_files="./ChnSentiCorp_htl_all.csv", split='train')
# Drop rows whose review text is missing.
dataset = dataset.filter(lambda x: x["review"] is not None)
print(dataset)
tokenizer = AutoTokenizer.from_pretrained("bert-base-chinese")
def process_function(examples):
    """Batched mapping function: tokenize reviews and attach their labels."""
    # Relies on the module-level `tokenizer` (bert-base-chinese).
    encoded = tokenizer(examples["review"], max_length=128, truncation=True)
    encoded["labels"] = examples["label"]
    return encoded
# Tokenize the whole dataset, dropping the raw columns.
tokenized_dataset = dataset.map(process_function, batched=True, remove_columns=dataset.column_names)
print(tokenized_dataset)
print(tokenized_dataset[:3])
# DataCollatorWithPadding pads each batch to the longest sequence in it.
collator = DataCollatorWithPadding(tokenizer=tokenizer)
dl = DataLoader(tokenized_dataset, batch_size=4, collate_fn=collator, shuffle=True)
num = 0
# Print a few batch shapes to show per-batch dynamic padding, then stop.
for batch in dl:
    print(batch["input_ids"].size())
    num += 1
    if num > 10:
        break
print("---------------------------------13---------------------------------------")