from  datasets import load_dataset

# Load the dataset from the Hugging Face Hub.
# cache_dir pins the download/cache location to a local folder.

ds = load_dataset("madao33/new-title-chinese",cache_dir="data_cache/")

# # Load only the training split
# train_dataset = load_dataset("madao33/new-title-chinese",split="train",cache_dir="data_cache/")
# print(train_dataset)
#
# # Load only a portion of the training split
# train_dataset_100 = load_dataset("madao33/new-title-chinese",split="train[:100]",cache_dir="data_cache/")
# print(train_dataset_100)
#
# train_dataset_50 = load_dataset("madao33/new-title-chinese",split="train[:50%]",cache_dir="data_cache/")
# print(train_dataset_50)


# Row selection and filtering.
# select() keeps only the rows at the given indices; filter() keeps rows
# whose title contains the character '人'.
first_two = ds['train'].select([0, 1])
print(first_two)

titles_with_person = ds['train'].filter(lambda example: '人' in example['title'])
print(titles_with_person)

# Preview the first 10 training examples: the full record, then just the title.
for idx, row in enumerate(ds['train']):
    if idx == 10:
        break
    print(row)
    print(row['title'])

# Dataset mapping: prepend a fixed marker to every title.
def add_prefix(example):
    """Prefix the example's 'title' field with 'title:' in place.

    Returns the same dict so it can be passed directly to Dataset.map().
    """
    example['title'] = f"title:{example['title']}"
    return example

# Apply add_prefix to every split of the DatasetDict.
prefix_dataset=ds.map(add_prefix)

# Show the first two prefixed titles from the training split.
print(prefix_dataset['train'][:2]["title"])

# Tokenize the dataset with a pretrained BERT tokenizer (downloads the
# vocab/config on first use).
from transformers import AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained("bert-base-chinese")

def prepare_data(examples):
    """Tokenize a batch for seq2seq training.

    Article bodies ('content') become the model inputs (truncated to 512
    tokens); tokenized titles (truncated to 32 tokens) are attached under
    the 'labels' key.
    """
    inputs = tokenizer(examples['content'], max_length=512, truncation=True)
    titles = tokenizer(examples['title'], max_length=32, truncation=True)
    inputs["labels"] = titles["input_ids"]
    return inputs

# Tokenize every split in batches.
tokenized_datasets=ds.map(prepare_data,batched=True)

print(tokenized_datasets)
# Drop the raw text columns once the dataset is tokenized.
tokenized_datasets=tokenized_datasets.remove_columns(['content','title'])
print(tokenized_datasets)

# Alternatively, drop the raw columns directly during map().
print(ds.map(prepare_data,batched=True,remove_columns=['content','title']))

# Save the tokenized dataset to local disk.
tokenized_datasets.save_to_disk("data")

# Reload the dataset from disk to verify the round trip.
# (The original chained assignment also bound an unused `local_dataset`
# alias to the same object; only `loaded_dataset` is ever used.)
from datasets import load_from_disk

loaded_dataset = load_from_disk("data")
print(loaded_dataset)

# Load a local CSV file as a dataset.
# NOTE(review): load_dataset is already imported at the top of the file;
# this re-import is redundant (but harmless).
from datasets import load_dataset

# Rebinds `ds` to the CSV-backed dataset, replacing the Hub dataset above.
ds=load_dataset("csv",data_files="ChnSentiCorp_htl_all.csv")
print(ds)