from transformers import AutoTokenizer, AutoModelForSequenceClassification
from datasets import load_dataset


# Path to the pretrained checkpoint to fine-tune.
model_name = '../chinese-roberta-wwm-ext'

# Train/dev splits of the SENTI_ROBUST sentiment dataset (TSV files).
train_pth = '/home/mbk/rubbish/sen_cls/data/SENTI_ROBUST/train.tsv'
eval_pth = '/home/mbk/rubbish/sen_cls/data/SENTI_ROBUST/dev.tsv'

# Parallel lists of raw texts and integer labels
# (assumed: 0 = negative, 1 = positive).
train_texts, train_labels = [], []
eval_texts, eval_labels = [], []

# Load the training split: TSV with a header row, columns = (label, text).
# encoding is explicit — the data is Chinese and the platform default may
# not be UTF-8.  TODO(review): confirm the files are actually UTF-8.
with open(train_pth, 'r', encoding='utf-8') as f:
    next(f)  # skip the header row
    for line in f:
        line = line.rstrip('\n')
        if not line:
            continue  # tolerate blank/trailing lines
        # maxsplit=1 so a tab inside the text does not break unpacking
        label, text = line.split('\t', 1)
        train_texts.append(text)
        train_labels.append(int(label))

# Load the dev split: TSV with a header row, columns = (qid, label, text).
# Same robustness fixes as the training loader: explicit UTF-8, header
# skipped via the file iterator (so a missing trailing newline no longer
# drops the last row), and maxsplit so tabs inside the text are preserved.
with open(eval_pth, 'r', encoding='utf-8') as f:
    next(f)  # skip the header row
    for line in f:
        line = line.rstrip('\n')
        if not line:
            continue  # tolerate blank/trailing lines
        qid, label, text = line.split('\t', 2)
        eval_texts.append(text)
        eval_labels.append(int(label))


from datasets import Dataset

# The tokenizer must come from the same checkpoint the model will use.
tokenizer = AutoTokenizer.from_pretrained(model_name)

# Package the parallel lists as column-oriented dicts for HF Datasets.
train_dataset_dict = dict(text=train_texts, label=train_labels)
eval_dataset_dict = dict(text=eval_texts, label=eval_labels)

train_dataset = Dataset.from_dict(train_dataset_dict)
eval_dataset = Dataset.from_dict(eval_dataset_dict)

def tokenize_function(examples):
    """Tokenize a batch of examples to a fixed length of 256 tokens."""
    # max_length is adjustable; padding to max_length keeps tensors rectangular.
    return tokenizer(
        examples["text"],
        padding="max_length",
        truncation=True,
        max_length=256,
    )

# Tokenize both splits, drop the raw text column, and expose PyTorch tensors.
tokenized_train_dataset = train_dataset.map(tokenize_function, batched=True).remove_columns(["text"])
tokenized_train_dataset.set_format("torch")

tokenized_eval_dataset = eval_dataset.map(tokenize_function, batched=True).remove_columns(["text"])
tokenized_eval_dataset.set_format("torch")

# Report dataset sizes and a sample record (assumes >= 4 training rows).
print("数据预处理完成。")
print(f"训练样本数量: {len(tokenized_train_dataset)}")
print(f"验证样本数量: {len(tokenized_eval_dataset)}")
print(f"一个训练样本示例: {tokenized_train_dataset[3]}")