import os
# Route Hugging Face Hub traffic through the hf-mirror.com mirror.
# Must be set BEFORE transformers / huggingface_hub are imported below,
# since they read HF_ENDPOINT at import time.
os.environ["HF_ENDPOINT"] = "https://hf-mirror.com"

import torch
from transformers.file_utils import is_tf_available, is_torch_available
from transformers import BertTokenizerFast, BertForSequenceClassification
from transformers import Trainer, TrainingArguments
import numpy as np
import random
from sklearn.datasets import fetch_20newsgroups
from sklearn.model_selection import train_test_split

def set_seed(seed: int) -> None:
    """Seed the random, numpy and torch RNGs for reproducible runs.

    torch is imported unconditionally at the top of this file, so the
    previous ``is_torch_available()`` guard was redundant and has been
    removed. Note: TensorFlow is NOT seeded here.

    Args:
        seed (int): The seed to set in every RNG.
    """
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    # Safe to call even when CUDA is not available (it is a no-op then).
    torch.cuda.manual_seed_all(seed)

set_seed(1)

# The model we will fine-tune is the uncased base BERT.
# Text-classification models can be browsed here:
# https://huggingface.co/models?filter=text-classification
model_name = "bert-base-uncased"

# Maximum sequence length for each document/sentence sample.
max_length = 512
# Load the tokenizer.
# NOTE(review): cache_dir is a hard-coded Windows drive path — confirm it
# exists on the machine this runs on, or make it configurable.
tokenizer = BertTokenizerFast.from_pretrained(model_name, do_lower_case=True,cache_dir='e:/models/')

def read_20newsgroups(test_size=0.2, data_home="F:/datas/nlp/", random_state=None):
    """Download/load the 20 Newsgroups corpus and split it into train/test.

    Headers, footers and quote blocks are stripped so the classifier
    cannot shortcut on message metadata.

    Args:
        test_size (float): Fraction of samples held out for validation
            (default 0.2).
        data_home (str): sklearn cache directory for the dataset. Defaults
            to the original hard-coded path for backward compatibility;
            pass ``None`` to use sklearn's default location.
        random_state: Optional seed forwarded to ``train_test_split``.
            ``None`` (the default) preserves the original behavior of
            drawing from numpy's global RNG state (seeded by set_seed).

    Returns:
        tuple: ``((train_texts, valid_texts, train_labels, valid_labels),
        target_names)``.
    """
    dataset = fetch_20newsgroups(
        subset="all",
        shuffle=True,
        remove=("headers", "footers", "quotes"),
        data_home=data_home,
    )
    split = train_test_split(
        dataset.data,
        dataset.target,
        test_size=test_size,
        random_state=random_state,
    )
    return split, dataset.target_names

# Load the dataset and unpack the train/validation split.
(train_texts, valid_texts, train_labels, valid_labels), target_names = read_20newsgroups()

# Tokenize both splits: truncate anything longer than max_length.
# NOTE(review): padding=True pads to the longest sequence in the batch,
# not necessarily to max_length (the original comment claimed the latter).
train_encodings = tokenizer(train_texts, truncation=True, padding=True, max_length=max_length)
valid_encodings = tokenizer(valid_texts, truncation=True, padding=True, max_length=max_length)

