import datasets
import torch

from src.dev import configs
# Monkey patch: some versions of `datasets` no longer expose
# `REPOCARD_FILENAME` on `datasets.config`, which (per the original note)
# breaks the modelscope import below — so restore it before importing.

if not hasattr(datasets.config, 'REPOCARD_FILENAME'):
    datasets.config.REPOCARD_FILENAME = 'README.md'
from modelscope.msdatasets import MsDataset
# HC3-Chinese open_qa training split; the "human_answers" column is used below.
ds = MsDataset.load('simpleai/HC3-Chinese', subset_name='open_qa', split='train')
# from transformers import AutoTokenizer, AutoModel


#
# tokenizer = AutoTokenizer.from_pretrained(configs.PRETRAINED_BERT_PATH)
# model = AutoModel.from_pretrained(configs.PRETRAINED_BERT_PATH)
# text = "我叫小明"
# encoded_input = tokenizer(text, return_tensors='pt')
# print(encoded_input)
# output = model(**encoded_input)

import gc

import torch.nn as NeuronNetwork
import transformers
from transformers import AutoTokenizer, AutoModel


# Load the tokenizer and BERT encoder from the locally configured checkpoint.
PRETRAIN = configs.PRETRAINED_BERT_PATH
tokenizer = AutoTokenizer.from_pretrained(PRETRAIN)
embed_model: transformers.BertModel = AutoModel.from_pretrained(PRETRAIN)

# Collect the first human answer of every record, skipping missing entries.
h = []
for answers in ds["human_answers"]:
    if answers[0] is not None:
        h.append(str(answers[0]))
# a = [i[0] for i in ds.select_columns("chatgpt_answers")["chatgpt_answers"]]

# Batch-tokenize: pad to the longest sequence in the batch, truncate at 256.
hu = tokenizer(
    h,
    return_tensors='pt',
    padding=True,
    truncation=True,
    max_length=256,
)["input_ids"]
# ai = torch.tensor(tokenizer(a, return_tensors='pt'), dtype=torch.int32)

# Interactive inspection: print each row of token ids; type "000" to stop.
for row in hu:
    print(row, type(row))
    if input() == "000":
        break