## Tokenizer demo: encode one text into fixed-length BERT inputs.
from transformers import AutoTokenizer, TFAutoModel

SEQ_LEN = 50  # every text is padded/truncated to this length
tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
model = TFAutoModel.from_pretrained("bert-base-cased")

# Calling the tokenizer object directly is the modern replacement for the
# deprecated `encode_plus` method; the keyword arguments are identical.
tokens = tokenizer('hello world',
                   max_length=SEQ_LEN,
                   truncation=True,
                   padding='max_length',      # pad out to SEQ_LEN
                   add_special_tokens=True,   # add the special [CLS]/[SEP] tokens
                   return_token_type_ids=False,
                   return_attention_mask=True,
                   return_tensors='tf')
# Token ids 101/102 are the special [CLS] (start) / [SEP] (end) markers.
# Every text should be preprocessed exactly like this.
print(tokens)
# import numpy as np


##  youtube (https://www.youtube.com/watch?v=s3LBdmZb00g)
import os

# BUG FIX: the variable name was misspelled "CUDA_DEVECI_ORDER", so the
# setting was silently ignored; the correct name is CUDA_DEVICE_ORDER.
os.environ['CUDA_DEVICE_ORDER'] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = '0'  # restrict TF to GPU 0

import ktrain
from ktrain import text
from sklearn.datasets import fetch_20newsgroups

# Five of the 20-newsgroups categories used for this classification demo.
categories = ['alt.atheism', 'soc.religion.christian', 'comp.graphics',
              'sci.med', 'rec.sport.baseball']
train = fetch_20newsgroups(subset='train',
                           categories=categories,
                           shuffle=True,
                           random_state=0)
test = fetch_20newsgroups(subset='test',
                          categories=categories,
                          shuffle=True,
                          random_state=0)
print(test)
test.keys()

x_train = train.data
y_train = train.target
x_test = test.data
y_test = test.target
len(x_train)
len(y_train)

## Build an ML classifier on top of a transformer backbone.
model_name = 'distilbert-base-uncased'
trans = text.Transformer(model_name, maxlen=512, class_names=categories)

# Run the transformer's own preprocessing over both splits.
train_data = trans.preprocess_train(x_train, y_train)
test_data = trans.preprocess_test(x_test, y_test)

model = trans.get_classifier()

learner = ktrain.get_learner(
    model,
    train_data=train_data,
    val_data=test_data,
    batch_size=16,
)
# NOTE(review): the original author reported problems with lr_find
# (it finds the best learning rate for this model), so it stays disabled.
# learner.lr_find(show_plot=True, max_epochs=10)
learner.fit_onecycle(1e-4, epochs=1)  # NOTE(review): fit_onecycle was also reported as flaky

learner.validate(class_names=categories)

# Inspect the 5 validation samples with the highest loss.
learner.view_top_losses(n=5, preproc=trans)

x_test[1106]

## Predict on new data.
predictor = ktrain.get_predictor(learner.model, preproc=trans)
x = 'Jesus Christ is the centrial figure of Christianity'  # (sic) typo preserved from original
predictor.predict(x)
# predictor.explain(x)
predictor.save('model')