import tensorflow as tf
# get gpu device name
device_name=tf.test.gpu_device_name()
import tensorflow_datasets as tfds
# Load IMDB movie reviews as supervised (text, label) pairs, plus metadata.
(ds_train,ds_test),ds_info=tfds.load('imdb_reviews',
                                     split=(tfds.Split.TRAIN,tfds.Split.TEST),
                                     as_supervised=True,
                                     with_info=True)
print('info',ds_info)

# Preview the first 5 training examples; review text comes back as bytes,
# so it must be decoded before printing.
for review,label in tfds.as_numpy(ds_train.take(5)):
    print('review:',review.decode()[0:50],label)

#%%
from transformers import BertTokenizer

# Lower-cased BERT tokenizer; the `tokenizer` name is reused by every cell below.
tokenizer = BertTokenizer.from_pretrained('bert-base-uncased', do_lower_case=True)

# Peek at a slice of the WordPiece vocabulary to see what the tokens look like.
vocab_tokens = list(tokenizer.get_vocab())
print(vocab_tokens[5000:5020])

#%%
# Manual walk-through of what encode_plus does internally: special tokens,
# WordPiece id mapping, padding, attention mask and token type ids.
max_length_test=20
test_sentence='Test tokenization sentence. Followed by another sentences'

# Add special tokens. The surrounding spaces matter: the basic tokenizer splits
# on whitespace first, so '[CLS]' / '[SEP]' are only preserved as single special
# tokens when they stand alone (the original '[CLS]'+text concatenation would
# have been split into '[', 'cls', ']').
test_sentence_with_special_tokens = '[CLS] ' + test_sentence + ' [SEP]'
tokenized = tokenizer.tokenize(test_sentence_with_special_tokens)
print('tokenized', tokenized)

# Convert tokens to their ids in the WordPiece vocabulary.
input_ids = tokenizer.convert_tokens_to_ids(tokenized)
attention_mask = [1] * len(input_ids)

# Pre-calculate the pad length so every example ends up with the same length.
padding_length = max_length_test - len(input_ids)

# Pad shorter texts with the pad id (0) up to max_length_test.
input_ids = input_ids + [0] * padding_length

# Do not focus attention on padded positions.
attention_mask = attention_mask + [0] * padding_length

# Token type ids distinguish sentence pairs (needed e.g. for question
# answering); for single-sentence classification they are all 0.
token_type_ids = [0] * max_length_test

# Bug fix vs the original demo: use the PADDED ids (the padded list was built
# but never used) and the standard key name 'input_ids' (was 'token_ids').
bert_input = {
    'input_ids': input_ids,
    'token_type_ids': token_type_ids,
    'attention_mask': attention_mask
}
print(bert_input)     # compare with tokenizer.encode_plus output below

#%%
# Same preprocessing in one call; compare the output with the manual steps above.
# `pad_to_max_length=True` is deprecated in recent transformers releases;
# `padding='max_length'` is the supported equivalent.
bert_input = tokenizer.encode_plus(test_sentence,
                                   add_special_tokens=True,
                                   max_length=max_length_test,
                                   padding='max_length',
                                   return_attention_mask=True,
                                   truncation=True)
print('encoded',bert_input)
#%% Hyperparameters choice
# BERT supports input sequences of up to 512 tokens
max_length=512
batch_size=6

#%% Encoding train and test dataset
def convert_example_to_feature(review):
    """Tokenize one review string into BERT features.

    Combines tokenization, WordPiece id mapping, special-token insertion and
    padding to `max_length`. Returns the encode_plus dict with 'input_ids',
    'token_type_ids' and 'attention_mask', each of length `max_length`.
    """
    return tokenizer.encode_plus(review,
                                 add_special_tokens=True,
                                 max_length=max_length,
                                 # 'max_length' padding replaces the deprecated
                                 # pad_to_max_length=True flag
                                 padding='max_length',
                                 return_attention_mask=True,
                                 # bug fix: without truncation, reviews longer
                                 # than max_length were passed through and would
                                 # break the fixed-size model input
                                 truncation=True,
                                 )
# map to the expected input of TFBertForSequenceClassification
def map_example_to_dict(input_ids,attention_masks,token_type_ids,label):
    """Shape one encoded example into (feature-dict, label) for model.fit."""
    return {
        'input_ids': input_ids,
        'token_type_ids': token_type_ids,
        # bug fix: the model expects the key 'attention_mask' (singular);
        # the original 'attention_masks' key would not be recognized
        'attention_mask': attention_masks
    }, label
def encode_examples(ds, limit=-1):
    """Encode a supervised (text, label) dataset into a BERT-ready tf.data.Dataset.

    Each review is tokenized and padded via convert_example_to_feature, then the
    collected features are rebuilt as a Dataset of (feature-dict, label) pairs.
    limit > 0 restricts encoding to the first `limit` examples.
    """
    # accumulate plain lists so the final dataset can be built from slices
    input_ids_list = []
    token_type_ids_list = []
    attention_mask_list = []
    label_list = []
    if limit > 0:
        ds = ds.take(limit)
    for review, label in tfds.as_numpy(ds):
        # bug fix: was `rebiew.decede()` — two typos that raised at runtime
        bert_input = convert_example_to_feature(review.decode())
        input_ids_list.append(bert_input['input_ids'])
        token_type_ids_list.append(bert_input['token_type_ids'])
        attention_mask_list.append(bert_input['attention_mask'])
        label_list.append([label])
    return tf.data.Dataset.from_tensor_slices(
        (input_ids_list, attention_mask_list, token_type_ids_list, label_list)
    ).map(map_example_to_dict)

# train dataset: shuffle with a 10k-example buffer before batching
ds_train_encoded=encode_examples(ds_train).shuffle(10000).batch(batch_size)
# test dataset: no shuffling needed for evaluation
ds_test_encoded=encode_examples(ds_test).batch(batch_size)

#%% Model initialization
from transformers import TFBertForSequenceClassification
import tensorflow as tf
# recommended learning rates for Adam when fine-tuning BERT: 5e-5, 3e-5, 2e-5
learning_rate=2e-5
# just 1 epoch for illustration; more epochs may help as long as the model
# does not start overfitting
number_of_epochs=1
# model initialization: pretrained BERT plus a fresh classification head
model=TFBertForSequenceClassification.from_pretrained('bert-base-uncased')
# Adam with a small epsilon, as commonly recommended for BERT fine-tuning
optimizer=tf.keras.optimizers.Adam(learning_rate=learning_rate,epsilon=1e-8)

# labels are integer class ids (not one-hot), so use sparse categorical cross
# entropy; the model outputs raw logits, hence from_logits=True
loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)
metric=tf.keras.metrics.SparseCategoricalAccuracy('accuracy')
model.compile(optimizer=optimizer,loss=loss,metrics=[metric])


#%% Training
# bug fix: validation_data referenced the undefined name `test_encoded`
# (NameError); the encoded test set is `ds_test_encoded`
bert_history = model.fit(ds_train_encoded,
                         epochs=number_of_epochs,
                         validation_data=ds_test_encoded)


