"""Quick experiments with the ERNIE model.

Stage 1: echo stdin lines with every literal "^A" sequence removed.
Stages 2 and 3 (below) run masked-LM and plain-encoder demos.
"""
import re

# NOTE(review): the original `while True` loop never terminated and crashed
# with an uncaught EOFError at end of input, so the model demos below could
# never run. Catch EOF and fall through instead. Also renamed the loop
# variable so it no longer shadows the builtin `str`.
try:
    while True:
        line = input()
        # Remove each literal "^A" (caret followed by A) from the line.
        print(re.sub(r'\^A', '', line))
except EOFError:
    pass

# Masked-LM test
import torch
from transformers import BertTokenizer, BertForMaskedLM, BertModel

# Masked-LM demo: ask ERNIE to fill in the [MASK] positions of a Chinese
# news sentence, then print its predictions for every non-[CLS] position.
tokenizer = BertTokenizer.from_pretrained('nghuyong/ernie-1.0')
input_tx = "[CLS] 6月24日外交部例行记者会上，针对近期不少媒体对中印边境冲突事件报道出虚假信息，外交部发言人赵立坚就中印边境问题来龙去脉做了详细澄清。赵立坚说，近期中印边界西段加勒万河谷冲突事件，引发国内外广泛关注。6月19日，已经详细介绍了 [MASK] [MASK] 的来龙去脉，此次事件的是非曲直十分清楚，责任完全不在中方。"
# input_tx = "[CLS] [MASK] [MASK] [MASK] 是中国神魔小说的经典之作，与《三国演义》《水浒传》《红楼梦》并称为中国古典四大名著。[SEP]"
tokenized_text = tokenizer.tokenize(input_tx)
indexed_tokens = tokenizer.convert_tokens_to_ids(tokenized_text)

# Single-example batch; a single all-zero segment (token_type) row.
tokens_tensor = torch.tensor([indexed_tokens])
segments_tensors = torch.zeros(1, len(tokenized_text), dtype=torch.long)

model = BertForMaskedLM.from_pretrained('nghuyong/ernie-1.0')
model.eval()

with torch.no_grad():
    # Element 0 of the forward output holds the per-position vocab logits.
    predictions = model(tokens_tensor, token_type_ids=segments_tensors)[0]

# Most likely vocab id at positions 0 .. len-2 (the last position is skipped).
n_positions = len(tokenized_text) - 1
predicted_index = [predictions[0, pos].argmax().item() for pos in range(n_positions)]
# Map ids back to tokens, dropping position 0 ([CLS]).
predicted_token = [tokenizer.convert_ids_to_tokens([token_id])[0]
                   for token_id in predicted_index[1:n_positions]]

print('Predicted token is:', predicted_token)

# Forward-pass demo: encode a two-string batch with the bare ERNIE encoder
# and print the pooled ([CLS]-derived) representation.
tokenizer = BertTokenizer.from_pretrained('nghuyong/ernie-1.0')
model = BertModel.from_pretrained('nghuyong/ernie-1.0')
model.eval()
string = 'encode decode bert transformers.'
strings = (string, string + string)
# Pad to the longest example in the batch, truncate at 512, return torch tensors.
batch = tokenizer(strings, max_length=512, padding=True, truncation=True, return_tensors="pt")
print(batch)
print(batch['input_ids'])
print(batch['attention_mask'])
with torch.no_grad():
    # transformers >= 4 returns a ModelOutput, not a plain tuple; tuple-unpacking
    # it yields its string *keys*, not tensors. Integer indexing works on both
    # the legacy tuple return and ModelOutput, so index explicitly.
    outputs = model(batch['input_ids'], attention_mask=batch['attention_mask'])
    sequence_output, pooled_output = outputs[0], outputs[1]
# Hoist the cpu().numpy() conversion so it is done once, not twice.
pooled_np = pooled_output.cpu().numpy()
print(pooled_np)
print(pooled_np.shape)
