from transformers import BertTokenizer, BertForMaskedLM


import torch


# --- Masked language modeling (MLM) demo -------------------------------------
tokenizer = BertTokenizer.from_pretrained("bert-base-chinese")
model = BertForMaskedLM.from_pretrained("bert-base-chinese")

# Input sentence with one [MASK] token; the label sentence supplies the
# ground-truth character ("各") at the masked position.
inputs = tokenizer(["并广泛动员社会[MASK]方面的力量"], return_tensors="pt")
labels = tokenizer(["并广泛动员社会各方面的力量"], return_tensors="pt")["input_ids"]  # returns a Tensor

# FIX: the MLM loss should be scored only at the masked position(s).
# Positions labeled -100 are ignored by the loss function, per the
# HuggingFace BertForMaskedLM documentation; without this, the loss also
# averages over every unmasked token (trivial copies of the input) and is
# not a meaningful masked-LM loss.
labels = torch.where(inputs["input_ids"] == tokenizer.mask_token_id,
                     labels, torch.full_like(labels, -100))

print(inputs)
print()
print(tokenizer(["并广泛动员社会各方面的力量"], return_tensors="pt"))
print()
print(labels)

outputs = model(**inputs, labels=labels)

loss = outputs.loss      # scalar cross-entropy at the masked position(s)
logits = outputs.logits  # shape (batch, seq_len, vocab_size): per-token prediction scores


from transformers import BertTokenizer, BertForNextSentencePrediction


import torch


# --- Next sentence prediction (NSP) demo -------------------------------------
tokenizer = BertTokenizer.from_pretrained("bert-base-chinese")
model = BertForNextSentencePrediction.from_pretrained("bert-base-chinese")

prompt = "在我的后花园，可以看见墙外有两棵树"
# Genuine continuation of the prompt (from Lu Xun's "Autumn Night").
# NOTE(review): "科" looks like a typo for "棵" in the famous quote —
# left byte-identical here; confirm whether it is intentional.
next_sentence1 = "一棵是枣树，另一科也是枣树"
# NOT a continuation: this is the essay's date signature.
next_sentence2 = "一九二四年九月十五日"

# Pair 1: sentence B really does follow sentence A.
encoding = tokenizer(prompt, next_sentence1, return_tensors="pt")
print(encoding)

# FIX: per the HuggingFace BertForNextSentencePrediction documentation,
# label 0 means "B is a continuation of A" and label 1 means "B is a
# random sequence". The original passed 1 here, mislabeling the genuine
# continuation and producing a meaningless loss.
outputs = model(**encoding, labels=torch.LongTensor([0]))

logits = outputs.logits  # shape (1, 2): [is-next score, is-random score]

print(logits[0, 0], '\n', logits[0, 1], logits.shape)

# Pair 2: sentence B is unrelated, so label 1 ("random sequence") is correct.
encoding = tokenizer(prompt, next_sentence2, return_tensors="pt")
print(encoding)

outputs = model(**encoding, labels=torch.LongTensor([1]))

logits = outputs.logits

print(logits[0, 0], '\n', logits[0, 1], logits.shape)



