#!/usr/bin/env python
# coding: utf-8

# In[1]:


import torch
from pytorch_pretrained_bert import BertTokenizer, BertModel, BertForMaskedLM
import logging


# In[2]:


# Show INFO-level messages so pytorch_pretrained_bert reports progress
# (vocab loading, weight loading) during the cells below.
logging.basicConfig(level=logging.INFO)


# In[3]:


# Local paths to the pre-downloaded bert-base-uncased weights and vocab file.
# NOTE(review): absolute Windows paths — this notebook will not run on another
# machine as-is; consider a configurable DATA_DIR or the hub model name.
src_path = 'D:/coding/BERT/bert-base-uncased/bert-base-uncased'
vocab_path = 'D:/coding/BERT/bert-base-uncased/bert-base-uncased-vocab.txt'
# Build the WordPiece tokenizer directly from the vocabulary file.
tokenizer = BertTokenizer.from_pretrained(vocab_path)


# In[4]:


# Cloze-style question: two blanks ('_') to be filled by the masked LM.
text = "[CLS] We usually associate blogging with the _ : our children, grandchildren, nieces or nephews.         While the blogging landscape was once _ almost entirely by teens, it has opened to different age groups now. [SEP]"
tokenized_text = tokenizer.tokenize(text)
# Locate the blank positions dynamically instead of hard-coding indices
# (previously 8 and 28), which silently mask the wrong token if the
# WordPiece tokenization ever shifts by one.
blank_positions = [i for i, tok in enumerate(tokenized_text) if tok == '_']
assert len(blank_positions) == 2, (
    "expected exactly 2 blanks ('_') in the text, found %d" % len(blank_positions)
)
masked1, masked2 = blank_positions
tokenized_text[masked1] = '[MASK]'
tokenized_text[masked2] = '[MASK]'
# Map tokens to vocabulary ids; single sentence, so all segment ids are 0.
indexed_tokens = tokenizer.convert_tokens_to_ids(tokenized_text)
segments_ids = [0] * len(tokenized_text)
# Add the batch dimension expected by the model: shape (1, seq_len).
tokens_tensor = torch.tensor([indexed_tokens])
segments_tensors = torch.tensor([segments_ids])


# In[ ]:





# In[5]:


# Load pre-trained model (weights) with the masked-LM head.
model = BertForMaskedLM.from_pretrained(src_path)
model.eval()  # disable dropout so inference is deterministic
# Use the GPU when one is available; previously this hard-coded 'cuda' and
# crashed with a RuntimeError on CPU-only machines.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
tokens_tensor = tokens_tensor.to(device)
segments_tensors = segments_tensors.to(device)
model.to(device)
# Predict logits over the vocabulary for every position; no_grad avoids
# building the autograd graph we don't need for inference.
with torch.no_grad():
    predictions = model(tokens_tensor, segments_tensors)


# In[6]:


# confirm we were able to predict 'henson'
#predicted_index = torch.argmax(predictions[0, masked_index]).item()
#predicted_token = tokenizer.convert_ids_to_tokens([predicted_index])[0]


# In[7]:


def _blank_logit(token):
    """Return the model's logit for `token` at the second masked position.

    Factored out of four copy-pasted lines; keeps the original behavior of
    indexing the vocab-logit vector with a one-element id list.
    """
    token_id = tokenizer.convert_tokens_to_ids([token])
    return predictions[0, masked2][token_id]

# Score the four multiple-choice candidates for the second blank.
A = _blank_logit('damaged')
B = _blank_logit('occupied')
C = _blank_logit('prepared')
D = _blank_logit('designed')
# Last expression in the cell: display the four candidate scores
# (higher logit = more likely answer).
[A.item(), B.item(), C.item(), D.item()]


# In[ ]:




