import numpy as np
import pandas as pd
import time
import streamlit as st
import matplotlib.pyplot as plt
import seaborn as sns

import jax
import jax.numpy as jnp

import torch
import torch.nn.functional as F

from transformers import AlbertTokenizer, AlbertForMaskedLM

from skeleton_modeling_albert import SkeletonAlbertForMaskedLM

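# Cache the tokenizer and model across Streamlit reruns;
# allow_output_mutation stops st.cache from re-hashing the mutable
# model object on every run.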
@st.cache(show_spinner=True, allow_output_mutation=True)
def load_model():
    tokenizer = AlbertTokenizer.from_pretrained('albert-xxlarge-v2')
    model = AlbertForMaskedLM.from_pretrained('albert-xxlarge-v2')
    return tokenizer, model

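# Drop every entry in st.session_state; wired to on_change of the text
# inputs below so stale results are discarded when a sentence is edited.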
def clear_data():
    # Delete from a snapshot of the keys: removing entries while
    # iterating over st.session_state itself would raise a RuntimeError.
    for key in list(st.session_state.keys()):
        del st.session_state[key]

if __name__ == '__main__':

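    # Page layout tweaks, injected below as raw CSS via st.markdown.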
    max_width = 1500
    padding_top = 0
    padding_right = 2
    padding_bottom = 0
    padding_left = 2

    define_margins = f"""
    <style>
        .appview-container .main .block-container{{
            max-width: {max_width}px;
            padding-top: {padding_top}rem;
            padding-right: {padding_right}rem;
            padding-left: {padding_left}rem;
            padding-bottom: {padding_bottom}rem;
        }}
    </style>
    """
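    # Hide the implicit row-index column that Streamlit renders in tables.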
    hide_table_row_index = """
    <style>
        tbody th {display:none}
        .blank {display:none}
    </style>
    """
    st.markdown(define_margins, unsafe_allow_html=True)
    st.markdown(hide_table_row_index, unsafe_allow_html=True)

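    # Load the (cached) tokenizer/model and look up the [MASK] token id.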
    tokenizer, model = load_model()
    # tokenizer('[MASK]').input_ids is [CLS, MASK, SEP]; stripping the
    # first and last ids leaves just the mask token id.
    mask_id = tokenizer('[MASK]').input_ids[1:-1][0]

    # A minimal pair differing only in 'less'/'more', so both sentences
    # tokenize to the same length and can be stacked into one batch.
    sent_1 = st.sidebar.text_input('Sentence 1',
                                   value='It is better to play a prank on Samuel than Craig because he gets angry less often.',
                                   on_change=clear_data)
    sent_2 = st.sidebar.text_input('Sentence 2',
                                   value='It is better to play a prank on Samuel than Craig because he gets angry more often.',
                                   on_change=clear_data)
    input_ids_1 = tokenizer(sent_1).input_ids
    input_ids_2 = tokenizer(sent_2).input_ids
    input_ids = torch.tensor([input_ids_1, input_ids_2])

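    # Run the skeleton ALBERT forward pass with an intervention applied
    # at layer 0; the exact semantics of the 'lay' entries and the
    # (8, 1, [0, 1]) spec are defined in skeleton_modeling_albert.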
    outputs = SkeletonAlbertForMaskedLM(model, input_ids, interventions={0: {'lay': [(8, 1, [0, 1])]}})
    logprobs = F.log_softmax(outputs['logits'], dim=-1)
    # For each position in sentence 1, sample one token id from the
    # predicted distribution, then decode the samples for display.
    preds = [torch.multinomial(torch.exp(probs), num_samples=1).squeeze(dim=-1)
             for probs in logprobs[0]]
    st.write([tokenizer.decode([token]) for token in preds])
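    # A deterministic alternative, if reproducible output is preferred
    # (a sketch: greedy argmax over the same log-probabilities):
    #   preds = logprobs[0].argmax(dim=-1)
    #   st.write([tokenizer.decode([token]) for token in preds])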