import streamlit as st
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM
st.title('Informal to Formal')
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
st.text('''How To Make Prompt:
informal english: space is huge and needs to be explored.
Translated into the Style of Abraham Lincoln: space ( awaits traversal / is a boundless expanse ), a new world whose boundaries are endless.
informal english: i am very ready to do that just that.
Translated into the Style of Abraham Lincoln: you can assure yourself of my readiness to work toward this end.
Translated into the Style of Abraham Lincoln: please be assured that i am most ready to undertake this laborious task.
informal english: meteors are much harder to see, because they are only there for a fraction of a second.
Translated into the Style of Abraham Lincoln: meteors are not readily detectable, lasting for mere fractions of a second.
informal english: national parks are a big part of the us culture.
Translated into the Style of Abraham Lincoln: the culture of the united states is ( inextricably ( bound up with / molded by / enriched by / enlivened by ) its ( serene / picturesque / pristine / breathtaking ) national parks ).
informal english:''')
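# A minimal sketch (not used by the form logic below) of how the few-shot prompt shown
# above could be assembled programmatically from a single informal sentence. The name
# build_lincoln_prompt and the choice of exemplar pairs are illustrative assumptions
# drawn from the instructions displayed to the user, not part of the original Space.
FEW_SHOT_EXAMPLES = [
    ("space is huge and needs to be explored.",
     "space ( awaits traversal / is a boundless expanse ), a new world whose boundaries are endless."),
    ("meteors are much harder to see, because they are only there for a fraction of a second.",
     "meteors are not readily detectable, lasting for mere fractions of a second."),
]

def build_lincoln_prompt(informal_sentence):
    # Stack each exemplar as an "informal english / Translated into the Style of
    # Abraham Lincoln" pair, then leave the final translation open for the model.
    lines = []
    for informal, formal in FEW_SHOT_EXAMPLES:
        lines.append("informal english: " + informal)
        lines.append("Translated into the Style of Abraham Lincoln: " + formal)
    lines.append("informal english: " + informal_sentence)
    lines.append("Translated into the Style of Abraham Lincoln:")
    return "\n".join(lines)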
# Load the tokenizer and the fine-tuned GPT-2 checkpoint, then move the model to the
# selected device so its weights live on the same device as the inputs built below.
tokenizer = AutoTokenizer.from_pretrained("gpt2")
model = AutoModelForCausalLM.from_pretrained("BigSalmon/MrLincoln12").to(device)
model.eval()
with st.form(key='my_form'):
    prompt = st.text_area(label='Enter sentence')
    submit_button = st.form_submit_button(label='Submit')
    if submit_button:
        with torch.no_grad():
            # Encode the prompt and run a single forward pass to get next-token logits.
            text = tokenizer.encode(prompt)
            myinput = torch.tensor([text]).to(device)
            logits, past_key_values = model(myinput, past_key_values=None, return_dict=False)
            # Keep only the logits for the last position: the next-token distribution.
            logits = logits[0, -1]
            probabilities = torch.nn.functional.softmax(logits, dim=-1)
            # Surface the 60 most likely next tokens as suggestions.
            best_logits, best_indices = logits.topk(60)
            best_words = [tokenizer.decode([idx.item()]) for idx in best_indices]
            text.append(best_indices[0].item())
            best_probabilities = probabilities[best_indices].tolist()
            st.write(best_words)
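            # Optional sketch: instead of only listing candidate next tokens, a full
            # continuation could be decoded with the generate API. The sampling settings
            # below (max_new_tokens=40, top_p=0.9, temperature=0.8) are illustrative
            # assumptions, not values taken from the original Space.
            generated = model.generate(
                myinput,
                max_new_tokens=40,
                do_sample=True,
                top_p=0.9,
                temperature=0.8,
                pad_token_id=tokenizer.eos_token_id,
            )
            # Strip the prompt tokens and show only the newly generated text.
            continuation = tokenizer.decode(generated[0][myinput.shape[1]:], skip_special_tokens=True)
            st.write(continuation)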