import streamlit as st
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM


st.title('GPT2: To see all prompt outlines: https://huggingface.co/BigSalmon/InformalToFormalLincoln45')
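
# This Streamlit app wraps a GPT-2-style "informal to formal (Lincoln)" model: rather than
# sampling a full completion, it displays the most likely next tokens for a given prompt.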

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

@st.cache(allow_output_mutation=True)
def get_model():

    tokenizer = AutoTokenizer.from_pretrained("BigSalmon/InformalToFormalLincoln55")
    model = AutoModelForCausalLM.from_pretrained("BigSalmon/InformalToFormalLincoln55")

    #tokenizer = AutoTokenizer.from_pretrained("BigSalmon/InformalToFormalLincoln51")
    #model = AutoModelForCausalLM.from_pretrained("BigSalmon/InformalToFormalLincoln51")

    #tokenizer = AutoTokenizer.from_pretrained("BigSalmon/InformalToFormalLincoln45")
    #model = AutoModelForCausalLM.from_pretrained("BigSalmon/InformalToFormalLincoln49")
    
    #tokenizer = AutoTokenizer.from_pretrained("BigSalmon/InformalToFormalLincoln43")
    #model = AutoModelForCausalLM.from_pretrained("BigSalmon/InformalToFormalLincoln43")

    #tokenizer = AutoTokenizer.from_pretrained("BigSalmon/InformalToFormalLincoln41")
    #model = AutoModelForCausalLM.from_pretrained("BigSalmon/InformalToFormalLincoln41")

    #tokenizer = AutoTokenizer.from_pretrained("BigSalmon/InformalToFormalLincoln38")
    #model = AutoModelForCausalLM.from_pretrained("BigSalmon/InformalToFormalLincoln38")

    #tokenizer = AutoTokenizer.from_pretrained("BigSalmon/InformalToFormalLincoln37")
    #model = AutoModelForCausalLM.from_pretrained("BigSalmon/InformalToFormalLincoln37")

    #tokenizer = AutoTokenizer.from_pretrained("BigSalmon/InformalToFormalLincoln36")
    #model = AutoModelForCausalLM.from_pretrained("BigSalmon/InformalToFormalLincoln36")

    #tokenizer = AutoTokenizer.from_pretrained("BigSalmon/MediumInformalToFormalLincoln")
    #model = AutoModelForCausalLM.from_pretrained("BigSalmon/MediumInformalToFormalLincoln")

    #tokenizer = AutoTokenizer.from_pretrained("BigSalmon/InformalToFormalLincoln35")
    #model = AutoModelForCausalLM.from_pretrained("BigSalmon/InformalToFormalLincoln35")

    #tokenizer = AutoTokenizer.from_pretrained("BigSalmon/InformalToFormalLincoln31")
    #model = AutoModelForCausalLM.from_pretrained("BigSalmon/InformalToFormalLincoln31")
    
    #tokenizer = AutoTokenizer.from_pretrained("BigSalmon/InformalToFormalLincoln21")
    #model = AutoModelForCausalLM.from_pretrained("BigSalmon/InformalToFormalLincoln21")
    
    #tokenizer = AutoTokenizer.from_pretrained("BigSalmon/PointsOneSent")
    #model = AutoModelForCausalLM.from_pretrained("BigSalmon/PointsOneSent")
    
    #tokenizer = AutoTokenizer.from_pretrained("BigSalmon/PointsToSentence")
    #model = AutoModelForCausalLM.from_pretrained("BigSalmon/PointsToSentence")
    
    return model, tokenizer
    
model, tokenizer = get_model()
model.to(device)   # keep the model on the same device the inputs are sent to
model.eval()

g = """informal english: garage band has made people who know nothing about music good at creating music.
Translated into the Style of Abraham Lincoln: garage band ( offers the uninitiated in music the ability to produce professional-quality compositions / catapults those for whom music is an uncharted art the ability to realize masterpieces / stimulates music novice's competency to yield sublime arrangements / begets individuals of rudimentary musical talent the proficiency to fashion elaborate suites ).

informal english: chrome extensions can make doing regular tasks much easier to get done.
Translated into the Style of Abraham Lincoln: chrome extensions ( yield the boon of time-saving convenience / ( expedite the ability to / unlock the means to more readily ) accomplish everyday tasks / turbocharges the velocity with which one can conduct their obligations ).

informal english: broadband is finally expanding to rural areas, a great development that will thrust them into modern life.
Translated into the Style of Abraham Lincoln: broadband is ( ( finally / at last / after years of delay ) arriving in remote locations / springing to life in far-flung outposts / inching into even the most backwater corners of the nation ) that will leap-frog them into the twenty-first century.

informal english: google translate has made talking to people who do not share your language easier.
Translated into the Style of Abraham Lincoln: google translate ( imparts communicability to individuals whose native tongue differs / mitigates the trials of communication across linguistic barriers / hastens the bridging of semantic boundaries / mollifies the complexity of multilingual communication / avails itself to the internationalization of discussion / flexes its muscles to abet intercultural conversation / calms the tides of linguistic divergence ).

informal english: corn fields are all across illinois, visible once you leave chicago.
Translated into the Style of Abraham Lincoln: corn fields ( permeate illinois / span the state of illinois / ( occupy / persist in ) all corners of illinois / line the horizon of illinois / envelop the landscape of illinois ), manifesting themselves visibly as one ventures beyond chicago.

informal english: """


def BestProbs(prompt):
  # Show the top-10 next-token candidates for the prompt, then, for each candidate,
  # list the top-20 tokens that would follow it (via BestProbs2).
  prompt = prompt.strip()
  text = tokenizer.encode(prompt)
  myinput = torch.tensor([text]).to(device)
  with torch.no_grad():
    logits, past_key_values = model(myinput, past_key_values=None, return_dict=False)
  logits = logits[0, -1]
  best_logits, best_indices = logits.topk(10)
  best_words = [tokenizer.decode([idx.item()]) for idx in best_indices]
  f = ""
  for i in best_words[0:10]:
    print("_______")
    st.write(f"${i} $\n")
    f = f"${i} $\n"
    m = prompt + f"{i}"
    BestProbs2(m)
  return f

def BestProbs2(prompt):
  # List the top-20 next-token candidates for the (already extended) prompt.
  prompt = prompt.strip()
  text = tokenizer.encode(prompt)
  myinput = torch.tensor([text]).to(device)
  with torch.no_grad():
    logits, past_key_values = model(myinput, past_key_values=None, return_dict=False)
  logits = logits[0, -1]
  best_logits, best_indices = logits.topk(20)
  best_words = [tokenizer.decode([idx.item()]) for idx in best_indices]
  for i in best_words[0:20]:
    print(i)
    st.write(i)
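
# The form exposes three actions: "Submit" lists the top-250 candidate next tokens,
# "Fast Forward" runs BestProbs (top-10 next tokens, each expanded via BestProbs2),
# and "Fast Forward 2.0" re-runs BestProbs on the current prompt.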

with st.form(key='my_form'):
    prompt = st.text_area(label='Enter sentence', value=g)
    submit_button = st.form_submit_button(label='Submit')
    submit_button2 = st.form_submit_button(label='Fast Forward')
    submit_button3 = st.form_submit_button(label='Fast Forward 2.0')

    if submit_button:
      # Show the top-250 candidate next tokens for the prompt.
      with torch.no_grad():
        text = tokenizer.encode(prompt)
        myinput = torch.tensor([text]).to(device)
        logits, past_key_values = model(myinput, past_key_values=None, return_dict=False)
        logits = logits[0, -1]
        probabilities = torch.nn.functional.softmax(logits, dim=-1)
        best_logits, best_indices = logits.topk(250)
        best_words = [tokenizer.decode([idx.item()]) for idx in best_indices]
        text.append(best_indices[0].item())
        best_probabilities = probabilities[best_indices].tolist()
        st.write(best_words)
    if submit_button2:
        print("----")
        st.write("___")
        m = BestProbs(prompt)
        st.write("___")
        st.write(m)
        st.write("___")
    if submit_button3:
        print("----")
        st.write("___")
        st.write(BestProbs(prompt))
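
# To run this locally (assuming the file is saved as app.py and Streamlit is installed):
#   streamlit run app.py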