File size: 1,440 Bytes
75d6a4a
bbadada
 
 
 
 
 
 
 
75d6a4a
bbadada
 
 
 
75d6a4a
bbadada
 
4d04a00
 
bbadada
4d04a00
2864b0a
bbadada
 
 
 
 
 
ca871e9
bbadada
 
 
 
4d04a00
bbadada
4d04a00
 
bbadada
 
4d04a00
bbadada
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
import streamlit as st
#import numpy as np
#import pandas as pd
#import os
#import torch
#import torch.nn as nn
#from transformers.activations import get_activation
from transformers import TFGPT2LMHeadModel, GPT2Tokenizer
import tensorflow as tf

# --- Page header (rendered top-to-bottom in statement order) -------------
st.title('DeepWords')
st.text('Still under Construction.')
st.text('Tip: Try writing a sentence and making the model predict final word.')
# NOTE(review): leftover from an earlier torch-based version (see commented torch imports above)
#device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

@st.cache(allow_output_mutation=True)  # cached: the heavy download/load runs only once
def get_model():
    """Load the GPT-2 medium tokenizer and TF language model.

    Returns:
        (model, tokenizer) tuple; the model's pad token is set to the
        tokenizer's EOS token so generation can pad cleanly.
    """
    tok = GPT2Tokenizer.from_pretrained("gpt2-medium")
    lm = TFGPT2LMHeadModel.from_pretrained(
        "gpt2-medium", pad_token_id=tok.eos_token_id)
    return lm, tok

GPT2, tokenizer = get_model()

with st.form(key='my_form'):
    prompt = st.text_input('Enter sentence:', '')
    # Number of NEW words the user wants the model to append (min 1).
    c = st.number_input('Enter Number of words: ', 1)
    submit_button = st.form_submit_button(label='Submit')
    if submit_button:
        # Fixed seed so repeated submissions of the same prompt are reproducible.
        tf.random.set_seed(12)
        input_ids = tokenizer.encode(prompt, return_tensors='tf')

        # BUG FIX: `max_length` caps the TOTAL sequence length (prompt tokens
        # + generated tokens), not the number of new tokens. With the old
        # `max_length=c`, any prompt longer than `c` tokens produced no new
        # text at all. Extend the prompt length by `c` instead.
        sample_output = GPT2.generate(
            input_ids,
            do_sample=True,
            max_length=int(input_ids.shape[1]) + int(c),
            top_p=0.8,   # nucleus sampling
            top_k=0)     # disable top-k filtering; rely on top_p alone

        st.write(tokenizer.decode(sample_output[0], skip_special_tokens=True), '...')