import streamlit as st
from transformers import AutoTokenizer, TFAutoModelForSeq2SeqLM

model_checkpoint = "Modfiededition/t5-base-fine-tuned-on-jfleg"

tokenizer = AutoTokenizer.from_pretrained(model_checkpoint)

# Cache the model so Streamlit does not reload it on every rerun.
@st.cache(allow_output_mutation=True, suppress_st_warning=True)
def load_model():
    return TFAutoModelForSeq2SeqLM.from_pretrained(model_checkpoint)

model = load_model()

def infer(inputs):
    # Generate the corrected token ids and strip the leading pad and trailing EOS tokens.
    output_sequences = model.generate(inputs["input_ids"]).numpy()[0][1:-1]
    return output_sequences
    
# Prompt the user for text to correct.
st.title("Writing Assistant for you 🦄")

textbox = st.text_area('Write your text:', '', height=200, max_chars=1000)

# Run the correction once the user has entered some text.
if textbox:
    # The JFLEG fine-tuned checkpoint expects a "Grammar: " prefix on its input.
    inputs = tokenizer("Grammar: " + textbox, return_tensors="tf")

    output_ids = infer(inputs)

    generated_sequence = tokenizer.decode(output_ids)

    st.write(generated_sequence)
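
# A minimal way to try the app locally (a sketch, assuming this file is saved
# as app.py and streamlit, transformers, and tensorflow are installed):
#
#     streamlit run app.py
#
# Streamlit reruns the whole script on each interaction, which is why the
# model load above is wrapped in st.cache.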