deep-learning-analytics committed
Commit 3aa3a41
Parent: b7b31e9

Update app.py

Files changed (1):
  app.py (+3 -3)
app.py CHANGED
@@ -1,11 +1,11 @@
 import streamlit as st
 text = st.text_area('You is here')
 ### Run Model
-from transformers import T5ForConditionalGeneration, T5Tokenizer, AutoTokenizer, AutoModelForSeq2SeqLM
+from transformers import T5ForConditionalGeneration, T5Tokenizer
 import torch
 torch_device = 'cuda' if torch.cuda.is_available() else 'cpu'
-tokenizer = AutoTokenizer.from_pretrained('deep-learning-analytics/GrammarCorrector')
-model = AutoModelForSeq2SeqLM.from_pretrained('deep-learning-analytics/GrammarCorrector').to(torch_device)
+tokenizer = T5Tokenizer.from_pretrained('deep-learning-analytics/GrammarCorrector')
+model = T5ForConditionalGeneration.from_pretrained('deep-learning-analytics/GrammarCorrector').to(torch_device)
 
 def correct_grammar(input_text,num_return_sequences=1):
   batch = tokenizer([input_text],truncation=True,padding='max_length',max_length=64, return_tensors="pt").to(torch_device)
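For reference, a minimal sketch of how the loaded T5Tokenizer and T5ForConditionalGeneration are typically used to finish correct_grammar. The hunk ends at the tokenization step, so the generation arguments and the Streamlit hookup below are assumptions, not the file's actual code:

# Hypothetical completion of correct_grammar (not shown in this hunk).
def correct_grammar(input_text, num_return_sequences=1):
    batch = tokenizer([input_text], truncation=True, padding='max_length',
                      max_length=64, return_tensors="pt").to(torch_device)
    # Beam-search decoding; num_return_sequences controls how many
    # corrected variants are returned.
    generated = model.generate(**batch, max_length=64, num_beams=4,
                               num_return_sequences=num_return_sequences)
    return tokenizer.batch_decode(generated, skip_special_tokens=True)

# Assumed Streamlit wiring: show the first correction for the entered text.
if text:
    st.write(correct_grammar(text, num_return_sequences=1)[0])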