Mr-Vicky-01 committed on
Commit
76ae1d9
1 Parent(s): a0aa016

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +14 -13
app.py CHANGED
@@ -1,21 +1,22 @@
1
  from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
2
  import gradio as gr
3
 
4
- checkpoint = "Mr-Vicky-01/English-Tamil-Translator"
5
  tokenizer = AutoTokenizer.from_pretrained(checkpoint)
6
- model = AutoModelForSeq2SeqLM.from_pretrained("Finetuned_model/")
7
 
8
- def language_translator(text):
9
- tokenized = tokenizer([text], return_tensors='pt')
10
- out = model.generate(**tokenized, max_length=128)
11
- return tokenizer.decode(out[0],skip_special_tokens=True)
 
12
 
13
- examples = [
14
- ["hello everyone"],
15
- ["hardwork never fails."],
16
- ["A room without books is like a body without a soul."],
17
- ["The Sun is approximately 4.6 billion years older than Earth."],
18
- ]
19
 
20
- demo = gr.Interface(fn=language_translator, inputs='text',outputs='text',title='English to Tamil Translator',examples=examples)
21
  demo.launch(debug=True,share=True)
 
1
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
import gradio as gr

# Single checkpoint supplies both tokenizer and model (previous revision
# loaded the model from a local "Finetuned_model/" path instead).
checkpoint = "Mr-Vicky-01/conversational_sumarization"
tokenizer = AutoTokenizer.from_pretrained(checkpoint)
model = AutoModelForSeq2SeqLM.from_pretrained(checkpoint)


def generate_summary(text):
    """Return an abstractive summary of *text*.

    The input is truncated to the model's 1024-token window; generation is
    greedy (do_sample=False) and capped at 100 new tokens, so output is
    deterministic for a given input.
    """
    inputs = tokenizer([text], max_length=1024, return_tensors='pt', truncation=True)
    summary_ids = model.generate(inputs['input_ids'], max_new_tokens=100, do_sample=False)
    return tokenizer.decode(summary_ids[0], skip_special_tokens=True)


# BUG FIX 1: fn must reference generate_summary — the old name
# `language_translator` no longer exists after the rename and raised
# NameError at launch.
# BUG FIX 2: removed the stray trailing comma after gr.Interface(...),
# which turned `demo` into a 1-tuple so `demo.launch(...)` failed with
# AttributeError. (Stale commented-out translation examples dropped.)
demo = gr.Interface(
    fn=generate_summary,
    inputs='text',
    outputs='text',
    title='Text Summarization',
)
demo.launch(debug=True, share=True)