aditi2222 committed on
Commit a644a56
1 Parent(s): 1e263fe

Update app.py

Files changed (1)
  1. app.py +15 -36
app.py CHANGED
@@ -1,39 +1,18 @@
- import torch
- from transformers import BartTokenizer, BartForConditionalGeneration
  import gradio as gr
- from transformers import AutoTokenizer, AutoModelWithLMHead, TranslationPipeline
+ import json
+ from transformers import AutoTokenizer, AutoModelWithLMHead
  from transformers import pipeline
-
-
- from transformers import pipeline
- import gradio as gr
- pipe= pipeline('text2text-generation', model="facebook/m2m100_418M")
- def generate_text(inp):
-     output=pipe(inp, forced_bos_token_id=pipe.tokenizer.get_lang_id('en'))
-     tln=output[0]
-     for item in tln:
-         result=tln[item]
-     return result
- #Gradio Interface
-
-
-
-
- model = BartForConditionalGeneration.from_pretrained("facebook/bart-large-cnn")
- tokenizer = BartTokenizer.from_pretrained("facebook/bart-large-cnn")
-
- def article_summarization(result):
-     input_ = str(result) + ' </s>'
-     # generate summary
-     input_ids = tokenizer.encode(result, return_tensors='pt')
-     summary_ids = model.generate(input_ids,
-                                  min_length=20,
-                                  max_length=12000)
-
-     summary_text = tokenizer.decode(summary_ids[0], skip_special_tokens=True)
-     return(summary_text)
-
-
- iface = gr.Interface(fn=article_summarization,title="Summarization in English",description="facebook/bart-large-cnn for summarization in English", inputs=gr.inputs.Textbox(lines=50, placeholder="Enter newpaper article to be summarized"), outputs=["summary_text"])
-
+ tokenizer = AutoTokenizer.from_pretrained("aditi2222/automatic_title_generation")
+ model = AutoModelWithLMHead.from_pretrained("aditi2222/automatic_title_generation")
+ generator = pipeline(
+     task="text-generation",
+     model=model,
+     tokenizer=tokenizer,
+     framework="tf"
+ )
+ model = AutoModelWithLMHead.from_pretrained("gpt2")
+ def green(prompt):
+     return generator(prompt, max_length=1000)
+
+ iface = gr.Interface(fn=green, inputs=gr.inputs.Textbox(lines=2, placeholder="Put prompt here"), outputs=gr.outputs.Textbox(type="str", label="generated text"))
  iface.launch()
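
For reference, below is a minimal, runnable sketch of what the updated app.py does once the leftover issues in the committed version are tidied up. These adjustments are assumptions on my part, not part of commit a644a56: the unused import json and the second AutoModelWithLMHead.from_pretrained("gpt2") call (which overwrites model only after the pipeline has already been built) are dropped, and framework="tf" is removed because AutoModelWithLMHead loads PyTorch weights. The gr.inputs/gr.outputs namespaces are the legacy Gradio Interface API already used in this file.

import gradio as gr
from transformers import AutoTokenizer, AutoModelWithLMHead, pipeline

# Load the fine-tuned title-generation checkpoint (PyTorch weights).
tokenizer = AutoTokenizer.from_pretrained("aditi2222/automatic_title_generation")
model = AutoModelWithLMHead.from_pretrained("aditi2222/automatic_title_generation")

# Wrap model and tokenizer in a generation pipeline, as in the commit.
# Note: if this checkpoint is an encoder-decoder (seq2seq) model, the matching
# task would be "text2text-generation" rather than "text-generation".
generator = pipeline(
    task="text-generation",
    model=model,
    tokenizer=tokenizer,
)

def green(prompt):
    # Generate text from the user's prompt; max_length caps the output length.
    return generator(prompt, max_length=1000)

iface = gr.Interface(
    fn=green,
    inputs=gr.inputs.Textbox(lines=2, placeholder="Put prompt here"),
    outputs=gr.outputs.Textbox(label="generated text"),
)

iface.launch()

Run as python app.py (or as the Space's entry point); Gradio serves the interface locally and prints the URL to the console.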