aditi2222 committed on
Commit
a4dd1ac
1 Parent(s): 47b219e

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +33 -16
app.py CHANGED
@@ -1,18 +1,35 @@
 
 
1
  import gradio as gr
2
- import json
3
- from transformers import AutoTokenizer, AutoModelWithLMHead
4
- from transformers import pipeline
5
  tokenizer = AutoTokenizer.from_pretrained("aditi2222/automatic_title_generation")
6
- model = AutoModelWithLMHead.from_pretrained("aditi2222/automatic_title_generation")
7
- generator = pipeline(
8
- task="text-generation",
9
- model=model,
10
- tokenizer=tokenizer,
11
- framework="tf"
12
- )
13
- model = AutoModelWithLMHead.from_pretrained("gpt2")
14
- def green(prompt):
15
- return generator(prompt, max_length=1000)
16
-
17
- iface = gr.Interface(fn=green, inputs=gr.inputs.Textbox(lines=2, placeholder="Put prompt here"), outputs=gr.outputs.Textbox(type="str", label="generated text"))
18
- iface.launch()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import torch
2
+
3
  import gradio as gr
4
+
5
+ from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
6
+
7
# Fine-tuned seq2seq checkpoint for title generation on the HF Hub
# (downloaded on first run).
_MODEL_ID = "aditi2222/automatic_title_generation"

tokenizer = AutoTokenizer.from_pretrained(_MODEL_ID)
model = AutoModelForSeq2SeqLM.from_pretrained(_MODEL_ID)
10
+
11
+
12
def tokenize_data(text):
    """Encode *text* for the seq2seq model.

    Returns a dict with 'input_ids' and 'attention_mask' PyTorch
    tensors, padded/truncated to a fixed length of 120 tokens.
    """
    # NOTE(review): the explicit ' </s>' append may duplicate the EOS
    # token that T5-style tokenizers add automatically — confirm against
    # the checkpoint's tokenizer config.
    source = str(text) + ' </s>'
    encoded = tokenizer(
        source,
        padding='max_length',
        truncation=True,
        max_length=120,
        return_attention_mask=True,
        return_tensors='pt',
    )
    return {
        "input_ids": encoded['input_ids'],
        "attention_mask": encoded['attention_mask'],
    }
22
+
23
def generate_answers(text):
    """Generate a title for *text* and return it as a plain string.

    Uses sampling (do_sample=True, top-k/top-p), so output is
    non-deterministic across calls.
    """
    model_inputs = tokenize_data(text)
    output_ids = model.generate(
        input_ids=model_inputs['input_ids'],
        attention_mask=model_inputs['attention_mask'],
        do_sample=True,
        max_length=120,
        top_k=120,
        top_p=0.98,
        early_stopping=True,
        num_return_sequences=1,
    )
    # Only one sequence is requested; decode it, dropping pad/EOS tokens.
    return tokenizer.decode(output_ids[0], skip_special_tokens=True)
33
+
34
# Simple text-in / text-out web UI around the generator.
iface = gr.Interface(
    fn=generate_answers,
    inputs=['text'],
    outputs=["text"],
)
iface.launch(inline=False, share=True)