adityabhalla-code committed on
Commit
5f3c2d3
1 Parent(s): 5da87c0

added gradio files

Browse files
Files changed (2) hide show
  1. app.py +44 -0
  2. requirements.txt +3 -0
app.py ADDED
@@ -0,0 +1,44 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import torch
2
+ import gradio
3
+ from transformers import AutoModelWithLMHead, AutoTokenizer
4
+
5
def generate_response(model, tokenizer, prompt, max_length=200):
    """Generate text from `model` conditioned on `prompt` and return the decoded string.

    Args:
        model: a transformers-style model exposing `.generate(...)`.
        tokenizer: the matching tokenizer (provides `.encode`, `.decode`,
            and `eos_token_id`).
        prompt: input text to condition generation on.
        max_length: maximum total token length of the generated sequence.

    Returns:
        The decoded generation with special tokens stripped.
    """
    # 'pt' -> return a PyTorch tensor (shape (1, seq_len) for a single prompt)
    input_ids = tokenizer.encode(prompt, return_tensors="pt")
    # A single unpadded prompt has no padding, so attend to every position.
    attention_mask = torch.ones_like(input_ids)
    # Use EOS as the pad id (common when the tokenizer defines no dedicated
    # pad token); silences the generate() warning about a missing pad id.
    pad_token_id = tokenizer.eos_token_id
    # Inference only: disable autograd so no gradient state is tracked.
    # Output is unchanged; this just saves memory.
    with torch.no_grad():
        output = model.generate(
            input_ids,
            max_length=max_length,
            num_return_sequences=1,
            attention_mask=attention_mask,
            pad_token_id=pad_token_id,
        )
    return tokenizer.decode(output[0], skip_special_tokens=True)
18
+
19
# Load the fine-tuned checkpoint and its tokenizer from the Hugging Face Hub.
username = "vsen7"  # change it to your HuggingFace username
checkpoint = username + '/Medical_Summary'

# Tokenizer first (cheap download), then the model weights.
tokenizer = AutoTokenizer.from_pretrained(checkpoint)
# NOTE(review): AutoModelWithLMHead is deprecated in recent transformers
# releases — AutoModelForSeq2SeqLM / AutoModelForCausalLM are the suggested
# replacements; verify which matches this checkpoint before switching.
loaded_model = AutoModelWithLMHead.from_pretrained(checkpoint)
27
+
28
# Gradio callback: binds the module-level model/tokenizer to the generic
# generation helper.
def generate_query_response(prompt):
    """Return the model's generated response for a user-supplied prompt."""
    return generate_response(loaded_model, tokenizer, prompt)
34
+
35
+
36
# Build the Gradio UI (one text box in, one text box out) and serve it.
iface = gradio.Interface(
    fn=generate_query_response,
    inputs="textbox",
    outputs="textbox",
    title="Dialogue Summarization",
    description="via gradio",
    # NOTE(review): allow_flagging was renamed/deprecated in Gradio 4.x+ —
    # confirm against the installed gradio version (requirements.txt is unpinned).
    allow_flagging="never",
)
iface.launch()
requirements.txt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ transformers
2
+ torch
3
+ gradio