jputhalath commited on
Commit
12e2fc9
1 Parent(s): eb0735d

Create app.py

Browse files
Files changed (1) hide show
  1. app.py +36 -0
app.py ADDED
@@ -0,0 +1,36 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
import gradio
import torch
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM, AutoModelWithLMHead

# Hugging Face Hub namespace that hosts the fine-tuned model and tokenizer.
username = "jputhalath" # change it to your HuggingFace username
model_path = username + '/MedQnA_Model'
tokenizer_path = username + '/MedQnA_Tokenizer'

# Download (or load from cache) the fine-tuned checkpoint and its tokenizer
# once at import time, so every Gradio request reuses the same objects.
# NOTE(review): AutoModelWithLMHead is deprecated in transformers; the
# supported replacement is AutoModelForSeq2SeqLM (already imported above but
# unused) for encoder-decoder checkpoints, or AutoModelForCausalLM for
# GPT-style ones — confirm which architecture 'MedQnA_Model' is before
# switching.
loaded_model = AutoModelWithLMHead.from_pretrained(model_path)
loaded_tokenizer = AutoTokenizer.from_pretrained(tokenizer_path)
def generate_query_response(prompt, max_length=200):
    """Generate a model answer for *prompt* using the globally loaded model.

    Args:
        prompt: The user's question, as plain text from the Gradio textbox.
        max_length: Upper bound on the total generated sequence length.

    Returns:
        The decoded generation with special tokens stripped.
    """
    mdl = loaded_model
    tok = loaded_tokenizer

    # 'pt' -> encode the prompt as a PyTorch tensor of token ids.
    encoded = tok.encode(prompt, return_tensors="pt")

    # No padding in a single un-batched prompt, so attend to every position;
    # reuse the EOS token id as the pad token id for generation.
    mask = torch.ones_like(encoded)
    eos_as_pad = tok.eos_token_id

    generated = mdl.generate(
        encoded,
        max_length=max_length,
        num_return_sequences=1,
        attention_mask=mask,
        pad_token_id=eos_as_pad,
    )

    # Only one sequence was requested; decode it back to text.
    return tok.decode(generated[0], skip_special_tokens=True)
# Wire the generator into a minimal text-in / text-out Gradio UI and serve it.
ui_config = dict(
    fn=generate_query_response,
    inputs="text",
    outputs="text",
    title="MedQnA Application",
)
iface = gradio.Interface(**ui_config)
iface.launch()