SrdharMahendran committed on
Commit a3627a8
1 Parent(s): b8f8391

Update app.py

Code added for Med QnA

Files changed (1)
  1. app.py +30 -5
app.py CHANGED
@@ -1,10 +1,35 @@
- import gradio as gr
-
- def greet(name):
-     return "Hello " + name + "!!"
-
- iface = gr.Interface(fn=greet,
+ from transformers import AutoTokenizer, AutoModelForSeq2SeqLM, AutoModelWithLMHead
+ import gradio
+ import torch  # needed for torch.ones_like() in generate_query_response
+
+ username = "SrdharMahendran"  # change it to your HuggingFace username
+ model_path = username + '/MedQnA_Model'
+ tokenizer_path = username + '/MedQnA_Tokenizer'
+
+ loaded_model = AutoModelWithLMHead.from_pretrained(model_path)
+ loaded_tokenizer = AutoTokenizer.from_pretrained(tokenizer_path)
+
+ def generate_query_response(prompt, max_length=200):
+
+     model = loaded_model
+     tokenizer = loaded_tokenizer
+     input_ids = tokenizer.encode(prompt, return_tensors="pt")  # 'pt' for returning pytorch tensor
+
+     # Create the attention mask and pad token id
+     attention_mask = torch.ones_like(input_ids)
+     pad_token_id = tokenizer.eos_token_id
+
+     output = model.generate(
+         input_ids,
+         max_length=max_length,
+         num_return_sequences=1,
+         attention_mask=attention_mask,
+         pad_token_id=pad_token_id
+     )
+
+     return tokenizer.decode(output[0], skip_special_tokens=True)
+
+ iface = gradio.Interface(fn=generate_query_response,
      inputs="text",
      outputs="text",
      title = "MedQnA Application")
- iface.launch()
+ iface.launch()
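
Once the app is running, the interface can also be exercised programmatically. The sketch below is illustrative and not part of this commit: it assumes the app has been started with iface.launch() (which prints its local URL, by default http://127.0.0.1:7860), that the gradio_client package is installed, and the sample question is purely hypothetical.

# Minimal sketch: query the running MedQnA app over its API.
from gradio_client import Client

client = Client("http://127.0.0.1:7860")  # replace with the URL printed by iface.launch()
answer = client.predict(
    "What are the common symptoms of anemia?",  # hypothetical example question
    api_name="/predict",                        # default endpoint name for a gr.Interface
)
print(answer)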