import gradio as gr
import torch
from transformers import pipeline

# The two biomedical LLMs compared side by side in this demo.
name_list = ['microsoft/biogpt', 'stanford-crfm/BioMedLM']

# Example prompts shown beneath the demo; each inner list fills one input row.
examples = [['COVID-19 is'], ['A 65-year-old female patient with a past medical history of']]

# Report GPU availability, guarding the device-name lookup so the script
# also runs on CPU-only machines.
print(f"Is CUDA available: {torch.cuda.is_available()}")
if torch.cuda.is_available():
  print(f"CUDA device: {torch.cuda.get_device_name(torch.cuda.current_device())}")

# Use the GPU when present (device=0), otherwise fall back to CPU (device=-1).
device = 0 if torch.cuda.is_available() else -1
pipe_biogpt = pipeline("text-generation", model=name_list[0], device=device)
pipe_biomedlm = pipeline("text-generation", model=name_list[1], device=device)

title = "Compare generative biomedical LLMs!"
description = "This demo compares [BioGPT](https://huggingface.co/microsoft/biogpt) and [BioMedLM](https://huggingface.co/stanford-crfm/BioMedLM). **Disclaimer:** This demo was made for research purposes only and should not be used for medical advice."

def inference(text):
  # Generate up to 100 tokens (prompt included) from each model for the same input.
  output_biogpt = pipe_biogpt(text, max_length=100)[0]["generated_text"]
  output_biomedlm = pipe_biomedlm(text, max_length=100)[0]["generated_text"]
  return [output_biogpt, output_biomedlm]

io = gr.Interface(
  fn=inference,
  inputs=gr.Textbox(lines=3),
  outputs=[
    gr.Textbox(lines=3, label="BioGPT"),
    gr.Textbox(lines=3, label="BioMedLM")
  ],
  title=title,
  description=description,
  examples=examples
)
io.launch()
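
# Usage note (assumptions: `gradio`, `torch`, and `transformers` are installed,
# plus `sacremoses`, which the BioGPT tokenizer depends on):
#   python app.py
# starts a local Gradio server; pass share=True to io.launch() for a public link.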