File size: 6,848 Bytes
502fcf4
 
 
 
4ab0011
502fcf4
 
 
4ab0011
502fcf4
 
 
7ac37fe
502fcf4
 
 
4ab0011
 
 
 
 
1995421
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
502fcf4
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1995421
4ab0011
 
502fcf4
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
4ab0011
502fcf4
 
 
4ab0011
502fcf4
 
 
 
 
 
 
 
 
 
 
 
 
 
 
c6ec32d
502fcf4
4ab0011
 
502fcf4
 
 
4ab0011
502fcf4
 
 
4ab0011
502fcf4
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
import gradio as gr
import torch
import whisper
from transformers import pipeline
from transformers import AutoTokenizer, AutoModelForTokenClassification

### β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”

title="Whisper to Biomedical NER"

### β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”

whisper_model = whisper.load_model("medium")

device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

tokenizer = AutoTokenizer.from_pretrained("d4data/biomedical-ner-all")
model = AutoModelForTokenClassification.from_pretrained("d4data/biomedical-ner-all")

biomed_ner_pipe = pipeline("ner", model=model, tokenizer=tokenizer, aggregation_strategy="simple") # pass device=0 if using gpu

def parse_ner(text, ner_pipe=None):
    """Run biomedical NER over *text* and shape the result for gr.HighlightedText.

    Parameters
    ----------
    text : str
        The (translated) transcript to tag.
    ner_pipe : callable, optional
        NER pipeline to use instead of the module-level ``biomed_ner_pipe``.
        Exposed so the result shaping can be exercised without loading the model.

    Returns
    -------
    dict
        ``{"text": ..., "entities": [...]}`` — the dict format accepted by
        Gradio's HighlightedText component.
    """
    pipe = biomed_ner_pipe if ner_pipe is None else ner_pipe
    raw = pipe(text)
    ner_content = {
        "text": text,
        "entities": [
            {
                "entity": x["entity_group"],
                "word": x["word"],
                # Cast: pipeline scores are numpy float32, which is not
                # JSON-serializable when Gradio ships the dict to the browser.
                "score": float(x["score"]),
                "start": x["start"],
                "end": x["end"],
            }
            for x in raw
        ],
    }
    return ner_content

# Backward-compatible alias: this function was originally (mis)named parse_ber,
# while its caller invoked parse_ner — keeping both names fixes the NameError
# without breaking any external user of the old name.
parse_ber = parse_ner
    
def translate_and_classify(audio):
    """Transcribe and translate an audio file with Whisper, then tag biomedical entities.

    Parameters
    ----------
    audio : str
        Filepath to the recorded/uploaded audio clip (Gradio ``type="filepath"``).

    Returns
    -------
    tuple
        ``(transcription_text, ner_dict)`` — the transcript in the spoken
        language, and the HighlightedText-ready dict of entities detected in
        the English translation.
    """
    print("""
    —
    Sending audio to Whisper ...
    —
    """)
    # Load the waveform and pad/trim it to the 30-second window Whisper expects.
    audio = whisper.load_audio(audio)
    audio = whisper.pad_or_trim(audio)

    mel = whisper.log_mel_spectrogram(audio).to(whisper_model.device)

    # Language identification; the detected language is surfaced later via
    # transcription.language, so probs itself is only informational here.
    _, probs = whisper_model.detect_language(mel)

    # fp16=False keeps decoding in fp32 so it also works on CPU-only hosts.
    transcript_options = whisper.DecodingOptions(task="transcribe", fp16 = False)
    translate_options = whisper.DecodingOptions(task="translate", fp16 = False)

    transcription = whisper.decode(whisper_model, mel, transcript_options)
    translation = whisper.decode(whisper_model, mel, translate_options)

    print("Language Spoken: " + transcription.language)
    print("Transcript: " + transcription.text)
    print("Translated: " + translation.text)

    # BUG FIX: the original called parse_ner(), which is not defined anywhere
    # in this file — the helper is named parse_ber(). NER runs on the English
    # translation so the biomedical model sees in-vocabulary text.
    detected_ner = parse_ber(translation.text)
    print("Detected Named Entities: ", detected_ner)
    return transcription.text, detected_ner

# Custom stylesheet injected into the Gradio app (IBM Plex Sans, black buttons,
# Tailwind-style focus rings, footer styling). Runtime string — rendered verbatim.
css = """
        .gradio-container {
            font-family: 'IBM Plex Sans', sans-serif;
        }
        .gr-button {
            color: white;
            border-color: black;
            background: black;
        }
        input[type='range'] {
            accent-color: black;
        }
        .dark input[type='range'] {
            accent-color: #dfdfdf;
        }
        .container {
            max-width: 730px;
            margin: auto;
            padding-top: 1.5rem;
        }
        #gallery {
            min-height: 22rem;
            margin-bottom: 15px;
            margin-left: auto;
            margin-right: auto;
            border-bottom-right-radius: .5rem !important;
            border-bottom-left-radius: .5rem !important;
        }
        #gallery>div>.h-full {
            min-height: 20rem;
        }
        .details:hover {
            text-decoration: underline;
        }
        .gr-button {
            white-space: nowrap;
        }
        .gr-button:focus {
            border-color: rgb(147 197 253 / var(--tw-border-opacity));
            outline: none;
            box-shadow: var(--tw-ring-offset-shadow), var(--tw-ring-shadow), var(--tw-shadow, 0 0 #0000);
            --tw-border-opacity: 1;
            --tw-ring-offset-shadow: var(--tw-ring-inset) 0 0 0 var(--tw-ring-offset-width) var(--tw-ring-offset-color);
            --tw-ring-shadow: var(--tw-ring-inset) 0 0 0 calc(3px var(--tw-ring-offset-width)) var(--tw-ring-color);
            --tw-ring-color: rgb(191 219 254 / var(--tw-ring-opacity));
            --tw-ring-opacity: .5;
        }
        #advanced-btn {
            font-size: .7rem !important;
            line-height: 19px;
            margin-top: 12px;
            margin-bottom: 12px;
            padding: 2px 8px;
            border-radius: 14px !important;
        }
        #advanced-options {
            display: none;
            margin-bottom: 20px;
        }
        .footer {
            margin-bottom: 45px;
            margin-top: 35px;
            text-align: center;
            border-bottom: 1px solid #e5e5e5;
        }
        .footer>p {
            font-size: .8rem;
            display: inline-block;
            padding: 0 10px;
            transform: translateY(10px);
            background: white;
        }
        .dark .footer {
            border-color: #303030;
        }
        .dark .footer>p {
            background: #0b0f19;
        }
"""
# UI layout: two input tabs (microphone / file upload), shared output row, and
# both Transcribe buttons wired to the same translate_and_classify handler.
with gr.Blocks(css = css) as demo:
    gr.Markdown("""
                ## Biomedical Named Entity Recognition From Speech with Whisper
            """)
    gr.HTML('''
     <p style="margin-bottom: 10px; font-size: 94%">
                Whisper is a general-purpose speech recognition model released by OpenAI that can perform multilingual speech recognition as well as speech translation and language identification. Combined with an biomedical named entity recognition model,this allows for detecting key terms directly from speech in multiple languages and can potentially be used to assist in data-driven analysis in clinical settings related to physical and mental health
              </p>
              ''')
    
    with gr.Column():
            #gr.Markdown(""" ### Record audio """)
        # Both tabs produce a filepath, so either can feed the same handler.
        with gr.Tab("Record Audio"):
            audio_input_r = gr.Audio(label = 'Record Audio Input',source="microphone",type="filepath")
            transcribe_audio_r = gr.Button('Transcribe')
        
        with gr.Tab("Upload Audio as File"):
            audio_input_u = gr.Audio(label = 'Upload Audio',source="upload",type="filepath")
            transcribe_audio_u = gr.Button('Transcribe')

        with gr.Row():
            # Transcript in the spoken language; entities are drawn from the
            # English translation (see translate_and_classify).
            transcript_output = gr.Textbox(label="Transcription in the language you spoke", lines = 3)
            biomed_ner_output = gr.HighlightedText(label = "Detected Named Entities")
    
    # Same callback and outputs for both input sources.
    transcribe_audio_r.click(translate_and_classify, inputs = audio_input_r, outputs = [transcript_output,biomed_ner_output])
    transcribe_audio_u.click(translate_and_classify, inputs = audio_input_u, outputs = [transcript_output,biomed_ner_output])   
    gr.HTML('''
        <div class="footer">
                    <p>Whisper Model by <a href="https://github.com/openai/whisper" style="text-decoration: underline;" target="_blank">OpenAI</a> -   
                    <a href="https://huggingface.co/d4data/biomedical-ner-all" style="text-decoration: underline;" target="_blank">Biomedical NER Model</a>
                    </p>
        </div>
        ''')
    gr.Markdown("![visitor badge](https://visitor-badge.glitch.me/badge?page_id=RamAnanth1.whisper_biomd_ner)")
    
    
demo.launch()