File size: 3,689 Bytes
34acdd0
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
c77178a
34acdd0
 
c77178a
 
 
 
 
34acdd0
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
"""
Original Algorithm:
- https://github.com/GreenCUBIC/AudiogramDigitization

Source:
- huggingface app
    - https://huggingface.co/spaces/aravinds1811/neural-style-transfer/blob/main/app.py
    - https://huggingface.co/spaces/keras-io/ocr-for-captcha/blob/main/app.py
    - https://huggingface.co/spaces/hugginglearners/image-style-transfer/blob/main/app.py
    - https://tmabraham.github.io/blog/gradio_hf_spaces_tutorial
- huggingface push
    - https://huggingface.co/welcome
"""
# Standard library.
import os 
import sys 
from pathlib import Path 

# Third-party: imaging, plotting, data wrangling, and the web UI.
from PIL import Image
from matplotlib.offsetbox import OffsetImage, AnnotationBbox
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np 
import gradio as gr

# Make the vendored digitizer package (under ./src) importable.
sys.path.append(os.path.join(os.path.dirname(__file__), "src"))
from digitizer.digitization import generate_partial_annotation, extract_thresholds

# Directory holding the sample audiogram images shown in the Gradio UI.
EXAMPLES_PATH = Path('./examples')

# NOTE(review): these three constants (and the Image/np/generate_partial_annotation
# imports) are never referenced in this file — presumably leftovers from the
# OCR-for-captcha example cited in the module docstring. Verify before removing.
max_length = 5
img_width = 200
img_height = 50

  
def load_image(path, zoom=1):
    """Read the image file at *path* and wrap it in an OffsetImage.

    The returned OffsetImage can be embedded in a matplotlib axes via
    AnnotationBbox; *zoom* scales the rendered symbol.
    """
    pixels = plt.imread(path)
    return OffsetImage(pixels, zoom=zoom)

def plot_audiogram(digital_result):
    """Render digitized hearing thresholds as a clinical audiogram.

    digital_result: records with ``ear``, ``conduction``, ``masking``,
    ``frequency`` and ``threshold`` fields (whatever pandas.DataFrame
    accepts), as produced by ``extract_thresholds``.
    Returns the matplotlib Figure containing the plot.
    """
    data = pd.DataFrame(digital_result)

    fig = plt.figure()
    ax = fig.add_subplot(111)

    # Frequency axis: log-scaled, ticks on top, labeled in plain Hz.
    freq_ticks = [250, 500, 1000, 2000, 4000, 8000, 16000]
    ax.set_xscale('log')
    ax.xaxis.tick_top()
    ax.xaxis.set_major_formatter(plt.FuncFormatter('{:.0f}'.format))
    ax.set_xlabel('Frequency (Hz)')
    ax.xaxis.set_label_position('top') 
    ax.set_xlim(125, 16000)
    plt.xticks(freq_ticks)

    # Hearing-level axis, inverted per audiology convention (worse is lower).
    ax.set_ylim(-20, 120)
    ax.invert_yaxis()
    ax.set_ylabel('Threshold (dB HL)')

    plt.grid()

    for conduction in ("air", "bone"):
        for masking in (True, False):
            for ear in ("left", "right"):
                masking_label = 'masked' if masking else 'unmasked'
                symbol_name = f"{ear}_{conduction}_{masking_label}"

                # Thresholds for this ear/conduction/masking combination,
                # ordered by frequency so joining lines run left to right.
                mask = (
                    (data.conduction == conduction)
                    & (data.ear == ear)
                    & (data.masking == masking)
                )
                points = data[mask].sort_values("frequency")

                # Place the matching clinical symbol at each threshold.
                for _, point in points.iterrows():
                    marker = AnnotationBbox(
                        load_image(f"src/digitizer/assets/symbols/{symbol_name}.png", zoom=0.1),
                        (point.frequency, point.threshold),
                        frameon=False,
                    )
                    ax.add_artist(marker)

                # Only air-conduction thresholds get a joining line:
                # red for the right ear, blue for the left.
                if conduction == "air":
                    line_color = "red" if ear == "right" else "blue"
                    plt.plot(points.frequency, points.threshold,
                             color=line_color, linewidth=0.5)

    return plt.gcf()

# Gradio callback: digitize an uploaded audiogram image.
def audiogram_digit_recognition(img_path):
    """Extract thresholds from the image at *img_path* (CPU only) and
    return ``[figure, raw_result]`` matching the two Gradio outputs
    (Plot, JSON)."""
    thresholds = extract_thresholds(img_path, gpu=False)
    figure = plot_audiogram(thresholds)
    return [figure, thresholds]


# Interface outputs: the rendered audiogram plot and the raw JSON thresholds.
output = [gr.Plot(), gr.JSON()]
examples = [
            f'{EXAMPLES_PATH}/audiogram_example01.png',
            f'{EXAMPLES_PATH}/audiogram_example02.png'
            ]

iface = gr.Interface(
    fn=audiogram_digit_recognition,
    # gr.Image (top-level API) replaces the removed gradio 2 namespace
    # `gr.inputs.Image`, matching the gr.Plot()/gr.JSON() style used above.
    inputs=gr.Image(type='filepath'),
    outputs=output,
    title=" AudiogramDigitization",
    description="facilitate the digitization of audiology reports based on pytorch",
    article="Algorithm Authors: <a href=\"francoischarih@sce.carleton.ca\">Francois Charih \
                    and <a href=\"jrgreen@sce.carleton.ca\"> James R. Green </a>. \
                    Based on the AudiogramDigitization  <a href=\"https://github.com/GreenCUBIC/AudiogramDigitization\">github repo</a>",
    examples=examples,
    allow_flagging='never',
    cache_examples=False,
)


# Request queueing moved from launch(enable_queue=...) to .queue() in
# modern Gradio releases.
iface.queue().launch(
    debug=False, inbrowser=False
)