davidscripka committed
Commit 42fc253
Parent: de66b6c

Added title and description to Gradio app

Files changed (1):
  1. app.py +24 -0
app.py CHANGED
@@ -51,7 +51,31 @@ def process_audio(audio, state=collections.defaultdict(partial(collections.deque
     return plot, state
 
 # Create Gradio interface and launch
+
+desc = """
+This is a demo of the pre-trained models included in the latest release
+of the [openWakeWord](https://github.com/dscripka/openWakeWord) library.
+
+Click on the "record from microphone" button below to start capturing.
+The real-time scores from each model will be shown in the line plot. Hover over
+each line to see the name of the corresponding model.
+
+Different models will respond to different wake words/phrases (see [the model docs](https://github.com/dscripka/openWakeWord/tree/main/docs/models) for more details).
+If everything is working properly,
+you should see a spike in the score for a given model after speaking a related word/phrase. Below are some suggested phrases to try!
+
+| Model Name | Word/Phrase |
+| --- | --- |
+| alexa | "alexa" |
+| hey_mycroft | "hey mycroft"|
+| weather | "what's the weather", "tell me today's weather" |
+| x_minute_timer | "set a timer for 1 minute", "create 1 hour alarm" |
+
+"""
+
 gr_int = gr.Interface(
+    title = "openWakeWord Live Demo",
+    description = desc,
     css = ".flex {flex-direction: column} .gr-panel {width: 100%}",
     fn=process_audio,
     inputs=[
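
For context, the sketch below is a minimal, self-contained illustration of how `gr.Interface` renders the `title` and Markdown `description` parameters added in this commit. It is not the actual app.py: the `echo` function and `gr.Textbox` components are placeholders standing in for the real audio pipeline around `process_audio`.

```python
# Minimal sketch (assumption: not the app's real pipeline) showing how a
# Gradio Interface displays a title and a Markdown description.
import gradio as gr

desc = """
Gradio renders this description as Markdown, so links and tables
(like the wake-word table in the commit above) appear directly in the UI.
"""

def echo(text):
    # Placeholder for process_audio; simply returns its input.
    return text

demo = gr.Interface(
    fn=echo,
    inputs=gr.Textbox(label="Input"),
    outputs=gr.Textbox(label="Output"),
    title="openWakeWord Live Demo",
    description=desc,
)

if __name__ == "__main__":
    demo.launch()
```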