drguilhermeapolinario committed
Commit 4041491
1 Parent(s): 32fae2e

Update silence_detection.js

Files changed (1)
  1. silence_detection.js +36 -54
silence_detection.js CHANGED
@@ -1,57 +1,39 @@
-<script>
-let audioContext;
-let analyser;
-let microphone;
-let javascriptNode;
-
-function detectSilence(stream, silence_delay, min_decibels) {
-    audioContext = new AudioContext();
-    analyser = audioContext.createAnalyser();
-    microphone = audioContext.createMediaStreamSource(stream);
-    javascriptNode = audioContext.createScriptProcessor(2048, 1, 1);
-
-    analyser.smoothingTimeConstant = 0.8;
-    analyser.fftSize = 1024;
-
-    microphone.connect(analyser);
-    analyser.connect(javascriptNode);
-    javascriptNode.connect(audioContext.destination);
-
-    let silence_start = performance.now();
-    let triggered = false;
-
-    javascriptNode.onaudioprocess = function() {
-        var array = new Uint8Array(analyser.frequencyBinCount);
-        analyser.getByteFrequencyData(array);
-        var values = 0;
-
-        var length = array.length;
-        for (var i = 0; i < length; i++) {
-            values += (array[i]);
-        }
-
-        var average = values / length;
-
-        if(average < min_decibels) {
-            if(!triggered){
-                silence_start = performance.now();
-                triggered = true;
-            }
-            if(triggered && (performance.now() - silence_start) > silence_delay){
-                document.querySelector('#component-3').click();
-                audioContext.close();
-            }
-        } else {
-            triggered = false;
         }
     }
-}
 
-navigator.mediaDevices.getUserMedia({ audio: true, video: false })
-    .then(function(stream) {
-        detectSilence(stream, 2000, 30);
-    })
-    .catch(function(err) {
-        console.log('Error: ' + err);
-    });
-</script>
+with gr.Blocks() as demo:
+    with gr.Row():
+        web_search = gr.Checkbox(label="Web Search", value=False)
+        input_audio = gr.Audio(source="microphone", type="filepath", streaming=True)
+        output_audio = gr.Audio(label="AI Response", autoplay=True)
+
+    is_recording = gr.State(False)
+    last_interaction_time = gr.State(time.time())
+
+    def toggle_recording():
+        return not is_recording.value
+
+    def process_audio(audio, web_search, is_rec):
+        current_time = time.time()
+        if is_rec and (current_time - last_interaction_time.value > 2):
+            last_interaction_time.value = current_time
+            return transcribe_and_respond(audio, web_search), False
+        return None, is_rec
+
+    input_audio.stream(process_audio, inputs=[input_audio, web_search, is_recording], outputs=[output_audio, is_recording])
+
+    demo.load(toggle_recording, outputs=[is_recording])
+
+    # Load the JavaScript file
+    demo.load(None, js="""
+    async () => {
+        try {
+            const script = document.createElement('script');
+            script.src = 'silence_detection.js';
+            document.head.appendChild(script);
+            console.log('Silence detection script loaded successfully');
+        } catch (error) {
+            console.error('Error loading silence detection script:', error);
         }
     }
+    """)
 
+if __name__ == "__main__":
+    demo.queue(max_size=200).launch()
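
The added Blocks code calls transcribe_and_respond(audio, web_search), which is not part of this diff and is presumably defined elsewhere in the Space, along with the gradio and time imports it relies on. A minimal sketch of that surrounding context, assuming only that the helper returns something gr.Audio can play; the stub below is illustrative, not the author's implementation:

# Sketch of the context the diff assumes: imports plus a stand-in
# transcribe_and_respond. The real helper (transcription + response
# generation) lives elsewhere in the repo; this placeholder just returns
# a short beep so the output gr.Audio component has something to autoplay.
import time

import gradio as gr
import numpy as np


def transcribe_and_respond(audio_path, web_search):
    # Placeholder: a real implementation would transcribe audio_path,
    # optionally run a web search, and synthesize a spoken reply.
    sr = 16000
    t = np.linspace(0.0, 0.5, int(sr * 0.5), endpoint=False)
    beep = 0.2 * np.sin(2 * np.pi * 440.0 * t)
    return (sr, beep)  # gr.Audio output accepts a (sample_rate, ndarray) tuple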