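# ---------------------------------------------------------------------------
# Crowd-sourced audio recording app. Earlier iterations are kept below,
# commented out for reference; the active application starts after the last
# commented block.
# ---------------------------------------------------------------------------

# Earlier experiment: HuggingFaceDatasetSaver pointed at a "save_audio"
# dataset, with the token read from an 'HW_Token' environment variable.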
# import gradio as gr
# import os

# HF_TOKEN = os.getenv('HW_Token')
# hf_writer = gr.HuggingFaceDatasetSaver(HF_TOKEN, "save_audio")

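# Earlier experiment: step through an uploaded text file line by line and save
# each recording plus its text to a local "recordings" folder.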
# import gradio as gr
# import os

# class TextFileReader:
#     def __init__(self):
#         self.lines = []
#         self.current_index = 0

#     def read_lines(self, file):
#         self.lines = file.decode('utf-8').splitlines()
#         self.current_index = 0
#         return self.get_current_line()

#     def get_current_line(self):
#         if 0 <= self.current_index < len(self.lines):
#             return self.lines[self.current_index]
#         else:
#             return "End of file reached."

#     def forward_line(self):
#         self.current_index = min(self.current_index + 1, len(self.lines) - 1)
#         return self.get_current_line()

#     def backward_line(self):
#         self.current_index = max(self.current_index - 1, 0)
#         return self.get_current_line()

# reader = TextFileReader()

# # Define a function to save the text lines to a file
# def save_text_lines(file):
#     lines = reader.read_lines(file)
#     with open("text_lines.txt", "w") as f:
#         f.write("\n".join(reader.lines))
#     return lines

# # Define a function to save the audio file and corresponding text
# def save_audio_text(audio, text):
#     if not os.path.exists("recordings"):
#         os.makedirs("recordings")

#     # Debugging to print out the structure of the audio variable
#     print("Received audio data:", audio)

#     # Check if audio is a dictionary and contains 'data'
#     if isinstance(audio, dict) and 'data' in audio:
#         audio_data = audio['data']
#         audio_path = f"recordings/line_{reader.current_index}.wav"
#         text_path = f"recordings/line_{reader.current_index}.txt"
        
#         with open(audio_path, "wb") as f:
#             f.write(audio_data)
            
#         with open(text_path, "w") as f:
#             f.write(text)
        
#         # Move to the next line after saving
#         next_line = reader.forward_line()
#         return next_line
#     else:
#         return "Audio data is not in the expected format."

# # Define the Gradio interface
# with gr.Blocks() as demo:
#     with gr.Row():
#         file_upload = gr.File(label="Upload a text file", type="binary")
#         generate_button = gr.Button("Generate Lines")
    
#     current_line = gr.Textbox(label="Current Line")
    
#     def update_output(file):
#         lines = reader.read_lines(file)
#         save_text_lines(file)  # Save the text lines to a file
#         return lines

#     generate_button.click(fn=update_output, inputs=file_upload, outputs=current_line)
    
#     with gr.Row():
#         audio_record = gr.Audio(sources=["microphone","upload"], type="filepath")
#         save_button = gr.Button("Save Audio and Next Line")

#     save_button.click(fn=save_audio_text, inputs=[audio_record, current_line], outputs=current_line)

# demo.launch()

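# Earlier experiment: calculator Interface with manual flagging and
# "correct"/"wrong" flag options.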
# import gradio as gr


# def calculator(num1, operation, num2):
#     if operation == "add":
#         return num1 + num2
#     elif operation == "subtract":
#         return num1 - num2
#     elif operation == "multiply":
#         return num1 * num2
#     elif operation == "divide":
#         return num1 / num2


# iface = gr.Interface(
#     calculator,
#     ["number", gr.Radio(["add", "subtract", "multiply", "divide"]), "number"],
#     "number",
#     allow_flagging="manual",
#     flagging_options=["correct", "wrong"]
# )

# iface.launch()

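# Earlier experiment: the same calculator, but flagging to the Hugging Face
# dataset linked in the description via HuggingFaceDatasetSaver.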
# import os

# HF_TOKEN = os.getenv('HF_TOKEN')
# hf_writer = gr.HuggingFaceDatasetSaver(HF_TOKEN, "crowdsourced")

# iface = gr.Interface(
#     calculator,
#     ["number", gr.Radio(["add", "subtract", "multiply", "divide"]), "number"],
#     "number",
#     description="Check out the crowd-sourced dataset at: [https://huggingface.co/Sajjo/crowdsourced](https://huggingface.co/Sajjo/crowdsourced)",
#     allow_flagging="manual",
#     flagging_options=["wrong sign", "off by one", "other"],
#     flagging_callback=hf_writer
# )

# iface.launch()

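# Earlier experiment: sepia image filter demonstrating the CSVLogger flagging
# callback (the same setup()/flag() pattern the active app uses below).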
# import numpy as np
# import gradio as gr

# def sepia(input_img, strength):
#     sepia_filter = strength * np.array(
#         [[0.393, 0.769, 0.189], [0.349, 0.686, 0.168], [0.272, 0.534, 0.131]]
#     ) + (1-strength) * np.identity(3)
#     sepia_img = input_img.dot(sepia_filter.T)
#     sepia_img /= sepia_img.max()
#     return sepia_img

# callback = gr.CSVLogger()

# with gr.Blocks() as demo:
#     with gr.Row():
#         with gr.Column():
#             img_input = gr.Image()
#             strength = gr.Slider(0, 1, 0.5)
#         img_output = gr.Image()
#     with gr.Row():
#         btn = gr.Button("Flag")
        
#     # This needs to be called at some point prior to the first call to callback.flag()
#     callback.setup([img_input, strength, img_output], "flagged_data_points")

#     img_input.change(sepia, [img_input, strength], img_output)
#     strength.change(sepia, [img_input, strength], img_output)
    
#     # We can choose which components to flag -- in this case, we'll flag all of them
#     btn.click(lambda *args: callback.flag(args), [img_input, strength, img_output], None, preprocess=False)

# demo.launch()

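# Earlier experiment: first draft of the audio-capture interface, using global
# file/line indices and a microphone-only numpy Audio input.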
# import gradio as gr
# import os
# import wave
# import tempfile
# import numpy as np

# # Global variables to store file and line index
# file_index = 0
# line_index = 0
# lines = []

# # Hugging Face token and dataset saver
# HF_TOKEN = os.getenv('HF_TOKEN')
# hf_writer = gr.HuggingFaceDatasetSaver(HF_TOKEN, "crowdsourced-calculator-demo")

# # Function to read lines from a file
# def read_lines_from_file(file_path):
#     global lines
#     with open(file_path, 'r') as file:
#         lines = file.readlines()

# # Function to save audio to a WAV file
# def save_audio_to_file(audio):
#     sample_rate, data = audio  # audio is a tuple (sample_rate, data)
    
#     # Save the audio data as a WAV file in a temporary location
#     with tempfile.NamedTemporaryFile(delete=False, suffix=".wav") as tmp_file:
#         with wave.open(tmp_file.name, 'wb') as wav_file:
#             wav_file.setnchannels(1)  # Mono audio
#             wav_file.setsampwidth(2)  # 2 bytes per sample (16-bit PCM)
#             wav_file.setframerate(sample_rate)
#             wav_file.writeframes(data.tobytes())
        
#         # Return the path to the saved WAV file
#         return tmp_file.name

# # Function to save data to the Hugging Face dataset
# def save_to_hf_dataset(text, audio_path):
#     with open(audio_path, "rb") as f:
#         audio_data = f.read()
#     hf_writer.save({"text": text, "audio": audio_data})

# # Gradio interface function
# def audio_capture_interface():
#     global file_index, line_index, lines
    
#     # Initial file to read
#     files = os.listdir('./audio_samples')
#     read_lines_from_file(os.path.join('./audio_samples', files[file_index]))

#     # Define the interface components
#     audio_input = gr.Audio(source="microphone", type="numpy", label="Speak and click submit")
#     output_text = gr.Textbox(label="Status", placeholder="Status will appear here")

#     # Function to capture and process the audio input
#     def process_audio(audio):
#         global line_index, lines
        
#         try:
#             text_line = lines[line_index].strip()
#             file_path = save_audio_to_file(audio)
#             save_to_hf_dataset(text_line, file_path)
#             return f"Audio saved to {file_path} and uploaded to Hugging Face Dataset."
#         except Exception as e:
#             return f"Error saving audio: {str(e)}"

#     # Function to handle navigation buttons
#     def navigate_lines(button):
#         global line_index, lines
        
#         if button == 'forward':
#             line_index = min(line_index + 1, len(lines) - 1)
#         elif button == 'previous':
#             line_index = max(line_index - 1, 0)
        
#         output_text.value = lines[line_index]

#     # Create the Gradio interface
#     with gr.Blocks() as iface:
#         with gr.Row():
#             gr.Textbox(label="Text", value=lines[line_index], interactive=False)
#         with gr.Row():
#             audio_input.render()
#         with gr.Row():
#             gr.Button("Previous").click(lambda: navigate_lines('previous'), outputs=output_text)
#             gr.Button("Forward").click(lambda: navigate_lines('forward'), outputs=output_text)
#             gr.Button("Submit").click(process_audio, inputs=audio_input, outputs=output_text)

#     return iface

# # Launch the interface
# iface = audio_capture_interface()
# iface.launch()

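# ---------------------------------------------------------------------------
# Active application: upload a text file, step through it line by line, record
# audio for the current line, and store each (text, audio) pair in a Hugging
# Face dataset through the HuggingFaceDatasetSaver flagging callback.
# Requires an HF_TOKEN environment variable with write access to the Hub.
# ---------------------------------------------------------------------------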
import gradio as gr
import os
import wave
import tempfile
import numpy as np

# Global variables to store line index and lines
line_index = 0
lines = []

# Hugging Face token and dataset saver
HF_TOKEN = os.getenv('HF_TOKEN')
hf_writer = gr.HuggingFaceDatasetSaver(HF_TOKEN, "crowdsourced-calculator-demo")

# Function to read lines from a file
def read_lines_from_file(file_path):
    global lines, line_index
    with open(file_path, 'r') as file:
        lines = file.readlines()
    line_index = 0  # Reset line index when a new file is loaded
    return lines[line_index].strip() if lines else "No lines found in the file."

# Function to save audio to a WAV file
def save_audio_to_file(audio):
    sample_rate, data = audio  # audio is a tuple (sample_rate, data)

    # Keep a single channel if the recording is stereo, since the WAV header
    # below declares mono audio
    if data.ndim > 1:
        data = data[:, 0]

    # Save the audio data as a WAV file in a temporary location
    with tempfile.NamedTemporaryFile(delete=False, suffix=".wav") as tmp_file:
        with wave.open(tmp_file.name, 'wb') as wav_file:
            wav_file.setnchannels(1)  # Mono audio
            wav_file.setsampwidth(2)  # 2 bytes per sample (16-bit PCM)
            wav_file.setframerate(sample_rate)
            wav_file.writeframes(data.tobytes())
        
        # Return the path to the saved WAV file
        return tmp_file.name

# Function to save data to the Hugging Face dataset.
# HuggingFaceDatasetSaver is a flagging callback, so it exposes setup()/flag()
# rather than a save() method; the components it stores are registered with
# hf_writer.setup(...) inside audio_capture_interface() below, mirroring the
# CSVLogger example kept above.
def save_to_hf_dataset(text, audio_path):
    hf_writer.flag([text, audio_path])

# Function to capture and process the audio input
def process_audio(audio):
    global line_index, lines
    try:
        text_line = lines[line_index].strip()
        file_path = save_audio_to_file(audio)
        save_to_hf_dataset(text_line, file_path)
        return f"Audio saved to {file_path} and uploaded to Hugging Face Dataset."
    except Exception as e:
        return f"Error saving audio: {str(e)}"

# Function to handle navigation buttons
def navigate_lines(button):
    global line_index, lines
    if button == 'forward':
        line_index = min(line_index + 1, len(lines) - 1)
    elif button == 'previous':
        line_index = max(line_index - 1, 0)
    return lines[line_index].strip() if lines else "No lines found."

# Function to handle file upload
def upload_file(file):
    if file is not None:
        # gr.File may hand back either a path string or a tempfile-like object
        # with a .name attribute, depending on the Gradio version; handle both
        file_path = file if isinstance(file, str) else file.name
        return read_lines_from_file(file_path)
    else:
        return "No file uploaded."

# Gradio interface function
def audio_capture_interface():
    with gr.Blocks() as iface:
        with gr.Row():
            file_upload = gr.File(label="Upload a text file", file_types=["text"])
            text_display = gr.Textbox(label="Text", value="Please upload a file to begin.", interactive=False)
        with gr.Row():
            # type="numpy" yields (sample_rate, data), which save_audio_to_file expects
            audio_input = gr.Audio(sources=["microphone", "upload"], type="numpy", label="Speak and click submit")
        with gr.Row():
            status_output = gr.Textbox(label="Status", placeholder="Status will appear here")
        with gr.Row():
            gr.Button("Previous").click(lambda: navigate_lines('previous'), None, text_display)
            gr.Button("Forward").click(lambda: navigate_lines('forward'), None, text_display)
            gr.Button("Submit").click(process_audio, inputs=audio_input, outputs=status_output)

        # Register the components whose values the saver will write to the dataset
        # (same setup()/flag() pattern as the CSVLogger example above); the saver is
        # assumed to copy the local .wav path produced for the Audio component.
        hf_writer.setup([text_display, audio_input], "flagged_audio")

        file_upload.upload(upload_file, inputs=file_upload, outputs=text_display)

    return iface

# Launch the interface
iface = audio_capture_interface()
iface.launch()