ngxson (HF staff) committed
Commit 42bc647 · 1 Parent(s): 34f3d3d

add opening sound FX

.gitattributes CHANGED
@@ -32,4 +32,5 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.xz filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
+*.wav filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
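
The added rule routes .wav files through Git LFS like the other binary formats above. The same attribute line can be generated with the Git LFS CLI instead of editing .gitattributes by hand:

git lfs track "*.wav"

Run from the repository root, this appends exactly the filter/diff/merge line shown in the hunk.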
front/src/components/PodcastGenerator.tsx CHANGED
@@ -8,6 +8,7 @@ import {
   loadWavAndDecode,
   pickRand,
 } from '../utils/utils';
+import openingSoundSrc from '../opening-sound.wav';
 
 interface GenerationStep {
   turn: PodcastTurn;
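
Importing a .wav file as a module relies on the bundler treating it as an asset that resolves to a URL string (Vite and webpack asset modules both do this) and on TypeScript having an ambient declaration for the extension. A minimal sketch of such a declaration, assuming the project does not already ship one:

// Hypothetical ambient declaration (e.g. in a src/assets.d.ts file);
// the bundler replaces the import with the asset's URL at build time.
declare module '*.wav' {
  const src: string;
  export default src;
}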
 
@@ -130,6 +131,8 @@ export const PodcastGenerator = ({
       step.audioBuffer = await loadWavAndDecode(url);
       if (i === 0) {
         outputWav = step.audioBuffer;
+        const openingSound = await loadWavAndDecode(openingSoundSrc);
+        outputWav = joinAudio(openingSound, outputWav!, -1);
       } else {
         const lastStep = steps[i - 1];
         outputWav = joinAudio(
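
joinAudio is imported from utils.ts and its implementation is not part of this diff. A minimal sketch of the behavior the new call site appears to assume: mono buffers, with the third argument a gap in seconds where a negative value overlaps the end of the first clip with the start of the second. The name joinAudioSketch and the exact overlap semantics are assumptions, not the repo's code.

// Hypothetical sketch, not the repo's joinAudio. Assumes mono AudioBuffers
// with matching sample rates and a signed gap in seconds.
const joinAudioSketch = (
  a: AudioBuffer,
  b: AudioBuffer,
  gapSeconds: number
): AudioBuffer => {
  const sampleRate = a.sampleRate;
  // Index of b's first sample; a negative gap pulls b into a's tail.
  const offset = Math.max(0, a.length + Math.round(gapSeconds * sampleRate));
  const length = Math.max(a.length, offset + b.length);
  const out = new AudioContext().createBuffer(1, length, sampleRate);
  const data = out.getChannelData(0);
  data.set(a.getChannelData(0), 0);
  const bData = b.getChannelData(0);
  for (let i = 0; i < b.length; i++) {
    data[offset + i] += bData[i]; // sum the clips where they overlap
  }
  return out;
};

Under this reading, joinAudio(openingSound, outputWav!, -1) overlaps the last second of the jingle with the first turn; the non-null assertion is safe because outputWav was assigned on the previous line.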
front/src/utils/utils.ts CHANGED
@@ -219,7 +219,24 @@ export const loadWavAndDecode = async (url: string): Promise<AudioBuffer> => {
     throw new Error('AudioContext is not supported on this browser');
   }
   const audioCtx = new AudioContext();
-  const audioBuffer = await audioCtx.decodeAudioData(arrayBuffer);
+  let audioBuffer = await audioCtx.decodeAudioData(arrayBuffer);
+  // force mono
+  if (audioBuffer.numberOfChannels > 1) {
+    const monoBuffer = new AudioContext().createBuffer(
+      1,
+      audioBuffer.length,
+      audioBuffer.sampleRate
+    );
+    const monoData = monoBuffer.getChannelData(0);
+    for (let i = 0; i < audioBuffer.length; i++) {
+      let sum = 0;
+      for (let channel = 0; channel < audioBuffer.numberOfChannels; channel++) {
+        sum += audioBuffer.getChannelData(channel)[i];
+      }
+      monoData[i] = sum / audioBuffer.numberOfChannels;
+    }
+    audioBuffer = monoBuffer;
+  }
   return audioBuffer;
 };
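
The added block is a standard equal-weight downmix: each output sample is the mean of that sample across all input channels. The same logic factored into a standalone helper (hypothetical; toMono is not a function in this repo):

// Hypothetical helper mirroring the downmix added above:
// average every channel into a single-channel buffer.
const toMono = (input: AudioBuffer): AudioBuffer => {
  if (input.numberOfChannels === 1) return input;
  const mono = new AudioContext().createBuffer(1, input.length, input.sampleRate);
  const out = mono.getChannelData(0);
  for (let ch = 0; ch < input.numberOfChannels; ch++) {
    const data = input.getChannelData(ch);
    for (let i = 0; i < input.length; i++) {
      out[i] += data[i] / input.numberOfChannels; // equal-weight average
    }
  }
  return mono;
};

One design note: the diff allocates the mono buffer from a second AudioContext; reusing the existing audioCtx (or an OfflineAudioContext) would avoid holding an extra audio device handle, since createBuffer is available on both.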