import WavEncoder from 'wav-encoder';
export const SOUND_BYTE_LIMIT = 10 * 1000 * 1000; // 10mb
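/**
 * Compute the scaled root-mean-square (RMS) level of a range of samples.
 * @param {Float32Array} samples - Audio samples
 * @param {number} start - Index of the first sample to include
 * @param {number} end - Index after the last sample to include
 * @param {number} [scaling=0.55] - Scaling factor applied before the final square root
 * @returns {number} Scaled RMS level of the given range
 */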
const _computeRMS = function (samples, start, end, scaling = 0.55) {
    const length = end - start;
    if (length === 0) return 0;
    // Calculate RMS, adapted from https://github.com/Tonejs/Tone.js/blob/master/Tone/component/Meter.js#L88
    let sum = 0;
    for (let i = start; i < end; i++) {
        const sample = samples[i];
        sum += sample ** 2;
    }
    const rms = Math.sqrt(sum / length);
    const val = rms / scaling;
    return Math.sqrt(val);
};
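/**
 * Compute the scaled RMS level of an entire buffer of samples.
 * @param {Float32Array} samples - Audio samples
 * @param {number} [scaling] - Scaling factor, see _computeRMS
 * @returns {number} Scaled RMS level of the buffer
 */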
const computeRMS = (samples, scaling) => _computeRMS(samples, 0, samples.length, scaling);
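/**
 * Compute one scaled RMS level per fixed-size chunk of the given samples.
 * @param {Float32Array} samples - Audio samples
 * @param {number} [chunkSize=1024] - Number of samples per chunk
 * @returns {Array<number>} Scaled RMS level of each chunk, in order
 */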
const computeChunkedRMS = function (samples, chunkSize = 1024) {
    const sampleCount = samples.length;
    const chunkLevels = [];
    for (let i = 0; i < sampleCount; i += chunkSize) {
        const maxIndex = Math.min(sampleCount, i + chunkSize);
        chunkLevels.push(_computeRMS(samples, i, maxIndex));
    }
    return chunkLevels;
};
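/**
 * Encode the given samples as a .wav file and add the resulting sound to the VM.
 * @param {object} vm - The Scratch virtual machine instance
 * @param {Float32Array} samples - Mono audio samples to encode
 * @param {number} sampleRate - Sample rate of the audio, in Hz
 * @param {string} name - Name for the new sound; the VM may rename it to avoid conflicts
 * @param {?function} callback - Called after the sound has been added to the VM
 */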
const encodeAndAddSoundToVM = function (vm, samples, sampleRate, name, callback) {
    WavEncoder.encode({
        sampleRate: sampleRate,
        channelData: [samples]
    }).then(wavBuffer => {
        const vmSound = {
            format: '',
            dataFormat: 'wav',
            rate: sampleRate,
            sampleCount: samples.length
        };

        // Create an asset from the encoded .wav and get resulting md5
        const storage = vm.runtime.storage;
        vmSound.asset = storage.createAsset(
            storage.AssetType.Sound,
            storage.DataFormat.WAV,
            new Uint8Array(wavBuffer),
            null,
            true // generate md5
        );
        vmSound.assetId = vmSound.asset.assetId;

        // update vmSound object with md5 property
        vmSound.md5 = `${vmSound.assetId}.${vmSound.dataFormat}`;

        // The VM will update the sound name to a fresh name
        vmSound.name = name;
        vm.addSound(vmSound).then(() => {
            if (callback) callback();
        });
    });
};
/**
 * @typedef SoundBuffer
 * @type {Object}
 * @property {Float32Array} samples Array of audio samples
 * @property {number} sampleRate Audio sample rate
 */
/**
 * Downsample the given buffer to try to reduce its encoded file size below SOUND_BYTE_LIMIT.
 * @param {SoundBuffer} buffer - Buffer to resample
 * @param {function(SoundBuffer, number):Promise<SoundBuffer>} resampler - resampler function
 * @returns {Promise<SoundBuffer>} Promise that resolves to the original buffer if it is already
 *     within the byte limit, or to a buffer resampled to 22050 Hz otherwise
 */
const downsampleIfNeeded = (buffer, resampler) => {
    const {samples, sampleRate} = buffer;
    const encodedByteLength = samples.length * 2; /* bitDepth 16 bit */
    // Resolve immediately if already within byte limit
    if (encodedByteLength < SOUND_BYTE_LIMIT) {
        return Promise.resolve({samples, sampleRate});
    }
    // TW: Don't check if the sound will still fit at this reduced sample rate.
    // Instead the GUI will show a warning if it's too large.
    return resampler({samples, sampleRate}, 22050);
};
/**
 * Drop every other sample of an audio buffer as a last-resort way of downsampling.
 * @param {SoundBuffer} buffer - Buffer to resample
 * @returns {SoundBuffer} Downsampled buffer with half the sample rate
 */
const dropEveryOtherSample = buffer => {
    const newLength = Math.floor(buffer.samples.length / 2);
    const newSamples = new Float32Array(newLength);
    for (let i = 0; i < newLength; i++) {
        newSamples[i] = buffer.samples[i * 2];
    }
    return {
        samples: newSamples,
        sampleRate: buffer.sampleRate / 2
    };
};
export {
    computeRMS,
    computeChunkedRMS,
    encodeAndAddSoundToVM,
    downsampleIfNeeded,
    dropEveryOtherSample
};
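
// Example usage (illustrative sketch, not part of this module):
// `someResampler` is assumed to be a caller-supplied function matching the
// resampler signature documented on downsampleIfNeeded above.
//
// downsampleIfNeeded({samples, sampleRate}, someResampler)
//     .then(({samples: newSamples, sampleRate: newRate}) =>
//         encodeAndAddSoundToVM(vm, newSamples, newRate, 'recording1'));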