This example shows how to use two audio sources, and alter one of them based on the other. In this case we create an audio Ducker, that will lower the volume of the primary track if the secondary track produces sound.
The ScriptProcessorNode will send regular events to its audioprocess handler. In this handler, which is linked to the secondary audio source, we calculate the "loudness" of the audio, and use it to alter a dynamic compressor on the primary audio source. Both are then sent to the speakers/headphones of the user. The result is very abrupt volume changes in the primary audio track whenever sound is detected in the secondary audio track. We could make this smoother by averaging the loudness over time, and by delaying the secondary audio slightly so the volume ducks just before the secondary sound actually plays — but the basic process should be clear from this example.
//The DuckingNode will heavily compress the primary source when the secondary source produces sound
class DuckingNode {
  /**
   * Build the ducking graph: a DynamicsCompressorNode for the primary path
   * and a ScriptProcessorNode that measures the secondary path's loudness
   * and drives the compressor's threshold.
   *
   * @param {AudioContext} context - context used to create the internal nodes.
   */
  constructor(context) {
    const blocksize = 2048;       // samples per processing quantum
    const normalThreshold = -50;  // compressor threshold (dB) while ducking
    const duckThreshold = 0.15;   // secondary loudness above which we duck fully
    //Creating nodes
    this.compressor = context.createDynamicsCompressor();
    // NOTE(review): ScriptProcessorNode is deprecated in favor of
    // AudioWorkletNode, but it keeps this example self-contained.
    this.processor = context.createScriptProcessor(blocksize, 2, 2);
    //Configuring nodes
    this.compressor.threshold.value = normalThreshold;
    this.compressor.knee.value = 0;
    this.compressor.ratio.value = 12;
    // BUG FIX: the original assigned `this.compressor.reduction.value = -20`.
    // `reduction` is a read-only float attribute (not an AudioParam) per the
    // Web Audio API spec, so that assignment throws a TypeError in strict-mode
    // (module) code and is a silent no-op otherwise. It has been removed.
    this.compressor.attack.value = 0;
    this.compressor.release.value = 1.5;
    // Arrow function keeps `this` bound to the DuckingNode instance.
    this.processor.onaudioprocess = (audioProcessingEvent) => {
      const inputBuffer = audioProcessingEvent.inputBuffer;
      const outputBuffer = audioProcessingEvent.outputBuffer;
      let total = 0.0;
      // Total number of samples summed across all channels.
      const len = inputBuffer.length * outputBuffer.numberOfChannels;
      for (let channel = 0; channel < outputBuffer.numberOfChannels; channel++) {
        const inputData = inputBuffer.getChannelData(channel);
        const outputData = outputBuffer.getChannelData(channel);
        for (let sample = 0; sample < inputBuffer.length; sample++) {
          // Pass the secondary audio through unchanged.
          outputData[sample] = inputData[sample];
          total += Math.abs(inputData[sample]);
        }
      }
      // Loudness estimate. NOTE(review): this is sqrt(mean |x|), not a true
      // RMS (which would square the samples) — kept as-is to match the
      // original behavior described in the surrounding text.
      const rms = Math.sqrt(total / len);
      // Quiet secondary (rms < duckThreshold): threshold rises above
      // normalThreshold, so the compressor barely touches the primary track.
      // Loud secondary (rms >= duckThreshold): the min() term is 0 and the
      // threshold drops to normalThreshold, compressing the primary heavily.
      this.compressor.threshold.value =
        normalThreshold + Math.min(rms - duckThreshold, 0) * 5 * normalThreshold;
    };
  }
  /** Node the primary (to-be-ducked) source should connect to. */
  get primaryInput() {
    return this.compressor;
  }
  /** Node the secondary (controlling) source should connect to. */
  get secondaryInput() {
    return this.processor;
  }
  /** Connect the compressed primary path to a destination node. */
  connectPrimary(node) {
    this.compressor.connect(node);
  }
  /** Connect the pass-through secondary path to a destination node. */
  connectSecondary(node) {
    this.processor.connect(node);
  }
};
// Create the audio context, falling back to the legacy WebKit-prefixed name.
const audioContext = new (window.AudioContext || window.webkitAudioContext)();
// Grab the two <audio> elements on the page. Ideally they have the autoplay attribute.
const musicElement = document.getElementById("music");
const voiceoverElement = document.getElementById("voiceover");
// Wrap each element in a MediaElementAudioSourceNode so it can enter the graph.
const musicSourceNode = audioContext.createMediaElementSource(musicElement);
const voiceoverSourceNode = audioContext.createMediaElementSource(voiceoverElement);
// Build the ducker and wire everything up: music is compressed by the ducker,
// the voiceover drives it, and both paths end at the speakers.
const duckingNode = new DuckingNode(audioContext);
musicSourceNode.connect(duckingNode.primaryInput);
voiceoverSourceNode.connect(duckingNode.secondaryInput);
duckingNode.connectPrimary(audioContext.destination);
duckingNode.connectSecondary(audioContext.destination);
Part of this example comes from an answer by Kevin Ennis and from example code on MDN.