实时更改两个音频源

此示例显示如何使用两个音频源,并根据另一个更改其中一个音频源。在这种情况下,我们创建一个音频 Ducker,如果辅助音轨产生声音,它将降低主音轨的音量。

ScriptProcessorNode 将定期事件发送到其 audioprocess 处理程序。在这个链接到辅助音频源的处理程序中,我们计算音频的响度,并用它来改变主音频源上的动态压缩器。然后将两者发送到用户的扬声器/耳机。结果是当在次要音频轨道中检测到声音时主要音频轨道中的音量变化非常突然。我们可以通过使用平均值来使这更平滑,并且在检测到辅助音频之前使用延迟线来改变音量,但是在该示例中该过程应该是清楚的。

// The DuckingNode heavily compresses the primary source whenever the
// secondary source produces sound ("ducking", as commonly used to lower
// music under a voice-over).
class DuckingNode {
  /**
   * @param {AudioContext} context - Audio context used to create the
   *   internal compressor and script-processor nodes.
   */
  constructor(context) {
    // Samples per processing block; also the ScriptProcessor buffer size.
    const blockSize = 2048;
    // Compressor threshold (dB) applied while the secondary track is audible.
    const normalThreshold = -50;
    // Level of the secondary track above which full ducking is applied.
    const duckThreshold = 0.15;

    // Creating nodes
    this.compressor = context.createDynamicsCompressor();
    this.processor = context.createScriptProcessor(blockSize, 2, 2);

    // Configuring the compressor for aggressive gain reduction.
    this.compressor.threshold.value = normalThreshold;
    this.compressor.knee.value = 0;
    this.compressor.ratio.value = 12;
    // NOTE: DynamicsCompressorNode.reduction is read-only (it *reports* the
    // gain reduction currently applied); the original assignment
    // `this.compressor.reduction.value = -20` was a silent no-op and has
    // been removed.
    this.compressor.attack.value = 0;
    this.compressor.release.value = 1.5;

    // Arrow function keeps `this` bound to the DuckingNode instance.
    this.processor.onaudioprocess = (audioProcessingEvent) => {
      const inputBuffer = audioProcessingEvent.inputBuffer;
      const outputBuffer = audioProcessingEvent.outputBuffer;
      let total = 0.0;
      const len = blockSize * outputBuffer.numberOfChannels;

      for (let channel = 0; channel < outputBuffer.numberOfChannels; channel++) {
        const inputData = inputBuffer.getChannelData(channel);
        const outputData = outputBuffer.getChannelData(channel);

        for (let sample = 0; sample < inputBuffer.length; sample++) {
          // Pass the secondary audio through unchanged.
          outputData[sample] = inputData[sample];
          total += Math.abs(inputData[sample]);
        }
      }

      // Loudness estimate of the secondary signal for this block.
      // NOTE(review): this is sqrt(mean(|x|)), not a true root mean square
      // (which would sum x*x); kept as-is because duckThreshold appears to
      // be tuned for this measure — confirm before changing.
      const rms = Math.sqrt(total / len);

      // Raise the threshold (toward 0 dB, i.e. less compression) while the
      // secondary track is quieter than duckThreshold; clamp at
      // normalThreshold once it is loud enough.
      this.compressor.threshold.value =
        normalThreshold + Math.min(rms - duckThreshold, 0) * 5 * normalThreshold;
    };
  }

  /** Node the primary (ducked) source should connect to. */
  get primaryInput() {
    return this.compressor;
  }

  /** Node the secondary (controlling) source should connect to. */
  get secondaryInput() {
    return this.processor;
  }

  /** Connect the primary chain's output (the compressor) to `node`. */
  connectPrimary(node) {
    this.compressor.connect(node);
  }

  /** Connect the secondary chain's output (the processor) to `node`. */
  connectSecondary(node) {
    this.processor.connect(node);
  }
}
// Create the audio context (WebKit-prefixed fallback for older Safari).
const audioContext = new (window.AudioContext || window.webkitAudioContext)();

// Grab the two <audio> elements on the page. Ideally they carry the
// autoplay attribute so playback starts immediately.
const musicElement = document.getElementById("music");
const voiceoverElement = document.getElementById("voiceover");

// Wrap each element in a media-element source node.
const musicSourceNode = audioContext.createMediaElementSource(musicElement);
const voiceoverSourceNode = audioContext.createMediaElementSource(voiceoverElement);

const duckingNode = new DuckingNode(audioContext);

// Wire up the graph: the music is ducked by the voice-over, and both
// chains feed the speakers/headphones.
musicSourceNode.connect(duckingNode.primaryInput);
voiceoverSourceNode.connect(duckingNode.secondaryInput);
duckingNode.connectPrimary(audioContext.destination);
duckingNode.connectSecondary(audioContext.destination);

这个示例的部分内容来自 Kevin Ennis 的一个 Stack Overflow 回答以及 MDN 上的示例代码。