第七色在线视频,2021少妇久久久久久久久久,亚洲欧洲精品成人久久av18,亚洲国产精品特色大片观看完整版,孙宇晨将参加特朗普的晚宴

為了賬號(hào)安全,請(qǐng)及時(shí)綁定郵箱和手機(jī)立即綁定
已解決430363個(gè)問題,去搜搜看,總會(huì)有你想問的

如何使用 AudioWorklet 獲取麥克風(fēng)音量

如何使用 AudioWorklet 獲取麥克風(fēng)音量

大話西游666 2022-10-27 16:58:27
我有興趣在 JavaScript 中連續(xù)讀取麥克風(fēng)音量。StackOverflow 上的許多現(xiàn)有解決方案(請(qǐng)參閱此處、此處和此處)使用的是自 2014 年起已棄用的 BaseAudioContext.createScriptProcessor()。我想在我的項(xiàng)目中使用面向未來的代碼,所以有人可以分享一個(gè)如何使用新的 AudioWorkletNode 讀取麥克風(fēng)音量的現(xiàn)代最小示例嗎?
查看完整描述

1 回答

?
RISEBY

TA貢獻(xiàn)1856條經(jīng)驗(yàn) 獲得超5個(gè)贊

讓我們看一下需要了解的一些要點(diǎn):

  • 這一切的改變都是為了避免延遲,創(chuàng)建了自己的線程,也就是說,運(yùn)行在音頻渲染線程(AudioWorkletGlobalScope)上。

  • 這種新的實(shí)現(xiàn)方式有兩部分:AudioWorkletProcessor 和 AudioWorkletNode。

  • AudioWorkletNode 至少需要兩件事:一個(gè) AudioContext 對(duì)象和作為字符串的處理器名稱。處理器定義可以通過新的 Audio Worklet 對(duì)象的 addModule () 調(diào)用來加載和注冊(cè)。

  • 包括 AudioWorklet 在內(nèi)的 Worklet API 僅在安全上下文中可用。在這種情況下,我們可以使用 localhost,但有必要知道這一點(diǎn)。

  • 我們需要至少從 AudioWorkletProcessor 向 AudioWorkletNode 傳達(dá)當(dāng)前值,或者在本例中是音量以對(duì)其進(jìn)行任何操作。

  • 有必要通過 navigator.getUserMedia 獲得訪問您的計(jì)算機(jī)麥克風(fēng)的權(quán)限。

/** Lazily-created AudioContext, initialized on the first button press. */
let audioContext

// LED meter palette, ordered from the quietest LED (blue) to the loudest (orange).
const ledColor = [
    "#064dac", "#064dac", "#064dac",
    "#06ac5b", "#15ac06", "#4bac06",
    "#80ac06", "#acaa06", "#ac8b06",
    "#ac5506",
]

// True until the first click performs the one-time audio setup.
let isFirtsClick = true
// Whether the microphone is currently being listened to (toggled per click).
let listeing = false


/** Callback invoked when the user refuses microphone access. */
function onMicrophoneDenied() {
    console.log('denied')
}


/**
 * Update the LED meter to reflect the detected volume.
 *
 * Every LED is first reset to its unlit look, then the first
 * `Math.round(vol)` LEDs are lit using their color from `ledColor`.
 *
 * @param {number} vol volume detected from the microphone, already
 *   scaled by the caller to the 0..10 LED range
 */
function leds(vol) {
    // Bug fix: the original local `leds` shadowed this function's own name;
    // use distinct identifiers instead.
    const ledElements = [...document.getElementsByClassName('led')]
    const litLeds = ledElements.slice(0, Math.round(vol))

    // Reset every LED to its unlit state.
    for (const led of ledElements) {
        led.style.boxShadow = "-2px -2px 4px 0px #a7a7a73d, 2px 2px 4px 0px #0a0a0e5e";
        led.style.height = "22px"
    }

    // Light the LEDs corresponding to the current volume.
    litLeds.forEach((led, i) => {
        led.style.boxShadow = `5px 2px 5px 0px #0a0a0e5e inset, -2px -2px 1px 0px #a7a7a73d inset, -2px -2px 30px 0px ${ledColor[i]} inset`;
        led.style.height = "25px"
    })
}


/**
 * Wire the granted microphone stream through an AudioWorkletNode to the
 * 'vumeter' AudioWorkletProcessor, then toggle listening on/off.
 *
 * @param {MediaStream} stream MediaStream handed over when the user
 *   grants microphone access
 */
async function onMicrophoneGranted(stream) {
    // One-time setup, performed only on the first button press.
    if (isFirtsClick) {
        audioContext = new AudioContext()

        // Load and register the AudioWorkletProcessor defined in a
        // separate script via addModule().
        await audioContext.audioWorklet.addModule('vumeter-processor.js')

        // Source node wrapping the user-granted MediaStream.
        const micSource = audioContext.createMediaStreamSource(stream)

        // Node bound to the processor registered under the name
        // 'vumeter' inside vumeter-processor.js.
        const workletNode = new AudioWorkletNode(audioContext, 'vumeter')

        // Messages posted from the processor's process() method carry
        // the measured volume level.
        workletNode.port.onmessage = (event) => {
            const sensibility = 5 // tunes how sensitive the meter feels
            const volume = event.data.volume ? event.data.volume : 0
            leds((volume * 100) / sensibility)
        }

        // microphone -> worklet node -> context output
        micSource.connect(workletNode).connect(audioContext.destination)

        isFirtsClick = false
    }

    // Toggle the button's pressed look and suspend/resume the context.
    const audioButton = document.getElementsByClassName('audio-control')[0]
    if (listeing) {
        audioContext.suspend()
        audioButton.style.boxShadow = "-2px -2px 4px 0px #a7a7a73d, 2px 2px 4px 0px #0a0a0e5e"
        audioButton.style.fontSize = "25px"
    } else {
        audioContext.resume()
        audioButton.style.boxShadow = "5px 2px 5px 0px #0a0a0e5e inset, -2px -2px 1px 0px #a7a7a73d inset"
        audioButton.style.fontSize = "24px"
    }

    listeing = !listeing
}


/**
 * Ask the user for microphone access and hand the resulting MediaStream
 * to onMicrophoneGranted, or report denial via onMicrophoneDenied.
 *
 * Bug fix: the original relied solely on the callback-based
 * navigator.getUserMedia, deprecated since 2014 — exactly what the
 * question wanted to avoid. Prefer the promise-based
 * navigator.mediaDevices.getUserMedia and keep the legacy path only as
 * a fallback for very old browsers.
 */
function activeSound () {
    try {
        if (navigator.mediaDevices && navigator.mediaDevices.getUserMedia) {
            navigator.mediaDevices
                .getUserMedia({ audio: true, video: false })
                .then(onMicrophoneGranted)
                .catch(onMicrophoneDenied)
            return
        }

        // Legacy fallback for browsers without mediaDevices support.
        navigator.getUserMedia = navigator.getUserMedia || navigator.webkitGetUserMedia || navigator.mozGetUserMedia;
        navigator.getUserMedia(
            { audio: true, video: false },
            onMicrophoneGranted,
            onMicrophoneDenied
        );
    } catch(e) {
        alert(e)
    }
}


// Start/stop the volume meter whenever the microphone button is clicked.
// (activeSound ignores its arguments, so it can be passed directly.)
document.getElementById('audio').addEventListener('click', activeSound)

在本節(jié)中,您可以知道麥克風(fēng)的音量:


// Exponential decay factor applied to the volume between blocks,
// so the meter falls smoothly instead of flickering.
const SMOOTHING_FACTOR = 0.8;
const MINIMUM_VALUE = 0.00001;

// Register the processor under the name "vumeter"; the main thread
// constructs its AudioWorkletNode with this same name.
registerProcessor('vumeter', class extends AudioWorkletProcessor {

  _volume              // last smoothed RMS volume
  _updateIntervalInMS  // how often (ms) to report the volume
  _nextUpdateFrame     // sample frames remaining until the next report

  constructor () {
    super();
    this._volume = 0;
    this._updateIntervalInMS = 25;
    this._nextUpdateFrame = this._updateIntervalInMS;
    // The main thread may tune the reporting interval through the port.
    this.port.onmessage = (event) => {
      if (event.data.updateIntervalInMS)
        this._updateIntervalInMS = event.data.updateIntervalInMS;
    }
  }

  // Reporting interval converted from milliseconds to sample frames.
  get intervalInFrames () {
    return this._updateIntervalInMS / 1000 * sampleRate;
  }

  process (inputs, outputs, parameters) {
    const input = inputs[0];

    // The input is down-mixed to mono; if nothing is connected,
    // zero channels are passed in, so guard against that.
    if (input.length > 0) {
      const samples = input[0];

      // Root-mean-square level of the current block of samples.
      let sum = 0;
      for (const s of samples)
        sum += s * s;
      const rms = Math.sqrt(sum / samples.length);

      // Keep whichever is louder: the fresh RMS or the decayed
      // previous volume, so peaks register instantly and fade slowly.
      this._volume = Math.max(rms, this._volume * SMOOTHING_FACTOR);

      // Periodically sync the volume back to the main thread.
      this._nextUpdateFrame -= samples.length;
      if (this._nextUpdateFrame < 0) {
        this._nextUpdateFrame += this.intervalInFrames;
        this.port.postMessage({volume: this._volume});
      }
    }

    return true;
  }

});

最后這是 html,您可以在其中顯示檢測到的音量:


<!-- Volume meter UI: a label, ten LED cells, and the microphone
     toggle button. All behavior lives in index.js. -->
<div class="container">
    <span>Microphone</span>

    <!-- The .led elements are selected by class in leds() and lit
         in order according to the measured volume. -->
    <div class="volumen-wrapper">
        <div class="led"></div>
        <div class="led"></div>
        <div class="led"></div>
        <div class="led"></div>
        <div class="led"></div>
        <div class="led"></div>
        <div class="led"></div>
        <div class="led"></div>
        <div class="led"></div>
        <div class="led"></div>
    </div>

    <!-- #audio toggles listening on/off via the click listener. -->
    <div class="control-audio-wrapper">
        <div id="audio" class="audio-control">&#127908;</div>
    </div>
</div>

<script type="module" src="./index.js"></script>

這是結(jié)果

http://img1.sycdn.imooc.com//635a48580001957106320381.jpg

這是我在 codepen中的實(shí)現(xiàn)


查看完整回答
反對(duì) 回復(fù) 2022-10-27
  • 1 回答
  • 0 關(guān)注
  • 354 瀏覽
慕課專欄
更多

添加回答

舉報(bào)

0/150
提交
取消
微信客服

購課補(bǔ)貼
聯(lián)系客服咨詢優(yōu)惠詳情

幫助反饋 APP下載

慕課網(wǎng)APP
您的移動(dòng)學(xué)習(xí)伙伴

公眾號(hào)

掃描二維碼
關(guān)注慕課網(wǎng)微信公眾號(hào)