tor-browser

The Tor Browser
git clone https://git.dasho.dev/tor-browser.git
Log | Files | Refs | README | LICENSE

SpeechRecognition-concurrentMediaStreamTrack-manual.https.html (2631B)


      1 <!DOCTYPE html>
      2 <html lang="en">
      3 <title>SpeechRecognition Concurrent MediaStreamTracks</title>
      4 
      5 <script src="/resources/testharness.js"></script>
      6 <script src="/resources/testharnessreport.js"></script>
      7 
      8 <script>
      9 async function getAudioTrackFromFile(filePath) {
           // Fetch and decode an audio file, play it through a
           // MediaStreamAudioDestinationNode, and return the live audio
           // MediaStreamTrack that carries the decoded audio.
     10    const audioContext = new AudioContext();
     11    const response = await fetch(filePath);
     12    const arrayBuffer = await response.arrayBuffer();
     13    const audioBuffer = await audioContext.decodeAudioData(arrayBuffer);
     14    const source = audioContext.createBufferSource();
     15    source.buffer = audioBuffer;
     16 
     17    const destination = audioContext.createMediaStreamDestination();
     18    source.connect(destination);
           // Start playback immediately so the returned track is already
           // producing audio by the time recognition begins.
     19    source.start();
     20 
           // NOTE(review): the AudioContext is never closed here; acceptable
           // for a short-lived test page, but each call leaves a running
           // context behind — confirm this is intentional for the harness.
     21    return destination.stream.getAudioTracks()[0];
     22 }
     23 
        // Verify that two SpeechRecognition instances can run concurrently,
        // each fed by its own MediaStreamTrack, and that both produce the
        // expected transcript for the reference recording.
     24 promise_test(async (t) => {
     25    const lang = "en-US";
           // Fall back to the prefixed constructor where the unprefixed
           // SpeechRecognition is not available.
     26    window.SpeechRecognition = window.SpeechRecognition || window.webkitSpeechRecognition;
     27 
     28    // Create two SpeechRecognition instances
     29    const speechRecognition1 = new SpeechRecognition();
     30    speechRecognition1.processLocally = false;
     31    speechRecognition1.lang = lang;
     32    const speechRecognition2 = new SpeechRecognition();
     33    speechRecognition2.processLocally = false;
     34    speechRecognition2.lang = lang;
     35 
           // Two independent tracks decoded from the same reference clip.
     36    const audioTrack1 = await getAudioTrackFromFile("/media/speech.wav");
     37    const audioTrack2 = await getAudioTrackFromFile("/media/speech.wav");
     38 
     39    assert_true(audioTrack1 instanceof MediaStreamTrack, "Audio track 1 should be a valid MediaStreamTrack");
     40    assert_true(audioTrack2 instanceof MediaStreamTrack, "Audio track 2 should be a valid MediaStreamTrack");
     41 
           // Wire up result handlers before starting recognition so no
           // result event can be missed. Each promise resolves with the
           // first alternative of the first result.
     42    const recognitionPromise1 = new Promise((resolve) => {
     43        speechRecognition1.onresult = (event) => {
     44            const transcript = event.results[0][0].transcript;
     45            resolve(transcript);
     46        };
     47    });
     48 
     49    const recognitionPromise2 = new Promise((resolve) => {
     50        speechRecognition2.onresult = (event) => {
     51            const transcript = event.results[0][0].transcript;
     52            resolve(transcript);
     53        };
     54    });
     55 
           // Start both recognizers before awaiting either promise so the
           // two sessions genuinely overlap (the point of this test).
     56    speechRecognition1.start(audioTrack1);
     57    speechRecognition2.start(audioTrack2);
     58 
           // Sequential awaits are fine here: both sessions are already
           // running, so this only orders the result collection.
     59    const transcript1 = await recognitionPromise1;
     60    const transcript2 = await recognitionPromise2;
     61 
     62    assert_equals(transcript1.toLowerCase(), "this is a sentence in a single segment", "Speech recognition 1 should correctly recognize speech");
     63    assert_equals(transcript2.toLowerCase(), "this is a sentence in a single segment", "Speech recognition 2 should correctly recognize speech");
     64 }, "Two SpeechRecognition instances should simultaneously recognize speech from audio files.");
     65 </script>
     66 </html>