SpeechRecognition-phrases-manual.https.html (3825B)
<!DOCTYPE html>
<html lang="en">
<meta charset="utf-8">
<meta name="timeout" content="long">
<title>SpeechRecognition Phrases</title>

<script src="/resources/testdriver.js"></script>
<script src="/resources/testdriver-vendor.js"></script>
<script src="/resources/testharness.js"></script>
<script src="/resources/testharnessreport.js"></script>

<script>
// Decode an audio file into a MediaStreamTrack that can be fed to
// SpeechRecognition.start(track). The returned track carries the decoded
// audio played once from the beginning.
// NOTE(review): the AudioContext created here is never closed; it must stay
// alive while recognition consumes the track, but consider closing it in a
// test cleanup step to release audio resources — confirm against harness
// lifetime expectations.
async function getAudioTrackFromFile(filePath) {
  const audioContext = new AudioContext();
  const response = await fetch(filePath);
  const arrayBuffer = await response.arrayBuffer();
  const audioBuffer = await audioContext.decodeAudioData(arrayBuffer);
  const source = audioContext.createBufferSource();
  source.buffer = audioBuffer;

  const destination = audioContext.createMediaStreamDestination();
  source.connect(destination);
  source.start();

  return destination.stream.getAudioTracks()[0];
}

promise_test(async (t) => {
  window.SpeechRecognition = window.SpeechRecognition || window.webkitSpeechRecognition;

  // Install en-US for on-device speech recognition. install() requires a
  // user gesture, which test_driver.bless() provides.
  const installOptions = { langs: ["en-US"], processLocally: true };
  const installPromise = test_driver.bless(
    "Install on-device en-US speech recognition",
    () => SpeechRecognition.install(installOptions)
  );
  assert_true(
    installPromise instanceof Promise,
    "SpeechRecognition.install() should return a Promise."
  );
  const installResult = await installPromise;
  assert_true(
    installResult,
    "SpeechRecognition.install() for en-US should resolve with true."
  );

  // Verify the audio track for recognition context exists.
  const audioTrack = await getAudioTrackFromFile("/media/recognition_context.mp3");
  assert_true(
    audioTrack instanceof MediaStreamTrack,
    "Audio track should be a valid MediaStreamTrack"
  );

  // Create the first speech recognition with a mode that does not support
  // contextual biasing. Note that this may vary between browsers in the
  // future.
  const recognition1 = new SpeechRecognition();
  recognition1.processLocally = false;
  recognition1.lang = "en-US";

  // Wrap the handler in t.step_func so that an assertion failure inside the
  // event callback is attributed to this test instead of surfacing as an
  // uncaught (harness-level) error.
  recognition1.onerror = t.step_func(function(event) {
    assert_equals(
      event.error,
      "phrases-not-supported",
      "First speech recognition should throw a phrases-not-supported error"
    );
  });

  // NOTE(review): recognition1 is never started, and the test can resolve
  // before this error event would fire — confirm whether setting phrases
  // alone is specified to dispatch the error, or whether recognition1
  // should be started and its error awaited.
  recognition1.phrases.push(new SpeechRecognitionPhrase("test", 1.0));

  // Create the second speech recognition with a mode that supports
  // contextual biasing (on-device processing).
  const recognition2 = new SpeechRecognition();
  recognition2.processLocally = true;
  recognition2.lang = "en-US";

  // On-device speech recognition should not throw an error.
  recognition2.onerror = t.unreached_func("recognition2 should not error");

  // Bias recognition toward the domain terms that appear in the audio.
  recognition2.phrases = [
    new SpeechRecognitionPhrase("ASIC", 3.0),
    new SpeechRecognitionPhrase("FPGA", 3.0)
  ];

  const recognitionPromise = new Promise((resolve) => {
    recognition2.onresult = (event) => {
      const transcript = event.results[0][0].transcript;
      const words = transcript.toLowerCase().split(' ');
      // Resolve when the last word is "expectations".
      if (words.length > 0 && words[words.length - 1] === "expectations") {
        resolve(transcript);
      }
    };
  });
  recognition2.start(audioTrack);

  const transcript = await recognitionPromise;
  assert_equals(
    transcript.toLowerCase(),
    "the report confirmed that the asic's throughput and " +
    "the fpga's latency were both below expectations",
    "Second speech recognition should correctly recognize the phrases"
  );
}, "SpeechRecognition should recognize speech with the given contextual information.");
</script>
</html>