audiobuffersource-start.html (6017B)
<!DOCTYPE html>
<html>
  <head>
    <title>
      audiobuffersource-start.html
    </title>
    <script src="/resources/testharness.js"></script>
    <script src="/resources/testharnessreport.js"></script>
    <script src="/webaudio/resources/audit-util.js"></script>
    <script src="/webaudio/resources/audiobuffersource-testing.js"></script>
  </head>
  <body>
    <script id="layout-test-code">

      // The following test cases assume an AudioBuffer of length 8 whose PCM
      // data is a linear ramp, 0, 1, 2, 3,...
      //
      // Each entry describes one call pattern of AudioBufferSourceNode.start():
      //   offsetFrame    - buffer offset in frames, or 'none' to omit the arg
      //   durationFrames - duration in frames, or 'none' to omit the arg
      //   renderFrames   - how many rendered frames to compare
      //   expected       - the exact sample values those frames must contain
      const tests = [

        {
          description:
              'start(when): implicitly play whole buffer from beginning to end',
          offsetFrame: 'none',
          durationFrames: 'none',
          renderFrames: 16,
          playbackRate: 1,
          expected: [0, 1, 2, 3, 4, 5, 6, 7, 0, 0, 0, 0, 0, 0, 0, 0]
        },

        {
          description:
              'start(when, 0): play whole buffer from beginning to end explicitly giving offset of 0',
          offsetFrame: 0,
          durationFrames: 'none',
          renderFrames: 16,
          playbackRate: 1,
          expected: [0, 1, 2, 3, 4, 5, 6, 7, 0, 0, 0, 0, 0, 0, 0, 0]
        },

        {
          description:
              'start(when, 0, 8_frames): play whole buffer from beginning to end explicitly giving offset of 0 and duration of 8 frames',
          offsetFrame: 0,
          durationFrames: 8,
          renderFrames: 16,
          playbackRate: 1,
          expected: [0, 1, 2, 3, 4, 5, 6, 7, 0, 0, 0, 0, 0, 0, 0, 0]
        },

        {
          description:
              'start(when, 4_frames): play with explicit non-zero offset',
          offsetFrame: 4,
          durationFrames: 'none',
          renderFrames: 16,
          playbackRate: 1,
          expected: [4, 5, 6, 7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
        },

        {
          description:
              'start(when, 4_frames, 4_frames): play with explicit non-zero offset and duration',
          offsetFrame: 4,
          durationFrames: 4,
          renderFrames: 16,
          playbackRate: 1,
          expected: [4, 5, 6, 7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
        },

        {
          description:
              'start(when, 7_frames): play with explicit non-zero offset near end of buffer',
          offsetFrame: 7,
          durationFrames: 1,
          renderFrames: 16,
          playbackRate: 1,
          expected: [7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
        },

        {
          description:
              'start(when, 8_frames): play with explicit offset at end of buffer',
          offsetFrame: 8,
          durationFrames: 0,
          renderFrames: 16,
          playbackRate: 1,
          expected: [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
        },

        {
          description:
              'start(when, 9_frames): play with explicit offset past end of buffer',
          // FIX: was offsetFrame: 8, which duplicated the previous
          // "offset at end of buffer" case instead of testing an offset
          // past the end.  An offset past the buffer end must still
          // render silence.
          offsetFrame: 9,
          durationFrames: 0,
          renderFrames: 16,
          playbackRate: 1,
          expected: [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
        },

        // When the duration exceeds the buffer, just play to the end of the
        // buffer. (This is different from the case when we're looping, which is
        // tested in loop-comprehensive.)
        {
          description:
              'start(when, 0, 15_frames): play with whole buffer, with long duration (clipped)',
          offsetFrame: 0,
          durationFrames: 15,
          renderFrames: 16,
          playbackRate: 1,
          expected: [0, 1, 2, 3, 4, 5, 6, 7, 0, 0, 0, 0, 0, 0, 0, 0]
        },

        // Enable test when AudioBufferSourceNode hack is fixed:
        // https://bugs.webkit.org/show_bug.cgi?id=77224 { description:
        // "start(when, 3_frames, 3_frames): play a middle section with explicit
        // offset and duration",
        // offsetFrame: 3, durationFrames: 3, renderFrames: 16, playbackRate:
        // 1, expected: [4,5,6,7,0,0,0,0,0,0,0,0,0,0,0,0] },

      ];

      const sampleRate = 44100;
      const bufferFrameLength = 8;
      // Each scenario owns a 32-frame window of the rendered output, so
      // scheduled sources can never bleed into a neighbour's window
      // (buffer is 8 frames, each comparison reads at most 16).
      const testSpacingFrames = 32;
      const testSpacingSeconds = testSpacingFrames / sampleRate;
      const totalRenderLengthFrames = tests.length * testSpacingFrames;

      promise_test(async t => {
        const context = new OfflineAudioContext(
            /* channels */ 1,
            /* length */ totalRenderLengthFrames,
            /* rate */ sampleRate);

        const buffer = createTestBuffer(context, bufferFrameLength);

        // Schedule each scenario at a unique, non‑overlapping time offset.
        // 'none' sentinels control the arity of the start() call so that
        // the implicit-argument code paths are exercised too.
        tests.forEach((test, index) => {
          const source = new AudioBufferSourceNode(context);
          source.buffer = buffer;
          source.playbackRate.value = test.playbackRate;
          source.connect(context.destination);

          const startTime = index * testSpacingSeconds;

          if (test.offsetFrame === 'none' && test.durationFrames === 'none') {
            source.start(startTime);
          } else if (test.durationFrames === 'none') {
            source.start(startTime, test.offsetFrame / sampleRate);
          } else {
            source.start(
                startTime,
                test.offsetFrame / sampleRate,
                test.durationFrames / sampleRate);
          }
        });

        const rendered = await context.startRendering();
        const renderedData = rendered.getChannelData(0);

        // Validate every scenario’s rendered segment.
        tests.forEach((test, index) => {
          const begin = index * testSpacingFrames;
          const end = begin + test.renderFrames;
          const actual = renderedData.slice(begin, end);

          assert_array_equals(
              actual,
              test.expected,
              `${test.description} – rendered output matches expectation`);
        });
      }, 'AudioBufferSourceNode start() – sub‑sample scheduling semantics');
    </script>
  </body>
</html>