tor-browser

The Tor Browser
git clone https://git.dasho.dev/tor-browser.git
Log | Files | Refs | README | LICENSE

VideoTrackGenerator.https.html (12965B)


      1 <!DOCTYPE html>
      2 <html>
      3 <head>
      4 <title>MediaStream Insertable Streams - VideoTrackGenerator</title>
      5 <script src="/resources/testharness.js"></script>
      6 <script src="/resources/testharnessreport.js"></script>
      7 <script src="/webrtc/RTCPeerConnection-helper.js"></script>
      8 </head>
      9 <body>
     10  <p class="instructions">If prompted, use the accept button to give permission to use your audio and video devices.</p>
     11  <h1 class="instructions">Description</h1>
     12  <p class="instructions">This test checks that generating video MediaStreamTracks from VideoTrackGenerator works as expected.</p>
     13  <script>
     14 
     15    const pixelColour = [50, 100, 150, 255];
     16    const height = 240;
     17    const width = 320;
     18    function makeVideoFrame(timestamp) {
     19      const canvas = new OffscreenCanvas(width, height);
     20 
     21      const ctx = canvas.getContext('2d', {alpha: false});
     22      ctx.fillStyle = `rgba(${pixelColour.join()})`;
     23      ctx.fillRect(0, 0, width, height);
     24 
     25      return new VideoFrame(canvas, {timestamp, alpha: 'discard'});
     26    }
     27 
     28    async function getVideoFrame() {
     29      const stream = await getNoiseStream({video: true});
     30      const input_track = stream.getTracks()[0];
     31      const processor = new MediaStreamTrackProcessor(input_track);
     32      const reader = processor.readable.getReader();
     33      const result = await reader.read();
     34      input_track.stop();
     35      return result.value;
     36    }
     37 
     38    function assertPixel(t, bytes, expected, epsilon = 5) {
     39      for (let i = 0; i < bytes.length; i++) {
     40        t.step(() => {
     41          assert_less_than(Math.abs(bytes[i] - expected[i]),  epsilon, "Mismatched pixel");
     42        });
     43      }
     44    }
     45 
  // Sets up a caller->callee RTCPeerConnection pair carrying |track| and
  // attaches the remote side's track to |output| (a media element) via a new
  // MediaStream. Resolves once the callee's connection state reaches
  // 'connected'. Both connections, and |track| itself, are closed/stopped in
  // test cleanup. The signaling helpers used here come from
  // /webrtc/RTCPeerConnection-helper.js; the call order (ICE exchange, offer,
  // then answer) follows that helper's expected sequence.
  async function initiateSingleTrackCall(t, track, output) {
    const caller = new RTCPeerConnection();
    t.add_cleanup(() => caller.close());
    const callee = new RTCPeerConnection();
    t.add_cleanup(() => callee.close());
    caller.addTrack(track);
    t.add_cleanup(() => track.stop());

    exchangeIceCandidates(caller, callee);
    // Wait for the first track.
    const e = await exchangeOfferAndListenToOntrack(t, caller, callee);
    output.srcObject = new MediaStream([e.track]);
    // Exchange answer.
    await exchangeAnswer(caller, callee);
    await waitForConnectionStateChange(callee, ['connected']);
  }
     62 
     63    promise_test(async t => {
     64      const videoFrame = await getVideoFrame();
     65      const originalWidth = videoFrame.displayWidth;
     66      const originalHeight = videoFrame.displayHeight;
     67      const originalTimestamp = videoFrame.timestamp;
     68      const generator = new VideoTrackGenerator();
     69      t.add_cleanup(() => generator.track.stop());
     70 
     71      // Use a MediaStreamTrackProcessor as a sink for |generator| to verify
     72      // that |processor| actually forwards the frames written to its writable
     73      // field.
     74      const processor = new MediaStreamTrackProcessor(generator);
     75      const reader = processor.readable.getReader();
     76      const readerPromise = new Promise(async resolve => {
     77        const result = await reader.read();
     78        assert_equals(result.value.displayWidth, originalWidth);
     79        assert_equals(result.value.displayHeight, originalHeight);
     80        assert_equals(result.value.timestamp, originalTimestamp);
     81        resolve();
     82      });
     83 
     84      generator.writable.getWriter().write(videoFrame);
     85      return readerPromise;
     86    }, 'Tests that VideoTrackGenerator forwards frames to sink');
     87 
  promise_test(async t => {
    // Feed one solid-colour frame through a VideoTrackGenerator into an
    // autoplaying <video> element, then read the rendered output back
    // through a 2D canvas and check the centre pixel matches |pixelColour|.
    const videoFrame = makeVideoFrame(1);
    const originalWidth = videoFrame.displayWidth;
    const originalHeight = videoFrame.displayHeight;
    const generator = new VideoTrackGenerator();
    t.add_cleanup(() => generator.track.stop());

    const video = document.createElement("video");
    video.autoplay = true;
    video.width = 320;
    video.height = 240;
    video.srcObject = new MediaStream([generator.track]);
    video.play();

    // Wait for the video element to be connected to the generator and
    // generate the frame.
    video.onloadstart = () => generator.writable.getWriter().write(videoFrame);

    // NOTE(review): ontimeupdate may fire more than once; resolving an
    // already-resolved promise on later firings is a harmless no-op.
    return new Promise((resolve)=> {
      video.ontimeupdate = t.step_func(() => {
        const canvas = document.createElement("canvas");
        canvas.width = originalWidth;
        canvas.height = originalHeight;
        const context = canvas.getContext('2d');
        context.drawImage(video, 0, 0);
        // Pick a pixel in the centre of the video and check that it has the colour of the frame provided.
        const pixel = context.getImageData(videoFrame.displayWidth/2, videoFrame.displayHeight/2, 1, 1);
        assertPixel(t, pixel.data, pixelColour);
        resolve();
      });
    });
  }, 'Tests that frames are actually rendered correctly in a stream used for a video element.');
    120 
  promise_test(async t => {
    // Continuously generate solid-colour frames, send the generator's track
    // over a real RTCPeerConnection, render the remote side in a <video>
    // element, and verify the centre pixel colour survives encode/decode.
    const generator = new VideoTrackGenerator();
    t.add_cleanup(() => generator.track.stop());

    // Write frames for the duration of the test. A steady 40ms cadence is
    // needed because the encoder produces output only while frames arrive.
    const writer = generator.writable.getWriter();
    let timestamp = 0;
    const intervalId = setInterval(
        t.step_func(async () => {
          if (generator.track.readyState === 'live') {
            timestamp++;
            await writer.write(makeVideoFrame(timestamp));
          }
        }),
        40);
    t.add_cleanup(() => clearInterval(intervalId));

    const video = document.createElement('video');
    video.autoplay = true;
    video.width = width;
    video.height = height;
    video.muted = true;

    // Wires up the call and resolves once the callee is 'connected'.
    await initiateSingleTrackCall(t, generator.track, video);

    return new Promise(resolve => {
      video.ontimeupdate = t.step_func(() => {
        const canvas = document.createElement('canvas');
        canvas.width = width;
        canvas.height = height;
        const context = canvas.getContext('2d');
        context.drawImage(video, 0, 0);
        // Pick a pixel in the centre of the video and check that it has the
        // colour of the frame provided.
        const pixel = context.getImageData(width / 2, height / 2, 1, 1);
        // Encoding/decoding can add noise, so increase the threshold to 8.
        assertPixel(t, pixel.data, pixelColour, 8);
        resolve();
      });
    });
  }, 'Tests that frames are actually rendered correctly in a stream sent over a peer connection.');
    162 
    163 
  promise_test(async t => {
    // Send a four-quadrant test pattern over a peer connection with
    // scaleResolutionDownBy = 2 applied to the sender, then verify the
    // received frames are half-size with each quadrant's colour intact.
    const generator = new VideoTrackGenerator();
    t.add_cleanup(() => generator.track.stop());

    const inputCanvas = new OffscreenCanvas(width, height);

    const inputContext = inputCanvas.getContext('2d', {alpha: false});
    // draw four quadrants
    const colorUL = [255, 0, 0, 255];
    inputContext.fillStyle = `rgba(${colorUL.join()})`;
    inputContext.fillRect(0, 0, width / 2, height / 2);
    const colorUR = [255, 255, 0, 255];
    inputContext.fillStyle = `rgba(${colorUR.join()})`;
    inputContext.fillRect(width / 2, 0, width / 2, height / 2);
    const colorLL = [0, 255, 0, 255];
    inputContext.fillStyle = `rgba(${colorLL.join()})`;
    inputContext.fillRect(0, height / 2, width / 2, height / 2);
    const colorLR = [0, 255, 255, 255];
    inputContext.fillStyle = `rgba(${colorLR.join()})`;
    inputContext.fillRect(width / 2, height / 2, width / 2, height / 2);

    // Write frames for the duration of the test.
    const writer = generator.writable.getWriter();
    let timestamp = 0;
    const intervalId = setInterval(
        t.step_func(async () => {
          if (generator.track.readyState === 'live') {
            timestamp++;
            await writer.write(new VideoFrame(
                inputCanvas, {timestamp: timestamp, alpha: 'discard'}));
          }
        }),
        40);
    t.add_cleanup(() => clearInterval(intervalId));

    const caller = new RTCPeerConnection();
    t.add_cleanup(() => caller.close());
    const callee = new RTCPeerConnection();
    t.add_cleanup(() => callee.close());
    const sender = caller.addTrack(generator.track);

    exchangeIceCandidates(caller, callee);
    // Wait for the first track.
    const e = await exchangeOfferAndListenToOntrack(t, caller, callee);

    // Exchange answer.
    await exchangeAnswer(caller, callee);
    await waitForConnectionStateChange(callee, ['connected']);
    // Halve the encoded resolution after the connection is up.
    // NOTE(review): the forEach parameter `e` shadows the track event `e`
    // from above; consider renaming if this block is ever touched.
    const params = sender.getParameters();
    params.encodings.forEach(e => e.scaleResolutionDownBy = 2);
    sender.setParameters(params);

    // Read decoded frames on the receiving side.
    const processor = new MediaStreamTrackProcessor(e.track);
    const reader = processor.readable.getReader();

    // The first frame may not have had scaleResolutionDownBy applied
    const numTries = 5;
    for (let i = 1; i <= numTries; i++) {
      const {value: outputFrame} = await reader.read();
      if (outputFrame.displayWidth !== width / 2) {
        // Fail outright only if all numTries frames came through unscaled.
        assert_less_than(i, numTries, `First ${numTries} frames were the wrong size.`);
        outputFrame.close();
        continue;
      }

      assert_equals(outputFrame.displayWidth, width / 2);
      assert_equals(outputFrame.displayHeight, height / 2);

      const outputCanvas = new OffscreenCanvas(width / 2, height / 2);
      const outputContext = outputCanvas.getContext('2d', {alpha: false});
      outputContext.drawImage(outputFrame, 0, 0);
      outputFrame.close();
      // Check the four quadrants (sample each quadrant's centre point).
      const pixelUL = outputContext.getImageData(width / 8, height / 8, 1, 1);
      assertPixel(t, pixelUL.data, colorUL);
      const pixelUR =
          outputContext.getImageData(width * 3 / 8, height / 8, 1, 1);
      assertPixel(t, pixelUR.data, colorUR);
      const pixelLL =
          outputContext.getImageData(width / 8, height * 3 / 8, 1, 1);
      assertPixel(t, pixelLL.data, colorLL);
      const pixelLR =
          outputContext.getImageData(width * 3 / 8, height * 3 / 8, 1, 1);
      assertPixel(t, pixelLR.data, colorLR);
      break;
    }
  }, 'Tests that frames are sent correctly with RTCRtpEncodingParameters.scaleResolutionDownBy.');
    251 
    252    promise_test(async t => {
    253      const generator = new VideoTrackGenerator();
    254      t.add_cleanup(() => generator.track.stop());
    255 
    256      const writer = generator.writable.getWriter();
    257      const frame = makeVideoFrame(1);
    258      await writer.write(frame);
    259 
    260      assert_equals(generator.track.kind, "video");
    261      assert_equals(generator.track.readyState, "live");
    262    }, "Tests that creating a VideoTrackGenerator works as expected");
    263 
    264    promise_test(async t => {
    265      const generator = new VideoTrackGenerator();
    266      t.add_cleanup(() => generator.track.stop());
    267 
    268      const writer = generator.writable.getWriter();
    269      const frame = makeVideoFrame(1);
    270      await writer.write(frame);
    271 
    272      assert_throws_dom("InvalidStateError", () => frame.clone(), "VideoFrame wasn't destroyed on write.");
    273    }, "Tests that VideoFrames are destroyed on write.");
    274 
    275    promise_test(async t => {
    276      const generator = new VideoTrackGenerator();
    277      t.add_cleanup(() => generator.track.stop());
    278 
    279      const writer = generator.writable.getWriter();
    280      const frame = makeVideoFrame(1);
    281      assert_throws_js(TypeError, writer.write(frame));
    282    }, "Mismatched frame and generator kind throws on write.");
    283 
    284  promise_test(async t => {
    285      const generator = new VideoTrackGenerator();
    286    t.add_cleanup(() => generator.track.stop());
    287 
    288    // Use a MediaStreamTrackProcessor as a sink for |generator| to verify
    289    // that |processor| actually forwards the frames written to its writable
    290    // field.
    291    const processor = new MediaStreamTrackProcessor(generator.track);
    292    const reader = processor.readable.getReader();
    293    const videoFrame = makeVideoFrame(1);
    294 
    295    const writer = generator.writable.getWriter();
    296    const videoFrame1 = makeVideoFrame(1);
    297    writer.write(videoFrame1);
    298    const result1 = await reader.read();
    299    assert_equals(result1.value.timestamp, 1);
    300    generator.muted = true;
    301 
    302    // This frame is expected to be discarded.
    303    const videoFrame2 = makeVideoFrame(2);
    304    writer.write(videoFrame2);
    305    generator.muted = false;
    306 
    307    const videoFrame3 = makeVideoFrame(3);
    308    writer.write(videoFrame3);
    309    const result3 = await reader.read();
    310    assert_equals(result3.value.timestamp, 3);
    311 
    312    // Set up a read ahead of time, then mute, enqueue and unmute.
    313    const promise5 = reader.read();
    314    generator.muted = true;
    315    writer.write(makeVideoFrame(4)); // Expected to be discarded.
    316    generator.muted = false;
    317    writer.write(makeVideoFrame(5));
    318    const result5 = await promise5;
    319    assert_equals(result5.value.timestamp, 5);
    320  }, 'Tests that VideoTrackGenerator forwards frames only when unmuted');
    321 
    322  // Note - tests for mute/unmute events will be added once
    323  // https://github.com/w3c/mediacapture-transform/issues/81 is resolved
    324 
    325  </script>
    326 </body>
    327 </html>