single-face-detection.https.html (10630B)
<!DOCTYPE html>
<script src="/resources/testharness.js"></script>
<script src="/resources/testharnessreport.js"></script>
<script src="resources/single-detection-helpers.js"></script>
<body>
</body>
<script>
// Expected single-face detection results for each still image. Positions are
// in image pixels; the fuzziness values are per-axis tolerances, since face
// detection output is not pixel-exact across implementations.
const imageTests = {
    center: {
        name: "face-center.jpg",
        face: {boundingBox: {left: 312, right: 512, top: 238, bottom: 438}, fuzziness: 25},
        mouth: {position: {x: 414, y: 379}, fuzzinessX: 30, fuzzinessY: 20},
        leftEye: {position: {x: 378, y: 293}, fuzzinessX: 20, fuzzinessY: 10},
        rightEye: {position: {x: 448, y: 292}, fuzzinessX: 20, fuzzinessY: 10},
        nose: {position: {x: 412, y: 335}, fuzzinessX: 20, fuzzinessY: 35}},
    bottomLeft: {
        name: "face-bottom-left.jpg",
        face: {boundingBox: {left: 96, right: 387, top: 281, bottom: 572}, fuzziness: 15},
        mouth: {position: {x: 248, y: 483}, fuzzinessX: 45, fuzzinessY: 25},
        leftEye: {position: {x: 196, y: 359}, fuzzinessX: 25, fuzzinessY: 10},
        rightEye: {position: {x: 296, y: 357}, fuzzinessX: 25, fuzzinessY: 10},
        nose: {position: {x: 244, y: 419}, fuzzinessX: 30, fuzzinessY: 50}},
    bottomRight: {
        name: "face-bottom-right.jpg",
        face: {boundingBox: {left: 445, right: 733, top: 284, bottom: 572}, fuzziness: 10},
        mouth: {position: {x: 593, y: 487}, fuzzinessX: 45, fuzzinessY: 25},
        leftEye: {position: {x: 542, y: 363}, fuzzinessX: 25, fuzzinessY: 10},
        rightEye: {position: {x: 641, y: 361}, fuzzinessX: 25, fuzzinessY: 10},
        nose: {position: {x: 590, y: 423}, fuzzinessX: 30, fuzzinessY: 50}},
    topLeft: {
        name: "face-top-left.jpg",
        face: {boundingBox: {left: 101, right: 387, top: 119, bottom: 405}, fuzziness: 10},
        mouth: {position: {x: 246, y: 322}, fuzzinessX: 45, fuzzinessY: 25},
        leftEye: {position: {x: 194, y: 198}, fuzzinessX: 25, fuzzinessY: 10},
        rightEye: {position: {x: 295, y: 196}, fuzzinessX: 25, fuzzinessY: 10},
        nose: {position: {x: 243, y: 258}, fuzzinessX: 30, fuzzinessY: 50}},
    topRight: {
        name: "face-top-right.jpg",
        face: {boundingBox: {left: 451, right: 735, top: 124, bottom: 408}, fuzziness: 10},
        mouth: {position: {x: 594, y: 326}, fuzzinessX: 45, fuzzinessY: 25},
        leftEye: {position: {x: 542, y: 202}, fuzzinessX: 25, fuzzinessY: 10},
        rightEye: {position: {x: 642, y: 200}, fuzzinessX: 25, fuzzinessY: 10},
        nose: {position: {x: 591, y: 261}, fuzzinessX: 30, fuzzinessY: 50}}};

// The video shows each of the five faces above in sequence, one per second;
// sampling at the half-second marks lands safely inside each segment.
const videoTests = {
    "faces.mov": [
        {time: 0.5, test: imageTests.center},
        {time: 1.5, test: imageTests.bottomLeft},
        {time: 2.5, test: imageTests.bottomRight},
        {time: 3.5, test: imageTests.topLeft},
        {time: 4.5, test: imageTests.topRight}]};

// All the fields in FaceDetectorOptions are hints, so they can't be tested.
const faceDetector = new FaceDetector();

// Detects faces in |imageBitmapSource| and asserts that exactly one face is
// found, that its bounding box matches |test|, and that any reported
// landmarks (mouth, eyes, nose) lie where |test| expects them, within the
// per-landmark fuzziness tolerances. Landmarks are optional, so their checks
// are skipped when an implementation does not report them.
async function testImage(imageBitmapSource, test) {
    const detectedFaces = await faceDetector.detect(imageBitmapSource);
    assert_equals(detectedFaces.length, 1);
    const detectedFace = detectedFaces[0];
    checkBoundingBox(detectedFace.boundingBox, test.face.boundingBox, test.face.fuzziness);
    if (detectedFace.landmarks) {
        let mouthCount = 0;
        let eyeCount = 0;
        let noseCount = 0;
        // `const` here: the original `for (landmark of ...)` leaked an
        // implicit global.
        for (const landmark of detectedFace.landmarks) {
            checkPointsLieWithinBoundingBox(landmark.locations, detectedFace.boundingBox);
            switch (landmark.type) {
                case "mouth":
                    checkPointsAreNear(landmark.locations, test.mouth.position, test.mouth.fuzzinessX, test.mouth.fuzzinessY);
                    ++mouthCount;
                    break;
                case "eye":
                    // Eyes are matched to expectations after the loop, once
                    // we know which one is left vs. right.
                    ++eyeCount;
                    break;
                case "nose":
                    checkPointsAreNear(landmark.locations, test.nose.position, test.nose.fuzzinessX, test.nose.fuzzinessY);
                    ++noseCount;
                    break;
                default:
                    // testharness.js has no bare `assert()`; report unknown
                    // landmark types as a proper harness failure.
                    assert_unreached(`Unexpected landmark type: ${landmark.type}`);
            }
        }
        assert_less_than_equal(mouthCount, 1);
        assert_true(eyeCount == 0 || eyeCount == 2, "There should be 2 eyes (or the implementation doesn't support detecting eyes)");
        assert_less_than_equal(noseCount, 1);

        // Only check eye positions when eyes were actually reported; the
        // assertion above deliberately allows eyeCount == 0, in which case
        // destructuring two eyes would throw a TypeError.
        if (eyeCount == 2) {
            const [leftEye, rightEye] = detectedFace.landmarks.filter(landmark => landmark.type == "eye").toSorted(function(landmarkA, landmarkB) {
                // The left eye has a smaller X coordinate than the right eye.
                // Compare the mean X coordinate of each eye's location points.
                const locationsA = landmarkA.locations.map(location => location.x);
                const locationsB = landmarkB.locations.map(location => location.x);
                const locationA = locationsA.reduce((a, b) => a + b) / locationsA.length;
                const locationB = locationsB.reduce((a, b) => a + b) / locationsB.length;
                return locationA - locationB;
            });
            checkPointsAreNear(leftEye.locations, test.leftEye.position, test.leftEye.fuzzinessX, test.leftEye.fuzzinessY);
            checkPointsAreNear(rightEye.locations, test.rightEye.position, test.rightEye.fuzzinessX, test.rightEye.fuzzinessY);
        }
    }
}

promise_test(async t => {
    for (const [key, imageTest] of Object.entries(imageTests)) {
        const imageElement = document.createElement("img");
        imageElement.src = `resources/${imageTest.name}`;
        await imageLoadedPromise(imageElement);
        assert_true(imageElement.complete, "Image element should have loaded successfully");
        await testImage(imageElement, imageTest);
    }
}, "HTMLImageElement");

// Intentionally don't test SVGImageElement. The spec https://html.spec.whatwg.org/multipage/canvas.html#canvasimagesource says it's supposed to be
// a CanvasImageSource, but neither WebKit nor Blink actually seem to implement that.

// Loads the still image for |imageTest| and asserts that it decoded
// successfully before returning the <img> element.
async function loadTestImage(imageTest) {
    const imageElement = document.createElement("img");
    imageElement.src = `resources/${imageTest.name}`;
    await imageLoadedPromise(imageElement);
    assert_true(imageElement.complete, "Image element should have loaded successfully");
    return imageElement;
}

// Draws |imageElement| at full size into a freshly-created <canvas> element
// and returns the canvas.
function drawToCanvas(imageElement) {
    const canvasElement = document.createElement("canvas");
    canvasElement.width = imageElement.width;
    canvasElement.height = imageElement.height;
    canvasElement.getContext("2d").drawImage(imageElement, 0, 0);
    return canvasElement;
}

// Creates a <video> element for resources/|name|, waits for it to load, and
// runs |callback| with the element. The element is attached to the document
// for the duration of the callback and is always removed afterwards, even if
// an assertion throws (the original code leaked it on failure).
async function withLoadedVideo(name, callback) {
    const videoElement = document.createElement("video");
    document.body.appendChild(videoElement);
    try {
        videoElement.src = `resources/${name}`;
        const loadedPromise = videoLoadedPromise(videoElement);
        videoElement.load();
        await loadedPromise;
        await callback(videoElement);
    } finally {
        document.body.removeChild(videoElement);
    }
}

promise_test(async t => {
    for (const [name, tests] of Object.entries(videoTests)) {
        await withLoadedVideo(name, async videoElement => {
            for (const test of tests) {
                await seekTo(videoElement, test.time);
                await testImage(videoElement, test.test);
            }
        });
    }
}, "HTMLVideoElement");

promise_test(async t => {
    for (const [key, imageTest] of Object.entries(imageTests)) {
        const imageElement = await loadTestImage(imageTest);
        await testImage(drawToCanvas(imageElement), imageTest);
    }
}, "HTMLCanvasElement");

promise_test(async t => {
    for (const [key, imageTest] of Object.entries(imageTests)) {
        const imageElement = await loadTestImage(imageTest);
        const imageBitmap = await createImageBitmap(imageElement);
        await testImage(imageBitmap, imageTest);
    }
}, "ImageBitmap");

promise_test(async t => {
    for (const [key, imageTest] of Object.entries(imageTests)) {
        const imageElement = await loadTestImage(imageTest);
        const offscreenCanvas = new OffscreenCanvas(imageElement.width, imageElement.height);
        offscreenCanvas.getContext("2d").drawImage(imageElement, 0, 0);
        await testImage(offscreenCanvas, imageTest);
    }
}, "OffscreenCanvas");

promise_test(async t => {
    for (const [name, tests] of Object.entries(videoTests)) {
        await withLoadedVideo(name, async videoElement => {
            for (const test of tests) {
                await seekTo(videoElement, test.time);
                const videoFrame = new VideoFrame(videoElement);
                try {
                    await testImage(videoFrame, test.test);
                } finally {
                    videoFrame.close();
                }
            }
        });
    }
}, "VideoFrame");

promise_test(async t => {
    for (const [key, imageTest] of Object.entries(imageTests)) {
        const imageElement = await loadTestImage(imageTest);
        const canvasElement = drawToCanvas(imageElement);
        // toBlob() invokes its callback with null on encoding failure; reject
        // in that case instead of handing null to FaceDetector.detect().
        const blob = await new Promise(function(resolve, reject) {
            canvasElement.toBlob(function(blob) {
                if (blob)
                    resolve(blob);
                else
                    reject(new Error("canvas.toBlob() failed"));
            });
        });
        await testImage(blob, imageTest);
    }
}, "Blob");

promise_test(async t => {
    for (const [key, imageTest] of Object.entries(imageTests)) {
        const imageElement = await loadTestImage(imageTest);
        const canvasElement = drawToCanvas(imageElement);
        const imageData = canvasElement.getContext("2d").getImageData(0, 0, canvasElement.width, canvasElement.height);
        await testImage(imageData, imageTest);
    }
}, "ImageData");

</script>