measure.html (16936B)
<!DOCTYPE html>
<html>
<head>
<meta charset="UTF-8" />
<title>window.performance User Timing measure() method is working properly</title>
<link rel="author" title="Microsoft" href="http://www.microsoft.com/" />
<link rel="help" href="https://w3c.github.io/user-timing/#dom-performance-measure"/>
<script src="/resources/testharness.js"></script>
<script src="/resources/testharnessreport.js"></script>
<script src="/common/performance-timeline-utils.js"></script>
<script src="resources/webperftestharness.js"></script>

<script>
// test data
var startMarkName = "mark_start";
var startMarkValue;
var endMarkName = "mark_end";
var endMarkValue;
var measures;
// maximum allowed difference (ms) between an actual and an expected duration
var testThreshold = 20;

// test measures
// delay (ms) between creating the start mark and the end mark, so that
// measures spanning the two marks have a clearly non-zero duration
var measureTestDelay = 200;
var TEST_MEASURES =
[
    {
        name: "measure_no_start_no_end",
        startMark: undefined,
        endMark: undefined,
        startTime: undefined,
        duration: undefined,
        entryType: "measure",
        entryMatch: undefined,
        order: undefined,
        found: false
    },
    {
        name: "measure_start_no_end",
        startMark: "mark_start",
        endMark: undefined,
        startTime: undefined,
        duration: undefined,
        entryType: "measure",
        entryMatch: undefined,
        order: undefined,
        found: false
    },
    {
        name: "measure_start_end",
        startMark: "mark_start",
        endMark: "mark_end",
        startTime: undefined,
        duration: undefined,
        entryType: "measure",
        entryMatch: undefined,
        order: undefined,
        found: false
    },
    {
        name: "measure_no_start_end",
        startMark: undefined,
        endMark: "mark_end",
        startTime: undefined,
        duration: undefined,
        entryType: "measure",
        entryMatch: undefined,
        order: undefined,
        found: false
    },
    // intentional duplicate of the first measure, used to confirm names can be re-used
    {
        name: "measure_no_start_no_end",
        startMark: undefined,
        endMark: undefined,
        startTime: undefined,
        duration: undefined,
        entryType: "measure",
        entryMatch: undefined,
        order: undefined,
        found: false
    }
];
// the index of the duplicate "measure_no_start_no_end"
const duplicate_index = TEST_MEASURES.map(m=>m.name).lastIndexOf('measure_no_start_no_end');

setup({explicit_done: true});

test_namespace();

// Entry point (body onload): creates the start mark, then schedules
// measure_test_cb() after measureTestDelay so the end mark is measurably later.
function onload_test()
{
    // test for existence of User Timing and Performance Timeline interface
    if (!has_required_interfaces())
    {
        test_true(false,
                  "The User Timing and Performance Timeline interfaces, which are required for this test, " +
                  "are defined.");

        done();
    }
    else
    {
        // create the start mark for the test measures
        window.performance.mark(startMarkName);

        // get the start mark's value
        startMarkValue = window.performance.getEntriesByName(startMarkName)[0].startTime;

        // create the test end mark using the test delay; this will allow for a significant difference between
        // the mark values that should be represented in the duration of measures using these marks
        step_timeout(measure_test_cb, measureTestDelay);
    }
}

// Creates one measure per TEST_MEASURES scenario, computes each scenario's
// expected startTime/duration, then validates the measures through
// getEntriesByName(), getEntriesByName(name, "measure"), getEntries(), and
// getEntriesByType("measure").
function measure_test_cb()
{
    // create the end mark for the test measures
    window.performance.mark(endMarkName);

    // get the end mark's value
    endMarkValue = window.performance.getEntriesByName(endMarkName)[0].startTime;

    // loop through all measure scenarios and create the corresponding measures
    for (var i in TEST_MEASURES)
    {
        var scenario = TEST_MEASURES[i];

        if (scenario.startMark == undefined && scenario.endMark == undefined)
        {
            // both startMark and endMark are undefined, don't provide either parameters
            window.performance.measure(scenario.name);

            // when startMark isn't provided to the measure() call, a DOMHighResTimeStamp corresponding
            // to the navigationStart attribute with a timebase of the same attribute is used; this is
            // equivalent to 0
            scenario.startTime = 0;

            // when endMark isn't provided to the measure() call, a DOMHighResTimeStamp corresponding to
            // the current time with a timebase of the navigationStart attribute is used
            scenario.duration = (new Date()) - window.performance.timing.navigationStart;
        }
        else if (scenario.startMark != undefined && scenario.endMark == undefined)
        {
            // only startMark is defined, provide startMark and don't provide endMark
            window.performance.measure(scenario.name, scenario.startMark);

            // when startMark is provided to the measure() call, the value of the mark whose name is
            // provided is used for the startMark
            scenario.startTime = startMarkValue;

            // when endMark isn't provided to the measure() call, a DOMHighResTimeStamp corresponding to
            // the current time with a timebase of the navigationStart attribute is used
            scenario.duration = window.performance.now() - startMarkValue;
        }
        else if (scenario.startMark != undefined && scenario.endMark != undefined)
        {
            // both startMark and endMark are defined, provide both parameters
            window.performance.measure(scenario.name, scenario.startMark, scenario.endMark);

            // when startMark is provided to the measure() call, the value of the mark whose name is
            // provided is used for the startMark
            scenario.startTime = startMarkValue;

            // when endMark is provided to the measure() call, the value of the mark whose name is
            // provided is used for the endMark
            scenario.duration = endMarkValue - startMarkValue;
        }
        else if (scenario.startMark == undefined && scenario.endMark != undefined)
        {
            // endMark is defined but startMark is undefined, provide both parameters
            window.performance.measure(scenario.name, scenario.startMark, scenario.endMark);

            // when startMark isn't provided to the measure() call, a DOMHighResTimeStamp corresponding
            // to the navigationStart attribute with a timebase of the same attribute is used; this is
            // equivalent to 0
            scenario.startTime = 0;

            // when endMark is provided to the measure() call, the value of the mark whose name is
            // provided is used for the endMark; startTime is 0, so the duration equals the end
            // mark's value
            scenario.duration = endMarkValue;
        } else
        {
            test_true(false, 'Test measure scenario unhandled');
        }
    }

    // test that expected measures are returned by getEntriesByName
    for (var i in TEST_MEASURES)
    {
        entries = window.performance.getEntriesByName(TEST_MEASURES[i].name);
        // for all test measures, the test will be validate the test measure against the first entry returned
        // by getEntriesByName(), except for the last measure, where since it is a duplicate measure, the test
        // will validate it against the second entry returned by getEntriesByName()
        test_measure(entries[(i == duplicate_index ? 1 : 0)],
                     "window.performance.getEntriesByName(\"" + TEST_MEASURES[i].name + "\")[" +
                     (i == duplicate_index ? 1 : 0) + "]",
                     TEST_MEASURES[i].name,
                     TEST_MEASURES[i].startTime,
                     TEST_MEASURES[i].duration);
        TEST_MEASURES[i].entryMatch = entries[(i == duplicate_index ? 1 : 0)];
    }

    // test that expected measures are returned by getEntriesByName with the entryType parameter provided
    for (var i in TEST_MEASURES)
    {
        entries = window.performance.getEntriesByName(TEST_MEASURES[i].name, "measure");

        test_true(match_entries(entries[(i == duplicate_index ? 1 : 0)], TEST_MEASURES[i].entryMatch),
                  "window.performance.getEntriesByName(\"" + TEST_MEASURES[i].name + "\", \"measure\")[" +
                  (i == duplicate_index ? 1 : 0) + "] returns an object containing the \"" + TEST_MEASURES[i].name +
                  "\" measure in the correct order, and its value matches the \"" + TEST_MEASURES[i].name +
                  "\" measure returned by window.performance.getEntriesByName(\"" + TEST_MEASURES[i].name +
                  "\")");
    }

    // test that expected measures are returned by getEntries
    entries = get_test_entries(window.performance.getEntries(), "measure");

    test_measure_list(entries, "window.performance.getEntries()", TEST_MEASURES);

    // test that expected measures are returned by getEntriesByType
    entries = window.performance.getEntriesByType("measure");

    test_measure_list(entries, "window.performance.getEntriesByType(\"measure\")", TEST_MEASURES);

    done();
}

// Returns true when entry1 and entry2 have the same name and entryType, and
// their startTime and duration each differ by no more than threshold ms.
// threshold defaults to testThreshold.
// FIX(review): the threshold parameter was previously declared (defaulting to
// 0) but never used — the comparisons read the global testThreshold instead.
// The parameter now defaults to testThreshold and is actually applied, which
// preserves the existing behavior for all current callers (none pass a
// threshold) while making the parameter meaningful.
function match_entries(entry1, entry2, threshold)
{
    if (threshold == undefined)
    {
        threshold = testThreshold;
    }

    var pass = true;

    // match name
    pass = pass && (entry1.name == entry2.name);

    // match startTime
    pass = pass && (Math.abs(entry1.startTime - entry2.startTime) <= threshold);

    // match entryType
    pass = pass && (entry1.entryType == entry2.entryType);

    // match duration
    pass = pass && (Math.abs(entry1.duration - entry2.duration) <= threshold);

    return pass;
}

// Asserts that measureEntry has the expected name, startTime (exact match),
// entryType ("measure"), and duration (within testThreshold ms).
// measureEntryCommand is the human-readable expression used in test messages.
function test_measure(measureEntry, measureEntryCommand, expectedName, expectedStartTime, expectedDuration)
{
    // test name
    test_true(measureEntry.name == expectedName, measureEntryCommand + ".name == \"" + expectedName + "\"");

    // test startTime; since for a mark, the startTime is always equal to a mark's value or the value of a
    // navigation timing attribute, the actual startTime should match the expected value exactly
    test_true(Math.abs(measureEntry.startTime - expectedStartTime) == 0,
              measureEntryCommand + ".startTime is correct");

    // test entryType
    test_true(measureEntry.entryType == "measure", measureEntryCommand + ".entryType == \"measure\"");

    // test duration, allow for an acceptable threshold in the difference between the actual duration and the
    // expected value for the duration
    test_true(Math.abs(measureEntry.duration - expectedDuration) <= testThreshold, measureEntryCommand +
              ".duration is approximately correct (up to " + testThreshold + "ms difference allowed)");
}

// Asserts that measureEntryList contains exactly one matching entry for each
// scenario in measureScenarios (pairing each scenario with a distinct entry
// via the "found" flags), and that the list is ordered by startTime.
function test_measure_list(measureEntryList, measureEntryListCommand, measureScenarios)
{
    // give all entries a "found" property that can be set to ensure it isn't tested twice
    for (var i in measureEntryList)
    {
        measureEntryList[i].found = false;
    }

    for (var i in measureScenarios)
    {
        measureScenarios[i].found = false;

        for (var j in measureEntryList)
        {
            if (match_entries(measureEntryList[j], measureScenarios[i]) && !measureEntryList[j].found)
            {
                test_true(match_entries(measureEntryList[j], measureScenarios[i].entryMatch),
                          measureEntryListCommand + " returns an object containing the \"" +
                          measureScenarios[i].name + "\" measure, and it's value matches the measure " +
                          "returned by window.performance.getEntriesByName(\"" + measureScenarios[i].name +
                          "\")[" + (i == duplicate_index ? 1 : 0) + "].");

                measureEntryList[j].found = true;
                measureScenarios[i].found = true;
                break;
            }
        }

        if (!measureScenarios[i].found)
        {
            test_true(false,
                      measureEntryListCommand + " returns an object containing the \"" +
                      measureScenarios[i].name + "\" measure.");
        }
    }

    // verify order of output of getEntriesByType
    var startTimeCurr = 0;
    var pass = true;
    for (var i in measureEntryList)
    {
        if (measureEntryList[i].startTime < startTimeCurr)
        {
            pass = false;
        }
        startTimeCurr = measureEntryList[i].startTime;
    }
    test_true(pass,
              measureEntryListCommand + " returns an object containing all test " +
              "measures in order.");
}

// Returns the subset of entryList whose entryType equals the given entryType.
function get_test_entries(entryList, entryType)
{
    var testEntries = new Array();

    // filter entryList
    for (var i in entryList)
    {
        if (entryList[i].entryType == entryType)
        {
            testEntries.push(entryList[i]);
        }
    }

    return testEntries;
}
</script>
</head>
<body onload="onload_test();">
<h1>Description</h1>
<p>This test validates that the performance.measure() method is working properly. This test creates the
following measures to test this method:
<ul>
<li>"measure_no_start_no_end": created using a measure() call without a startMark or endMark
provided</li>
<li>"measure_start_no_end": created using a measure() call with only the startMark provided</li>
<li>"measure_start_end": created using a measure() call with both a startMark or endMark provided</li>
<li>"measure_no_start_end": created using a measure() call with only the endMark provided</li>
<li>"measure_no_start_no_end": duplicate of the first measure, used to confirm names can be re-used</li>
</ul>
After creating each measure, the existence of these measures is validated by calling
performance.getEntriesByName() (both with and without the entryType parameter provided),
performance.getEntriesByType(), and performance.getEntries()
</p>

<div id="log"></div>
</body>
</html>