VideoUtils.cpp (41951B)
1 /* This Source Code Form is subject to the terms of the Mozilla Public 2 * License, v. 2.0. If a copy of the MPL was not distributed with this 3 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ 4 5 #include "VideoUtils.h" 6 7 #include <stdint.h> 8 9 #include "CubebUtils.h" 10 #include "H264.h" 11 #include "ImageContainer.h" 12 #include "MediaContainerType.h" 13 #include "MediaResource.h" 14 #include "TimeUnits.h" 15 #include "mozilla/Base64.h" 16 #include "mozilla/SchedulerGroup.h" 17 #include "mozilla/SharedThreadPool.h" 18 #include "mozilla/StaticPrefs_accessibility.h" 19 #include "mozilla/StaticPrefs_media.h" 20 #include "mozilla/TaskQueue.h" 21 #include "mozilla/dom/ContentChild.h" 22 #include "nsCharSeparatedTokenizer.h" 23 #include "nsContentTypeParser.h" 24 #include "nsIConsoleService.h" 25 #include "nsINetworkLinkService.h" 26 #include "nsIRandomGenerator.h" 27 #include "nsMathUtils.h" 28 #include "nsNetCID.h" 29 #include "nsServiceManagerUtils.h" 30 #include "nsThreadUtils.h" 31 32 #ifdef XP_WIN 33 # include "WMFDecoderModule.h" 34 #endif 35 36 namespace mozilla { 37 38 using gfx::ColorRange; 39 using gfx::CICP::ColourPrimaries; 40 using gfx::CICP::MatrixCoefficients; 41 using gfx::CICP::TransferCharacteristics; 42 using layers::PlanarYCbCrImage; 43 using media::TimeUnit; 44 45 double ToMicrosecondResolution(double aSeconds) { 46 double integer; 47 modf(aSeconds * USECS_PER_S, &integer); 48 return integer / USECS_PER_S; 49 } 50 51 CheckedInt64 SaferMultDiv(int64_t aValue, uint64_t aMul, uint64_t aDiv) { 52 if (aMul > INT64_MAX || aDiv > INT64_MAX) { 53 return CheckedInt64(INT64_MAX) + 1; // Return an invalid checked int. 
54 } 55 int64_t mul = AssertedCast<int64_t>(aMul); 56 int64_t div = AssertedCast<int64_t>(aDiv); 57 int64_t major = aValue / div; 58 int64_t remainder = aValue % div; 59 return CheckedInt64(remainder) * mul / div + CheckedInt64(major) * mul; 60 } 61 62 // Converts from number of audio frames to microseconds, given the specified 63 // audio rate. 64 CheckedInt64 FramesToUsecs(int64_t aFrames, uint32_t aRate) { 65 return SaferMultDiv(aFrames, USECS_PER_S, aRate); 66 } 67 68 // Converts from microseconds to number of audio frames, given the specified 69 // audio rate. 70 CheckedInt64 UsecsToFrames(int64_t aUsecs, uint32_t aRate) { 71 return SaferMultDiv(aUsecs, aRate, USECS_PER_S); 72 } 73 74 // Format TimeUnit as number of frames at given rate. 75 CheckedInt64 TimeUnitToFrames(const TimeUnit& aTime, uint32_t aRate) { 76 return aTime.IsValid() ? UsecsToFrames(aTime.ToMicroseconds(), aRate) 77 : CheckedInt64(INT64_MAX) + 1; 78 } 79 80 nsresult SecondsToUsecs(double aSeconds, int64_t& aOutUsecs) { 81 // This must be a >= test, because int64_t(double(INT64_MAX)) 82 // overflows and gives INT64_MIN. 83 if (aSeconds * double(USECS_PER_S) >= double(INT64_MAX)) { 84 return NS_ERROR_FAILURE; 85 } 86 aOutUsecs = int64_t(aSeconds * double(USECS_PER_S)); 87 return NS_OK; 88 } 89 90 static int32_t ConditionDimension(float aValue) { 91 // This will exclude NaNs and too-big values. 
92 if (aValue > 1.0 && aValue <= float(INT32_MAX)) { 93 return int32_t(NS_round(aValue)); 94 } 95 return 0; 96 } 97 98 void ScaleDisplayByAspectRatio(gfx::IntSize& aDisplay, float aAspectRatio) { 99 if (aAspectRatio > 1.0) { 100 // Increase the intrinsic width 101 aDisplay.width = 102 ConditionDimension(aAspectRatio * AssertedCast<float>(aDisplay.width)); 103 } else { 104 // Increase the intrinsic height 105 aDisplay.height = 106 ConditionDimension(AssertedCast<float>(aDisplay.height) / aAspectRatio); 107 } 108 } 109 110 static int64_t BytesToTime(int64_t offset, int64_t length, int64_t durationUs) { 111 NS_ASSERTION(length > 0, "Must have positive length"); 112 double r = double(offset) / double(length); 113 if (r > 1.0) { 114 r = 1.0; 115 } 116 return int64_t(double(durationUs) * r); 117 } 118 119 media::TimeIntervals GetEstimatedBufferedTimeRanges( 120 mozilla::MediaResource* aStream, int64_t aDurationUsecs) { 121 media::TimeIntervals buffered; 122 // Nothing to cache if the media takes 0us to play. 123 if (aDurationUsecs <= 0 || !aStream) { 124 return buffered; 125 } 126 127 // Special case completely cached files. This also handles local files. 128 if (aStream->IsDataCachedToEndOfResource(0)) { 129 buffered += media::TimeInterval(TimeUnit::Zero(), 130 TimeUnit::FromMicroseconds(aDurationUsecs)); 131 return buffered; 132 } 133 134 int64_t totalBytes = aStream->GetLength(); 135 136 // If we can't determine the total size, pretend that we have nothing 137 // buffered. This will put us in a state of eternally-low-on-undecoded-data 138 // which is not great, but about the best we can do. 139 if (totalBytes <= 0) { 140 return buffered; 141 } 142 143 int64_t startOffset = aStream->GetNextCachedData(0); 144 while (startOffset >= 0) { 145 int64_t endOffset = aStream->GetCachedDataEnd(startOffset); 146 // Bytes [startOffset..endOffset] are cached. 
147 NS_ASSERTION(startOffset >= 0, "Integer underflow in GetBuffered"); 148 NS_ASSERTION(endOffset >= 0, "Integer underflow in GetBuffered"); 149 150 int64_t startUs = BytesToTime(startOffset, totalBytes, aDurationUsecs); 151 int64_t endUs = BytesToTime(endOffset, totalBytes, aDurationUsecs); 152 if (startUs != endUs) { 153 buffered += media::TimeInterval(TimeUnit::FromMicroseconds(startUs), 154 TimeUnit::FromMicroseconds(endUs)); 155 } 156 startOffset = aStream->GetNextCachedData(endOffset); 157 } 158 return buffered; 159 } 160 161 void DownmixStereoToMono(mozilla::AudioDataValue* aBuffer, uint32_t aFrames) { 162 MOZ_ASSERT(aBuffer); 163 const int channels = 2; 164 for (uint32_t fIdx = 0; fIdx < aFrames; ++fIdx) { 165 #ifdef MOZ_SAMPLE_TYPE_FLOAT32 166 float sample = 0.0; 167 #else 168 int sample = 0; 169 #endif 170 // The sample of the buffer would be interleaved. 171 sample = (aBuffer[fIdx * channels] + aBuffer[fIdx * channels + 1]) * 0.5f; 172 aBuffer[fIdx * channels] = aBuffer[fIdx * channels + 1] = sample; 173 } 174 } 175 176 uint32_t DecideAudioPlaybackChannels(const AudioInfo& info) { 177 if (StaticPrefs::accessibility_monoaudio_enable()) { 178 return 1; 179 } 180 181 if (StaticPrefs::media_forcestereo_enabled()) { 182 return 2; 183 } 184 185 return info.mChannels; 186 } 187 188 uint32_t DecideAudioPlaybackSampleRate(const AudioInfo& aInfo, 189 bool aShouldResistFingerprinting) { 190 bool resampling = StaticPrefs::media_resampling_enabled(); 191 192 uint32_t rate = 0; 193 194 if (resampling) { 195 rate = 48000; 196 } else if (aInfo.mRate >= 44100) { 197 // The original rate is of good quality and we want to minimize unecessary 198 // resampling, so we let cubeb decide how to resample (if needed). Cap to 199 // 384kHz for good measure. 200 rate = std::min<unsigned>(aInfo.mRate, 384000u); 201 } else { 202 // We will resample all data to match cubeb's preferred sampling rate. 
203 rate = CubebUtils::PreferredSampleRate(aShouldResistFingerprinting); 204 if (rate > 768000) { 205 // bogus rate, fall back to something else; 206 rate = 48000; 207 } 208 } 209 MOZ_DIAGNOSTIC_ASSERT(rate, "output rate can't be 0."); 210 211 return rate; 212 } 213 214 bool IsDefaultPlaybackDeviceMono() { 215 return CubebUtils::MaxNumberOfChannels() == 1; 216 } 217 218 bool IsVideoContentType(const nsCString& aContentType) { 219 constexpr auto video = "video"_ns; 220 return FindInReadable(video, aContentType); 221 } 222 223 bool IsValidVideoRegion(const gfx::IntSize& aFrame, 224 const gfx::IntRect& aPicture, 225 const gfx::IntSize& aDisplay) { 226 return aFrame.width > 0 && aFrame.width <= PlanarYCbCrImage::MAX_DIMENSION && 227 aFrame.height > 0 && 228 aFrame.height <= PlanarYCbCrImage::MAX_DIMENSION && 229 aFrame.width * aFrame.height <= MAX_VIDEO_WIDTH * MAX_VIDEO_HEIGHT && 230 aPicture.width > 0 && 231 aPicture.width <= PlanarYCbCrImage::MAX_DIMENSION && 232 aPicture.x < PlanarYCbCrImage::MAX_DIMENSION && 233 aPicture.x + aPicture.width < PlanarYCbCrImage::MAX_DIMENSION && 234 aPicture.height > 0 && 235 aPicture.height <= PlanarYCbCrImage::MAX_DIMENSION && 236 aPicture.y < PlanarYCbCrImage::MAX_DIMENSION && 237 aPicture.y + aPicture.height < PlanarYCbCrImage::MAX_DIMENSION && 238 aPicture.width * aPicture.height <= 239 MAX_VIDEO_WIDTH * MAX_VIDEO_HEIGHT && 240 aDisplay.width > 0 && 241 aDisplay.width <= PlanarYCbCrImage::MAX_DIMENSION && 242 aDisplay.height > 0 && 243 aDisplay.height <= PlanarYCbCrImage::MAX_DIMENSION && 244 aDisplay.width * aDisplay.height <= MAX_VIDEO_WIDTH * MAX_VIDEO_HEIGHT; 245 } 246 247 already_AddRefed<SharedThreadPool> GetMediaThreadPool(MediaThreadType aType) { 248 const char* name; 249 uint32_t threads = 4; 250 switch (aType) { 251 case MediaThreadType::PLATFORM_DECODER: 252 name = "MediaPDecoder"; 253 break; 254 case MediaThreadType::WEBRTC_CALL_THREAD: 255 name = "WebrtcCallThread"; 256 threads = 1; 257 break; 258 case 
MediaThreadType::WEBRTC_WORKER: 259 name = "WebrtcWorker"; 260 break; 261 case MediaThreadType::MDSM: 262 name = "MediaDecoderStateMachine"; 263 threads = 1; 264 break; 265 case MediaThreadType::PLATFORM_ENCODER: 266 name = "MediaPEncoder"; 267 break; 268 default: 269 MOZ_FALLTHROUGH_ASSERT("Unexpected MediaThreadType"); 270 case MediaThreadType::SUPERVISOR: 271 name = "MediaSupervisor"; 272 break; 273 } 274 275 RefPtr<SharedThreadPool> pool = 276 SharedThreadPool::Get(nsDependentCString(name), threads); 277 278 // Ensure a larger stack for platform decoder threads 279 bool needsLargerStacks = aType == MediaThreadType::PLATFORM_DECODER; 280 // On Windows, platform encoder threads require larger stacks as well for 281 // libaom. 282 #ifdef XP_WIN 283 needsLargerStacks |= aType == MediaThreadType::PLATFORM_ENCODER; 284 #endif 285 if (needsLargerStacks) { 286 const uint32_t minStackSize = 512 * 1024; 287 uint32_t stackSize; 288 MOZ_ALWAYS_SUCCEEDS(pool->GetThreadStackSize(&stackSize)); 289 if (stackSize < minStackSize) { 290 MOZ_ALWAYS_SUCCEEDS(pool->SetThreadStackSize(minStackSize)); 291 } 292 } 293 294 return pool.forget(); 295 } 296 297 bool ExtractVPXCodecDetails(const nsAString& aCodec, uint8_t& aProfile, 298 uint8_t& aLevel, uint8_t& aBitDepth) { 299 uint8_t dummyChromaSubsampling = 1; 300 VideoColorSpace dummyColorspace; 301 return ExtractVPXCodecDetails(aCodec, aProfile, aLevel, aBitDepth, 302 dummyChromaSubsampling, dummyColorspace); 303 } 304 305 bool ExtractVPXCodecDetails(const nsAString& aCodec, uint8_t& aProfile, 306 uint8_t& aLevel, uint8_t& aBitDepth, 307 uint8_t& aChromaSubsampling, 308 VideoColorSpace& aColorSpace) { 309 // Assign default value. 
310 aChromaSubsampling = 1; 311 auto splitter = aCodec.Split(u'.'); 312 auto fieldsItr = splitter.begin(); 313 auto fourCC = *fieldsItr; 314 315 if (!fourCC.EqualsLiteral("vp09") && !fourCC.EqualsLiteral("vp08")) { 316 // Invalid 4CC 317 return false; 318 } 319 ++fieldsItr; 320 uint8_t primary, transfer, matrix, range; 321 uint8_t* fields[] = {&aProfile, &aLevel, &aBitDepth, &aChromaSubsampling, 322 &primary, &transfer, &matrix, &range}; 323 int fieldsCount = 0; 324 nsresult rv; 325 for (; fieldsItr != splitter.end(); ++fieldsItr, ++fieldsCount) { 326 if (fieldsCount > 7) { 327 // No more than 8 fields are expected. 328 return false; 329 } 330 *(fields[fieldsCount]) = 331 static_cast<uint8_t>((*fieldsItr).ToInteger(&rv, 10)); 332 // We got invalid field value, parsing error. 333 NS_ENSURE_SUCCESS(rv, false); 334 } 335 // Mandatory Fields 336 // <sample entry 4CC>.<profile>.<level>.<bitDepth>. 337 // Optional Fields 338 // <chromaSubsampling>.<colourPrimaries>.<transferCharacteristics>. 339 // <matrixCoefficients>.<videoFullRangeFlag> 340 // First three fields are mandatory(we have parsed 4CC). 341 if (fieldsCount < 3) { 342 // Invalid number of fields. 343 return false; 344 } 345 // Start to validate the parsing value. 346 347 // profile should be 0,1,2 or 3. 348 // See https://www.webmproject.org/vp9/profiles/ 349 if (aProfile > 3) { 350 // Invalid profile. 351 return false; 352 } 353 354 // level, See https://www.webmproject.org/vp9/mp4/#semantics_1 355 switch (aLevel) { 356 case 10: 357 case 11: 358 case 20: 359 case 21: 360 case 30: 361 case 31: 362 case 40: 363 case 41: 364 case 50: 365 case 51: 366 case 52: 367 case 60: 368 case 61: 369 case 62: 370 break; 371 default: 372 // Invalid level. 373 return false; 374 } 375 376 if (aBitDepth != 8 && aBitDepth != 10 && aBitDepth != 12) { 377 // Invalid bitDepth: 378 return false; 379 } 380 381 if (fieldsCount == 3) { 382 // No more options. 
383 return true; 384 } 385 386 // chromaSubsampling should be 0,1,2,3...4~7 are reserved. 387 if (aChromaSubsampling > 3) { 388 return false; 389 } 390 391 if (fieldsCount == 4) { 392 // No more options. 393 return true; 394 } 395 396 // It is an integer that is defined by the "Colour primaries" 397 // section of ISO/IEC 23001-8:2016 Table 2. 398 // We treat reserved value as false case. 399 if (primary == 0 || primary == 3 || primary > 22) { 400 // reserved value. 401 return false; 402 } 403 if (primary > 12 && primary < 22) { 404 // 13~21 are reserved values. 405 return false; 406 } 407 aColorSpace.mPrimaries = static_cast<ColourPrimaries>(primary); 408 409 if (fieldsCount == 5) { 410 // No more options. 411 return true; 412 } 413 414 // It is an integer that is defined by the 415 // "Transfer characteristics" section of ISO/IEC 23001-8:2016 Table 3. 416 // We treat reserved value as false case. 417 if (transfer == 0 || transfer == 3 || transfer > 18) { 418 // reserved value. 419 return false; 420 } 421 aColorSpace.mTransfer = static_cast<TransferCharacteristics>(transfer); 422 423 if (fieldsCount == 6) { 424 // No more options. 425 return true; 426 } 427 428 // It is an integer that is defined by the 429 // "Matrix coefficients" section of ISO/IEC 23001-8:2016 Table 4. 430 // We treat reserved value as false case. 431 if (matrix == 3 || matrix > 11) { 432 return false; 433 } 434 aColorSpace.mMatrix = static_cast<MatrixCoefficients>(matrix); 435 436 // If matrixCoefficients is 0 (RGB), then chroma subsampling MUST be 3 437 // (4:4:4). 438 if (aColorSpace.mMatrix == MatrixCoefficients::MC_IDENTITY && 439 aChromaSubsampling != 3) { 440 return false; 441 } 442 443 if (fieldsCount == 7) { 444 // No more options. 445 return true; 446 } 447 448 // videoFullRangeFlag indicates the black level and range of the luma and 449 // chroma signals. 0 = legal range (e.g. 16-235 for 8 bit sample depth); 450 // 1 = full range (e.g. 0-255 for 8-bit sample depth). 
451 aColorSpace.mRange = static_cast<ColorRange>(range); 452 return range <= 1; 453 } 454 455 bool ExtractH264CodecDetails(const nsAString& aCodec, uint8_t& aProfile, 456 uint8_t& aConstraint, H264_LEVEL& aLevel, 457 H264CodecStringStrictness aStrictness) { 458 // H.264 codecs parameters have a type defined as avcN.PPCCLL, where 459 // N = avc type. avc3 is avcc with SPS & PPS implicit (within stream) 460 // PP = profile_idc, CC = constraint_set flags, LL = level_idc. 461 // We ignore the constraint_set flags, as it's not clear from any 462 // documentation what constraints the platform decoders support. 463 // See 464 // http://blog.pearce.org.nz/2013/11/what-does-h264avc1-codecs-parameters.html 465 // for more details. 466 if (aCodec.Length() != strlen("avc1.PPCCLL")) { 467 return false; 468 } 469 470 // Verify the codec starts with "avc1." or "avc3.". 471 const nsAString& sample = Substring(aCodec, 0, 5); 472 if (!sample.EqualsASCII("avc1.") && !sample.EqualsASCII("avc3.")) { 473 return false; 474 } 475 476 // Extract the profile_idc, constraint_flags and level_idc. 477 nsresult rv = NS_OK; 478 aProfile = Substring(aCodec, 5, 2).ToInteger(&rv, 16); 479 NS_ENSURE_SUCCESS(rv, false); 480 481 // Constraint flags are stored on the 6 most significant bits, first two bits 482 // are reserved_zero_2bits. 
483 aConstraint = Substring(aCodec, 7, 2).ToInteger(&rv, 16); 484 NS_ENSURE_SUCCESS(rv, false); 485 486 uint8_t level = Substring(aCodec, 9, 2).ToInteger(&rv, 16); 487 NS_ENSURE_SUCCESS(rv, false); 488 489 if (level == 9) { 490 level = static_cast<uint8_t>(H264_LEVEL::H264_LEVEL_1_b); 491 } else if (level <= 5) { 492 level *= 10; 493 } 494 495 if (aStrictness == H264CodecStringStrictness::Lenient) { 496 aLevel = static_cast<H264_LEVEL>(level); 497 return true; 498 } 499 500 // Check if valid level value 501 aLevel = static_cast<H264_LEVEL>(level); 502 if (aLevel < H264_LEVEL::H264_LEVEL_1 || 503 aLevel > H264_LEVEL::H264_LEVEL_6_2) { 504 return false; 505 } 506 if ((level % 10) > 2) { 507 if (level != 13) { 508 return false; 509 } 510 } 511 512 return true; 513 } 514 515 bool IsH265ProfileRecognizable(uint8_t aProfile, 516 int32_t aProfileCompabilityFlags) { 517 enum Profile { 518 eUnknown, 519 eHighThroughputScreenExtended, 520 eScalableRangeExtension, 521 eScreenExtended, 522 e3DMain, 523 eScalableMain, 524 eMultiviewMain, 525 eHighThroughput, 526 eRangeExtension, 527 eMain10, 528 eMain, 529 eMainStillPicture 530 }; 531 Profile p = eUnknown; 532 533 // Spec A.3.8 534 if (aProfile == 11 || (aProfileCompabilityFlags & 0x800)) { 535 p = eHighThroughputScreenExtended; 536 } 537 // Spec H.11.1.2 538 if (aProfile == 10 || (aProfileCompabilityFlags & 0x400)) { 539 p = eScalableRangeExtension; 540 } 541 // Spec A.3.7 542 if (aProfile == 9 || (aProfileCompabilityFlags & 0x200)) { 543 p = eScreenExtended; 544 } 545 // Spec I.11.1.1 546 if (aProfile == 8 || (aProfileCompabilityFlags & 0x100)) { 547 p = e3DMain; 548 } 549 // Spec H.11.1.1 550 if (aProfile == 7 || (aProfileCompabilityFlags & 0x80)) { 551 p = eScalableMain; 552 } 553 // Spec G.11.1.1 554 if (aProfile == 6 || (aProfileCompabilityFlags & 0x40)) { 555 p = eMultiviewMain; 556 } 557 // Spec A.3.6 558 if (aProfile == 5 || (aProfileCompabilityFlags & 0x20)) { 559 p = eHighThroughput; 560 } 561 // Spec A.3.5 562 if 
(aProfile == 4 || (aProfileCompabilityFlags & 0x10)) { 563 p = eRangeExtension; 564 } 565 // Spec A.3.3 566 // NOTICE: Do not change the order of below sections 567 if (aProfile == 2 || (aProfileCompabilityFlags & 0x4)) { 568 p = eMain10; 569 } 570 // Spec A.3.2 571 // When aProfileCompabilityFlags[1] is equal to 1, 572 // aProfileCompabilityFlags[2] should be equal to 1 as well. 573 if (aProfile == 1 || (aProfileCompabilityFlags & 0x2)) { 574 p = eMain; 575 } 576 // Spec A.3.4 577 // When aProfileCompabilityFlags[3] is equal to 1, 578 // aProfileCompabilityFlags[1] and 579 // aProfileCompabilityFlags[2] should be equal to 1 as well. 580 if (aProfile == 3 || (aProfileCompabilityFlags & 0x8)) { 581 p = eMainStillPicture; 582 } 583 584 return p != eUnknown; 585 } 586 587 bool ExtractH265CodecDetails(const nsAString& aCodec, uint8_t& aProfile, 588 uint8_t& aLevel, nsTArray<uint8_t>& aConstraints) { 589 // HEVC codec id consists of: 590 const size_t maxHevcCodecIdLength = 591 5 + // 'hev1.' or 'hvc1.' prefix (5 chars) 592 4 + // profile, e.g. '.A12' (max 4 chars) 593 9 + // profile_compatibility, dot + 32-bit hex number (max 9 chars) 594 5 + // tier and level, e.g. '.H120' (max 5 chars) 595 18; // up to 6 constraint bytes, bytes are dot-separated and hex-encoded. 596 597 if (aCodec.Length() > maxHevcCodecIdLength) { 598 return false; 599 } 600 601 // Verify the codec starts with "hev1." or "hvc1.". 
602 const nsAString& sample = Substring(aCodec, 0, 5); 603 if (!sample.EqualsASCII("hev1.") && !sample.EqualsASCII("hvc1.")) { 604 return false; 605 } 606 607 nsresult rv; 608 CheckedUint8 profile; 609 int32_t compabilityFlags = 0; 610 CheckedUint8 level = 0; 611 nsTArray<uint8_t> constraints; 612 613 auto splitter = aCodec.Split(u'.'); 614 size_t count = 0; 615 for (auto iter = splitter.begin(); iter != splitter.end(); ++iter, ++count) { 616 const auto& fieldStr = *iter; 617 if (fieldStr.IsEmpty()) { 618 return false; 619 } 620 621 if (count == 0) { 622 MOZ_RELEASE_ASSERT(fieldStr.EqualsASCII("hev1") || 623 fieldStr.EqualsASCII("hvc1")); 624 continue; 625 } 626 627 if (count == 1) { // profile 628 Maybe<uint8_t> validProfileSpace; 629 if (fieldStr.First() == u'A' || fieldStr.First() == u'B' || 630 fieldStr.First() == u'C') { 631 validProfileSpace.emplace(1 + (fieldStr.First() - 'A')); 632 } 633 // If fieldStr.First() is not A, B, C or a digit, ToInteger() should fail. 634 profile = validProfileSpace ? Substring(fieldStr, 1).ToInteger(&rv) 635 : fieldStr.ToInteger(&rv); 636 if (NS_FAILED(rv) || !profile.isValid() || profile.value() > 0x1F) { 637 return false; 638 } 639 continue; 640 } 641 642 if (count == 2) { // profile compatibility flags 643 compabilityFlags = fieldStr.ToInteger(&rv, 16); 644 NS_ENSURE_SUCCESS(rv, false); 645 continue; 646 } 647 648 if (count == 3) { // tier and level 649 Maybe<uint8_t> validProfileTier; 650 if (fieldStr.First() == u'L' || fieldStr.First() == u'H') { 651 validProfileTier.emplace(fieldStr.First() == u'L' ? 0 : 1); 652 } 653 // If fieldStr.First() is not L, H, or a digit, ToInteger() should fail. 654 level = validProfileTier ? Substring(fieldStr, 1).ToInteger(&rv) 655 : fieldStr.ToInteger(&rv); 656 if (NS_FAILED(rv) || !level.isValid()) { 657 return false; 658 } 659 continue; 660 } 661 662 // The rest is constraint bytes. 
663 if (count > 10) { 664 return false; 665 } 666 667 CheckedUint8 byte(fieldStr.ToInteger(&rv, 16)); 668 if (NS_FAILED(rv) || !byte.isValid()) { 669 return false; 670 } 671 constraints.AppendElement(byte.value()); 672 } 673 674 if (count < 4 /* Parse til level at least */ || constraints.Length() > 6 || 675 !IsH265ProfileRecognizable(profile.value(), compabilityFlags)) { 676 return false; 677 } 678 679 aProfile = profile.value(); 680 aLevel = level.value(); 681 aConstraints = std::move(constraints); 682 return true; 683 } 684 685 bool ExtractAV1CodecDetails(const nsAString& aCodec, uint8_t& aProfile, 686 uint8_t& aLevel, uint8_t& aTier, uint8_t& aBitDepth, 687 bool& aMonochrome, bool& aSubsamplingX, 688 bool& aSubsamplingY, uint8_t& aChromaSamplePosition, 689 VideoColorSpace& aColorSpace) { 690 auto fourCC = Substring(aCodec, 0, 4); 691 692 if (!fourCC.EqualsLiteral("av01")) { 693 // Invalid 4CC 694 return false; 695 } 696 697 // Format is: 698 // av01.N.NN[MH].NN.B.BBN.NN.NN.NN.B 699 // where 700 // N = decimal digit 701 // [] = single character 702 // B = binary digit 703 // Field order: 704 // <sample entry 4CC>.<profile>.<level><tier>.<bitDepth> 705 // [.<monochrome>.<chromaSubsampling> 706 // .<colorPrimaries>.<transferCharacteristics>.<matrixCoefficients> 707 // .<videoFullRangeFlag>] 708 // 709 // If any optional field is found, all the rest must be included. 710 // 711 // Parsing stops but does not fail upon encountering unexpected characters 712 // at the end of an otherwise well-formed string. 
713 // 714 // See https://aomediacodec.github.io/av1-isobmff/#codecsparam 715 716 struct AV1Field { 717 uint8_t* field; 718 size_t length; 719 }; 720 uint8_t monochrome; 721 uint8_t subsampling; 722 uint8_t primary; 723 uint8_t transfer; 724 uint8_t matrix; 725 uint8_t range; 726 AV1Field fields[] = {{&aProfile, 1}, 727 {&aLevel, 2}, 728 // parsing loop skips tier 729 {&aBitDepth, 2}, 730 {&monochrome, 1}, 731 {&subsampling, 3}, 732 {&primary, 2}, 733 {&transfer, 2}, 734 {&matrix, 2}, 735 {&range, 1}}; 736 737 auto splitter = aCodec.Split(u'.'); 738 auto iter = splitter.begin(); 739 ++iter; 740 size_t fieldCount = 0; 741 while (iter != splitter.end()) { 742 // Exit if there are too many fields. 743 if (fieldCount >= 9) { 744 return false; 745 } 746 747 AV1Field& field = fields[fieldCount]; 748 auto fieldStr = *iter; 749 750 if (field.field == &aLevel) { 751 // Parse tier and remove it from the level field. 752 if (fieldStr.Length() < 3) { 753 return false; 754 } 755 auto tier = fieldStr[2]; 756 switch (tier) { 757 case 'M': 758 aTier = 0; 759 break; 760 case 'H': 761 aTier = 1; 762 break; 763 default: 764 return false; 765 } 766 fieldStr.SetLength(2); 767 } 768 769 if (fieldStr.Length() < field.length) { 770 return false; 771 } 772 773 // Manually parse values since nsString.ToInteger silently stops parsing 774 // upon encountering unknown characters. 775 uint8_t value = 0; 776 for (size_t i = 0; i < field.length; i++) { 777 uint8_t oldValue = value; 778 char16_t character = fieldStr[i]; 779 if ('0' <= character && character <= '9') { 780 value = (value * 10) + (character - '0'); 781 } else { 782 return false; 783 } 784 if (value < oldValue) { 785 // Overflow is possible on the 3-digit subsampling field. 786 return false; 787 } 788 } 789 790 *field.field = value; 791 792 ++fieldCount; 793 ++iter; 794 795 // Field had extra characters, exit early. 796 if (fieldStr.Length() > field.length) { 797 // Disallow numbers as unexpected characters. 
798 char16_t character = fieldStr[field.length]; 799 if ('0' <= character && character <= '9') { 800 return false; 801 } 802 break; 803 } 804 } 805 806 // Spec requires profile, level/tier, bitdepth, or for all possible fields to 807 // be present. 808 if (fieldCount != 3 && fieldCount != 9) { 809 return false; 810 } 811 812 // Valid profiles are: Main (0), High (1), Professional (2). 813 // Levels range from 0 to 23, or 31 to remove level restrictions. 814 if (aProfile > 2 || (aLevel > 23 && aLevel != 31)) { 815 return false; 816 } 817 818 if (fieldCount == 3) { 819 // If only required fields are included, set to the spec defaults for the 820 // rest and continue validating. 821 aMonochrome = false; 822 aSubsamplingX = true; 823 aSubsamplingY = true; 824 aChromaSamplePosition = 0; 825 aColorSpace.mPrimaries = ColourPrimaries::CP_BT709; 826 aColorSpace.mTransfer = TransferCharacteristics::TC_BT709; 827 aColorSpace.mMatrix = MatrixCoefficients::MC_BT709; 828 aColorSpace.mRange = ColorRange::LIMITED; 829 } else { 830 // Extract the individual values for the remaining fields, and check for 831 // valid values for each. 832 833 // Monochrome is a boolean. 834 if (monochrome > 1) { 835 return false; 836 } 837 aMonochrome = !!monochrome; 838 839 // Extract individual digits of the subsampling field. 840 // Subsampling is two binary digits for x and y 841 // and one enumerated sample position field of 842 // Unknown (0), Vertical (1), Colocated (2). 843 uint8_t subsamplingX = (subsampling / 100) % 10; 844 uint8_t subsamplingY = (subsampling / 10) % 10; 845 if (subsamplingX > 1 || subsamplingY > 1) { 846 return false; 847 } 848 aSubsamplingX = !!subsamplingX; 849 aSubsamplingY = !!subsamplingY; 850 aChromaSamplePosition = subsampling % 10; 851 if (aChromaSamplePosition > 2) { 852 return false; 853 } 854 855 // We can validate the color space values using CICP enums, as the values 856 // are standardized in Rec. ITU-T H.273. 
857 aColorSpace.mPrimaries = static_cast<ColourPrimaries>(primary); 858 aColorSpace.mTransfer = static_cast<TransferCharacteristics>(transfer); 859 aColorSpace.mMatrix = static_cast<MatrixCoefficients>(matrix); 860 if (gfx::CICP::IsReserved(aColorSpace.mPrimaries) || 861 gfx::CICP::IsReserved(aColorSpace.mTransfer) || 862 gfx::CICP::IsReserved(aColorSpace.mMatrix)) { 863 return false; 864 } 865 // Range is a boolean, true meaning full and false meaning limited range. 866 if (range > 1) { 867 return false; 868 } 869 aColorSpace.mRange = static_cast<ColorRange>(range); 870 } 871 872 // Begin validating all parameter values: 873 874 // Only Levels 8 and above (4.0 and greater) can specify Tier. 875 // See: 5.5.1. General sequence header OBU syntax, 876 // if ( seq_level_idx[ i ] > 7 ) seq_tier[ i ] = f(1) 877 // https://aomediacodec.github.io/av1-spec/av1-spec.pdf#page=42 878 // Also: Annex A, A.3. Levels, columns MainMbps and HighMbps 879 // at https://aomediacodec.github.io/av1-spec/av1-spec.pdf#page=652 880 if (aLevel < 8 && aTier > 0) { 881 return false; 882 } 883 884 // Supported bit depths are 8, 10 and 12. 885 if (aBitDepth != 8 && aBitDepth != 10 && aBitDepth != 12) { 886 return false; 887 } 888 // Profiles 0 and 1 only support 8-bit and 10-bit. 889 if (aProfile < 2 && aBitDepth == 12) { 890 return false; 891 } 892 893 // x && y subsampling is used to specify monochrome 4:0:0 as well 894 bool is420or400 = aSubsamplingX && aSubsamplingY; 895 bool is422 = aSubsamplingX && !aSubsamplingY; 896 bool is444 = !aSubsamplingX && !aSubsamplingY; 897 898 // Profile 0 only supports 4:2:0. 899 if (aProfile == 0 && !is420or400) { 900 return false; 901 } 902 // Profile 1 only supports 4:4:4. 903 if (aProfile == 1 && !is444) { 904 return false; 905 } 906 // Profile 2 only allows 4:2:2 at 10 bits and below. 907 if (aProfile == 2 && aBitDepth < 12 && !is422) { 908 return false; 909 } 910 // Chroma sample position can only be specified with 4:2:0. 
911 if (aChromaSamplePosition != 0 && !is420or400) { 912 return false; 913 } 914 915 // When video is monochrome, subsampling must be 4:0:0. 916 if (aMonochrome && (aChromaSamplePosition != 0 || !is420or400)) { 917 return false; 918 } 919 // Monochrome can only be signaled when profile is 0 or 2. 920 // Note: This check is redundant with the above subsampling check, 921 // as profile 1 only supports 4:4:4. 922 if (aMonochrome && aProfile != 0 && aProfile != 2) { 923 return false; 924 } 925 926 // Identity matrix requires 4:4:4 subsampling. 927 if (aColorSpace.mMatrix == MatrixCoefficients::MC_IDENTITY && 928 (aSubsamplingX || aSubsamplingY || 929 aColorSpace.mRange != gfx::ColorRange::FULL)) { 930 return false; 931 } 932 933 return true; 934 } 935 936 nsresult GenerateRandomName(nsCString& aOutSalt, uint32_t aLength) { 937 nsresult rv; 938 nsCOMPtr<nsIRandomGenerator> rg = 939 do_GetService("@mozilla.org/security/random-generator;1", &rv); 940 if (NS_FAILED(rv)) { 941 return rv; 942 } 943 944 // For each three bytes of random data we will get four bytes of ASCII. 
945 const uint32_t requiredBytesLength = 946 static_cast<uint32_t>((aLength + 3) / 4 * 3); 947 948 uint8_t* buffer; 949 rv = rg->GenerateRandomBytes(requiredBytesLength, &buffer); 950 if (NS_FAILED(rv)) { 951 return rv; 952 } 953 954 nsCString temp; 955 nsDependentCSubstring randomData(reinterpret_cast<const char*>(buffer), 956 requiredBytesLength); 957 rv = Base64Encode(randomData, temp); 958 free(buffer); 959 buffer = nullptr; 960 if (NS_FAILED(rv)) { 961 return rv; 962 } 963 964 aOutSalt = std::move(temp); 965 return NS_OK; 966 } 967 968 nsresult GenerateRandomPathName(nsCString& aOutSalt, uint32_t aLength) { 969 nsresult rv = GenerateRandomName(aOutSalt, aLength); 970 if (NS_FAILED(rv)) { 971 return rv; 972 } 973 974 // Base64 characters are alphanumeric (a-zA-Z0-9) and '+' and '/', so we need 975 // to replace illegal characters -- notably '/' 976 aOutSalt.ReplaceChar(FILE_PATH_SEPARATOR FILE_ILLEGAL_CHARACTERS, '_'); 977 return NS_OK; 978 } 979 980 already_AddRefed<TaskQueue> CreateMediaDecodeTaskQueue(const char* aName) { 981 RefPtr<TaskQueue> queue = TaskQueue::Create( 982 GetMediaThreadPool(MediaThreadType::PLATFORM_DECODER), aName); 983 return queue.forget(); 984 } 985 986 void SimpleTimer::Cancel() { 987 if (mTimer) { 988 #ifdef DEBUG 989 nsCOMPtr<nsIEventTarget> target; 990 mTimer->GetTarget(getter_AddRefs(target)); 991 bool onCurrent; 992 nsresult rv = target->IsOnCurrentThread(&onCurrent); 993 MOZ_ASSERT(NS_SUCCEEDED(rv) && onCurrent); 994 #endif 995 mTimer->Cancel(); 996 mTimer = nullptr; 997 } 998 mTask = nullptr; 999 } 1000 1001 NS_IMETHODIMP 1002 SimpleTimer::Notify(nsITimer* timer) { 1003 RefPtr<SimpleTimer> deathGrip(this); 1004 if (mTask) { 1005 mTask->Run(); 1006 mTask = nullptr; 1007 } 1008 return NS_OK; 1009 } 1010 1011 NS_IMETHODIMP 1012 SimpleTimer::GetName(nsACString& aName) { 1013 aName.AssignLiteral("SimpleTimer"); 1014 return NS_OK; 1015 } 1016 1017 nsresult SimpleTimer::Init(nsIRunnable* aTask, uint32_t aTimeoutMs, 1018 
nsIEventTarget* aTarget) { 1019 nsresult rv; 1020 1021 // Get target thread first, so we don't have to cancel the timer if it fails. 1022 nsCOMPtr<nsIEventTarget> target; 1023 if (aTarget) { 1024 target = aTarget; 1025 } else { 1026 target = GetMainThreadSerialEventTarget(); 1027 if (!target) { 1028 return NS_ERROR_NOT_AVAILABLE; 1029 } 1030 } 1031 1032 rv = NS_NewTimerWithCallback(getter_AddRefs(mTimer), this, aTimeoutMs, 1033 nsITimer::TYPE_ONE_SHOT, target); 1034 if (NS_FAILED(rv)) { 1035 return rv; 1036 } 1037 1038 mTask = aTask; 1039 return NS_OK; 1040 } 1041 1042 NS_IMPL_ISUPPORTS(SimpleTimer, nsITimerCallback, nsINamed) 1043 1044 already_AddRefed<SimpleTimer> SimpleTimer::Create(nsIRunnable* aTask, 1045 uint32_t aTimeoutMs, 1046 nsIEventTarget* aTarget) { 1047 RefPtr<SimpleTimer> t(new SimpleTimer()); 1048 if (NS_FAILED(t->Init(aTask, aTimeoutMs, aTarget))) { 1049 return nullptr; 1050 } 1051 return t.forget(); 1052 } 1053 1054 void LogToBrowserConsole(const nsAString& aMsg) { 1055 if (!NS_IsMainThread()) { 1056 nsString msg(aMsg); 1057 nsCOMPtr<nsIRunnable> task = NS_NewRunnableFunction( 1058 "LogToBrowserConsole", [msg]() { LogToBrowserConsole(msg); }); 1059 SchedulerGroup::Dispatch(task.forget()); 1060 return; 1061 } 1062 nsCOMPtr<nsIConsoleService> console( 1063 do_GetService("@mozilla.org/consoleservice;1")); 1064 if (!console) { 1065 NS_WARNING("Failed to log message to console."); 1066 return; 1067 } 1068 nsAutoString msg(aMsg); 1069 console->LogStringMessage(msg.get()); 1070 } 1071 1072 bool ParseCodecsString(const nsAString& aCodecs, 1073 nsTArray<nsString>& aOutCodecs) { 1074 aOutCodecs.Clear(); 1075 bool expectMoreTokens = false; 1076 nsCharSeparatedTokenizer tokenizer(aCodecs, ','); 1077 while (tokenizer.hasMoreTokens()) { 1078 const nsAString& token = tokenizer.nextToken(); 1079 expectMoreTokens = tokenizer.separatorAfterCurrentToken(); 1080 aOutCodecs.AppendElement(token); 1081 } 1082 return !expectMoreTokens; 1083 } 1084 1085 bool 
// (the `bool` return type sits at the end of the previous chunk)
// Splits a MIME type string into its container type and the codec list from
// the "codecs" parameter. Returns false when the container type cannot be
// parsed, or when the codecs string has a dangling trailing comma.
ParseMIMETypeString(const nsAString& aMIMEType,
                    nsString& aOutContainerType,
                    nsTArray<nsString>& aOutCodecs) {
  nsContentTypeParser parser(aMIMEType);
  nsresult rv = parser.GetType(aOutContainerType);
  if (NS_FAILED(rv)) {
    return false;
  }

  // GetParameter's result is deliberately ignored: with no "codecs"
  // parameter, codecsStr stays empty and we report an empty codec list.
  nsString codecsStr;
  parser.GetParameter("codecs", codecsStr);
  return ParseCodecsString(codecsStr, aOutCodecs);
}

// True when |string| begins with the string-literal |prefix|. N includes the
// terminating NUL, hence only N - 1 bytes are compared.
template <int N>
static bool StartsWith(const nsACString& string, const char (&prefix)[N]) {
  if (N - 1 > string.Length()) {
    return false;
  }
  return memcmp(string.Data(), prefix, N - 1) == 0;
}

// True if |aCodec| can be leniently parsed as an H.264 codec string.
bool IsH264CodecString(const nsAString& aCodec) {
  uint8_t profile = 0;
  uint8_t constraint = 0;
  H264_LEVEL level;
  return ExtractH264CodecDetails(aCodec, profile, constraint, level,
                                 H264CodecStringStrictness::Lenient);
}

// True if |aCodec| is an H.264 codec string whose profile/level combination
// we are willing to claim support for.
bool IsAllowedH264Codec(const nsAString& aCodec) {
  uint8_t profile = 0, constraint = 0;
  H264_LEVEL level;

  // Don't validate too much here, validation happens below
  if (!ExtractH264CodecDetails(aCodec, profile, constraint, level,
                               H264CodecStringStrictness::Lenient)) {
    return false;
  }

  // Just assume what we can play on all platforms the codecs/formats that
  // WMF can play, since we don't have documentation about what other
  // platforms can play... According to the WMF documentation:
  // http://msdn.microsoft.com/en-us/library/windows/desktop/dd797815%28v=vs.85%29.aspx
  // "The Media Foundation H.264 video decoder is a Media Foundation Transform
  // that supports decoding of Baseline, Main, and High profiles, up to level
  // 5.1.". We extend the limit to level 6.2, relying on the decoder to handle
  // any potential errors, the level limit being rather arbitrary.
  // We also report that we can play Extended profile, as there are
  // bitstreams that are Extended compliant that are also Baseline compliant.
  return level >= H264_LEVEL::H264_LEVEL_1 &&
         level <= H264_LEVEL::H264_LEVEL_6_2 &&
         (profile == H264_PROFILE_BASE || profile == H264_PROFILE_MAIN ||
          profile == H264_PROFILE_EXTENDED || profile == H264_PROFILE_HIGH);
}

// True if |aCodec| parses as an H.265/HEVC codec string.
bool IsH265CodecString(const nsAString& aCodec) {
  uint8_t profile = 0;
  uint8_t level = 0;
  nsTArray<uint8_t> constraints;
  return ExtractH265CodecDetails(aCodec, profile, level, constraints);
}

// Exact-match allowlist of the AAC codec strings we recognise.
bool IsAACCodecString(const nsAString& aCodec) {
  return aCodec.EqualsLiteral("mp4a.40.2") ||  // MPEG4 AAC-LC
         aCodec.EqualsLiteral(
             "mp4a.40.02") ||  // MPEG4 AAC-LC(for compatibility)
         aCodec.EqualsLiteral("mp4a.40.5") ||  // MPEG4 HE-AAC
         aCodec.EqualsLiteral(
             "mp4a.40.05") ||  // MPEG4 HE-AAC(for compatibility)
         aCodec.EqualsLiteral("mp4a.67") ||    // MPEG2 AAC-LC
         aCodec.EqualsLiteral("mp4a.40.29");   // MPEG4 HE-AACv2
}

// Accepts the "vp8"/"vp8.0" shorthands, or a full "vp08..." string that
// ExtractVPXCodecDetails can validate.
bool IsVP8CodecString(const nsAString& aCodec) {
  uint8_t profile = 0;
  uint8_t level = 0;
  uint8_t bitDepth = 0;
  return aCodec.EqualsLiteral("vp8") || aCodec.EqualsLiteral("vp8.0") ||
         (StartsWith(NS_ConvertUTF16toUTF8(aCodec), "vp08") &&
          ExtractVPXCodecDetails(aCodec, profile, level, bitDepth));
}

// Accepts the "vp9"/"vp9.0" shorthands, or a full "vp09..." string that
// ExtractVPXCodecDetails can validate.
bool IsVP9CodecString(const nsAString& aCodec) {
  uint8_t profile = 0;
  uint8_t level = 0;
  uint8_t bitDepth = 0;
  return aCodec.EqualsLiteral("vp9") || aCodec.EqualsLiteral("vp9.0") ||
         (StartsWith(NS_ConvertUTF16toUTF8(aCodec), "vp09") &&
          ExtractVPXCodecDetails(aCodec, profile, level, bitDepth));
}

// IsAV1CodecString(): the returned expression continues in the next chunk.
bool IsAV1CodecString(const nsAString& aCodec) {
  uint8_t profile, level, tier, bitDepth, chromaPosition;
  bool monochrome, subsamplingX, subsamplingY;
  VideoColorSpace colorSpace;
  return
// (tail of IsAV1CodecString(); the `return` keyword ends the previous chunk)
// Accepts the "av1" shorthand, or a full "av01..." string that
// ExtractAV1CodecDetails can validate.
      aCodec.EqualsLiteral("av1") ||
      (StartsWith(NS_ConvertUTF16toUTF8(aCodec), "av01") &&
       ExtractAV1CodecDetails(aCodec, profile, level, tier, bitDepth,
                              monochrome, subsamplingX, subsamplingY,
                              chromaPosition, colorSpace));
}

// Creates a bare AudioInfo or VideoInfo depending on whether |aCodecMIMEType|
// starts with "audio/" or "video/"; returns null for any other prefix.
UniquePtr<TrackInfo> CreateTrackInfoWithMIMEType(
    const nsACString& aCodecMIMEType) {
  UniquePtr<TrackInfo> trackInfo;
  if (StartsWith(aCodecMIMEType, "audio/")) {
    trackInfo.reset(new AudioInfo());
    trackInfo->mMimeType = aCodecMIMEType;
  } else if (StartsWith(aCodecMIMEType, "video/")) {
    trackInfo.reset(new VideoInfo());
    trackInfo->mMimeType = aCodecMIMEType;
  }
  return trackInfo;
}

// As CreateTrackInfoWithMIMEType(), but additionally copies any positive
// width/height (video) or channels/samplerate (audio) values from the
// container type's extended parameters into the track info.
UniquePtr<TrackInfo> CreateTrackInfoWithMIMETypeAndContainerTypeExtraParameters(
    const nsACString& aCodecMIMEType,
    const MediaContainerType& aContainerType) {
  UniquePtr<TrackInfo> trackInfo = CreateTrackInfoWithMIMEType(aCodecMIMEType);
  if (trackInfo) {
    VideoInfo* videoInfo = trackInfo->GetAsVideoInfo();
    if (videoInfo) {
      Maybe<int32_t> maybeWidth = aContainerType.ExtendedType().GetWidth();
      if (maybeWidth && *maybeWidth > 0) {
        videoInfo->mImage.width = *maybeWidth;
        videoInfo->mDisplay.width = *maybeWidth;
      }
      Maybe<int32_t> maybeHeight = aContainerType.ExtendedType().GetHeight();
      if (maybeHeight && *maybeHeight > 0) {
        videoInfo->mImage.height = *maybeHeight;
        videoInfo->mDisplay.height = *maybeHeight;
      }
    } else if (trackInfo->GetAsAudioInfo()) {
      AudioInfo* audioInfo = trackInfo->GetAsAudioInfo();
      Maybe<int32_t> maybeChannels =
          aContainerType.ExtendedType().GetChannels();
      if (maybeChannels && *maybeChannels > 0) {
        audioInfo->mChannels = *maybeChannels;
      }
      Maybe<int32_t> maybeSamplerate =
          aContainerType.ExtendedType().GetSamplerate();
      if (maybeSamplerate && *maybeSamplerate > 0) {
        audioInfo->mRate = *maybeSamplerate;
      }
    }
  }
  return trackInfo;
}

// True when the current network link is cellular-class (WIMAX or MOBILE).
// Content processes ask ContentChild for the cached link type; other
// processes query nsINetworkLinkService directly. Every failure path is
// treated conservatively as "not cellular".
bool OnCellularConnection() {
  uint32_t linkType = nsINetworkLinkService::LINK_TYPE_UNKNOWN;
  if (XRE_IsContentProcess()) {
    mozilla::dom::ContentChild* cpc =
        mozilla::dom::ContentChild::GetSingleton();
    if (!cpc) {
      NS_WARNING("Can't get ContentChild singleton in content process!");
      return false;
    }
    linkType = cpc->NetworkLinkType();
  } else {
    nsresult rv;
    nsCOMPtr<nsINetworkLinkService> nls =
        do_GetService(NS_NETWORK_LINK_SERVICE_CONTRACTID, &rv);
    if (NS_FAILED(rv)) {
      NS_WARNING("Can't get nsINetworkLinkService.");
      return false;
    }

    rv = nls->GetLinkType(&linkType);
    if (NS_FAILED(rv)) {
      NS_WARNING("Can't get network link type.");
      return false;
    }
  }

  switch (linkType) {
    case nsINetworkLinkService::LINK_TYPE_UNKNOWN:
    case nsINetworkLinkService::LINK_TYPE_ETHERNET:
    case nsINetworkLinkService::LINK_TYPE_USB:
    case nsINetworkLinkService::LINK_TYPE_WIFI:
    default:
      return false;
    case nsINetworkLinkService::LINK_TYPE_WIMAX:
    case nsINetworkLinkService::LINK_TYPE_MOBILE:
      return true;
  }
}

// Exact-match list of WAVE MIME types (including specific codecs parameters)
// that we treat as wave audio.
bool IsWaveMimetype(const nsACString& aMimeType) {
  return aMimeType.EqualsLiteral("audio/x-wav") ||
         aMimeType.EqualsLiteral("audio/wave; codecs=1") ||
         aMimeType.EqualsLiteral("audio/wave; codecs=3") ||
         aMimeType.EqualsLiteral("audio/wave; codecs=6") ||
         aMimeType.EqualsLiteral("audio/wave; codecs=7") ||
         aMimeType.EqualsLiteral("audio/wave; codecs=65534");
}

// Appends a telemetry label to |aResolutionOut|: an "AV,"/"V," prefix
// depending on the presence of audio, followed by a height bucket (e.g.
// "240<h<=480") chosen from the video display height.
// NOTE: the bucket table and the rest of the body continue in the next chunk.
void DetermineResolutionForTelemetry(const MediaInfo& aInfo,
                                     nsCString& aResolutionOut) {
  if (aInfo.HasAudio()) {
    aResolutionOut.AppendASCII("AV,");
  } else {
    aResolutionOut.AppendASCII("V,");
  }
  static const struct {
    int32_t mH;
    const char* mRes;
  } sResolutions[] = {{240, "0<h<=240"},   {480, "240<h<=480"},
                      {576, "480<h<=576"}, {720,
"576<h<=720"}, 1295 {1080, "720<h<=1080"}, {2160, "1080<h<=2160"}}; 1296 const char* resolution = "h>2160"; 1297 int32_t height = aInfo.mVideo.mDisplay.height; 1298 for (const auto& res : sResolutions) { 1299 if (height <= res.mH) { 1300 resolution = res.mRes; 1301 break; 1302 } 1303 } 1304 aResolutionOut.AppendASCII(resolution); 1305 } 1306 1307 bool ContainHardwareCodecsSupported( 1308 const media::MediaCodecsSupported& aSupport) { 1309 return aSupport.contains( 1310 mozilla::media::MediaCodecsSupport::H264HardwareDecode) || 1311 aSupport.contains( 1312 mozilla::media::MediaCodecsSupport::VP8HardwareDecode) || 1313 aSupport.contains( 1314 mozilla::media::MediaCodecsSupport::VP9HardwareDecode) || 1315 aSupport.contains( 1316 mozilla::media::MediaCodecsSupport::AV1HardwareDecode) || 1317 aSupport.contains( 1318 mozilla::media::MediaCodecsSupport::HEVCHardwareDecode); 1319 } 1320 1321 } // end namespace mozilla