// MediaEngineRemoteVideoSource.cpp
1 /* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- 2 * This Source Code Form is subject to the terms of the Mozilla Public 3 * License, v. 2.0. If a copy of the MPL was not distributed with this file, 4 * You can obtain one at http://mozilla.org/MPL/2.0/. */ 5 6 #include "MediaEngineRemoteVideoSource.h" 7 8 #include "CamerasChild.h" 9 #include "ImageContainer.h" 10 #include "MediaManager.h" 11 #include "MediaTrackConstraints.h" 12 #include "PerformanceRecorder.h" 13 #include "VideoSegment.h" 14 #include "common_video/include/video_frame_buffer.h" 15 #include "common_video/libyuv/include/webrtc_libyuv.h" 16 #include "mozilla/ErrorNames.h" 17 #include "mozilla/RefPtr.h" 18 #include "mozilla/dom/MediaTrackCapabilitiesBinding.h" 19 #include "mozilla/dom/MediaTrackSettingsBinding.h" 20 #include "mozilla/gfx/Point.h" 21 22 namespace mozilla { 23 24 extern LazyLogModule gMediaManagerLog; 25 #define LOG(...) MOZ_LOG(gMediaManagerLog, LogLevel::Debug, (__VA_ARGS__)) 26 #define LOG_FRAME(...) \ 27 MOZ_LOG(gMediaManagerLog, LogLevel::Verbose, (__VA_ARGS__)) 28 29 using dom::MediaSourceEnum; 30 using dom::MediaTrackCapabilities; 31 using dom::MediaTrackConstraints; 32 using dom::MediaTrackConstraintSet; 33 using dom::MediaTrackSettings; 34 using dom::VideoFacingModeEnum; 35 using dom::VideoResizeModeEnum; 36 37 /* static */ 38 camera::CaptureEngine MediaEngineRemoteVideoSource::CaptureEngine( 39 MediaSourceEnum aMediaSource) { 40 switch (aMediaSource) { 41 case MediaSourceEnum::Browser: 42 return camera::BrowserEngine; 43 case MediaSourceEnum::Camera: 44 return camera::CameraEngine; 45 case MediaSourceEnum::Screen: 46 return camera::ScreenEngine; 47 case MediaSourceEnum::Window: 48 return camera::WinEngine; 49 default: 50 MOZ_CRASH(); 51 } 52 } 53 54 static Maybe<VideoFacingModeEnum> GetFacingMode(const nsString& aDeviceName) { 55 // Set facing mode based on device name. 56 #if defined(ANDROID) 57 // Names are generated. 
Example: "Camera 0, Facing back, Orientation 90" 58 // 59 // See media/webrtc/trunk/webrtc/modules/video_capture/android/java/src/org/ 60 // webrtc/videoengine/VideoCaptureDeviceInfoAndroid.java 61 62 if (aDeviceName.Find(u"Facing back"_ns) != kNotFound) { 63 return Some(VideoFacingModeEnum::Environment); 64 } 65 if (aDeviceName.Find(u"Facing front"_ns) != kNotFound) { 66 return Some(VideoFacingModeEnum::User); 67 } 68 #endif // ANDROID 69 #ifdef XP_WIN 70 // The cameras' name of Surface book are "Microsoft Camera Front" and 71 // "Microsoft Camera Rear" respectively. 72 73 if (aDeviceName.Find(u"Front"_ns) != kNotFound) { 74 return Some(VideoFacingModeEnum::User); 75 } 76 if (aDeviceName.Find(u"Rear"_ns) != kNotFound) { 77 return Some(VideoFacingModeEnum::Environment); 78 } 79 #endif // WINDOWS 80 81 return Nothing(); 82 } 83 84 struct DesiredSizeInput { 85 NormalizedConstraints mConstraints; 86 Maybe<bool> mCanCropAndScale; 87 Maybe<int32_t> mCapabilityWidth; 88 Maybe<int32_t> mCapabilityHeight; 89 camera::CaptureEngine mCapEngine; 90 int32_t mInputWidth; 91 int32_t mInputHeight; 92 int32_t mRotation; 93 }; 94 95 static gfx::IntSize CalculateDesiredSize(DesiredSizeInput aInput) { 96 if (!aInput.mCanCropAndScale.valueOr(aInput.mCapEngine != 97 camera::CameraEngine)) { 98 // Don't scale to constraints in resizeMode "none". 99 // If resizeMode is disabled, follow our legacy behavior of downscaling for 100 // screen capture but not for cameras. 101 aInput.mConstraints.mWidth.mIdeal = Nothing(); 102 aInput.mConstraints.mHeight.mIdeal = Nothing(); 103 } 104 105 if (aInput.mRotation == 90 || aInput.mRotation == 270) { 106 // This frame is rotated, so what was negotiated as width is now height, 107 // and vice versa. 108 std::swap(aInput.mConstraints.mWidth, aInput.mConstraints.mHeight); 109 std::swap(aInput.mCapabilityWidth, aInput.mCapabilityWidth); 110 } 111 112 // Account for a shared camera giving us higher resolution than we asked for. 
113 const int32_t inputWidth = 114 std::max(2, aInput.mCapabilityWidth.valueOr(aInput.mInputWidth)); 115 const int32_t inputHeight = 116 std::max(2, aInput.mCapabilityHeight.valueOr(aInput.mInputHeight)); 117 118 // This logic works for both camera and screen sharing case. 119 // In VideoResizeModeEnum::None, ideal dimensions are absent. 120 // In screen sharing, min and exact dimensions are forbidden. 121 int32_t dst_width = aInput.mConstraints.mWidth.Get(inputWidth); 122 int32_t dst_height = aInput.mConstraints.mHeight.Get(inputHeight); 123 124 // We must not upscale. 125 dst_width = std::min(dst_width, inputWidth); 126 dst_height = std::min(dst_height, inputHeight); 127 128 if (aInput.mCapEngine != camera::CameraEngine || 129 !aInput.mConstraints.mWidth.mIdeal || 130 !aInput.mConstraints.mHeight.mIdeal) { 131 // Scale down without cropping. 132 // Cropping is not allowed by spec for desktop capture. 133 // For cameras, it only makes sense when not both ideal width and 134 // height are given, assuming they're within min/max constraints. 135 136 // Max constraints decide the envelope. 137 const double scale_width_strict = 138 std::min(1.0, AssertedCast<double>(aInput.mConstraints.mWidth.mMax) / 139 AssertedCast<double>(inputWidth)); 140 const double scale_height_strict = 141 std::min(1.0, AssertedCast<double>(aInput.mConstraints.mHeight.mMax) / 142 AssertedCast<double>(inputHeight)); 143 144 double scale_width = 145 AssertedCast<double>(dst_width) / AssertedCast<double>(inputWidth); 146 double scale_height = 147 AssertedCast<double>(dst_height) / AssertedCast<double>(inputHeight); 148 149 // If both ideal width & ideal height are absent, scale is 1, but 150 // if one is present and the other not, scale precisely to the one present. 151 // If both are present, scale to the smaller one. 152 // This works because the fitness distance for width and height is shortest 153 // where either dimension exactly matches its ideal constraint. 
154 // Also adapt to max constraints. 155 double scale = std::min( 156 {scale_width, scale_height, scale_width_strict, scale_height_strict}); 157 158 dst_width = SaturatingCast<int32_t>( 159 std::round(scale * AssertedCast<double>(inputWidth))); 160 dst_height = SaturatingCast<int32_t>( 161 std::round(scale * AssertedCast<double>(inputHeight))); 162 } 163 164 if (aInput.mCapEngine == camera::CameraEngine) { 165 // For cameras we are allowed to crop. Adapt to min constraints. 166 dst_width = aInput.mConstraints.mWidth.Clamp(dst_width); 167 dst_height = aInput.mConstraints.mHeight.Clamp(dst_height); 168 } 169 170 // Ensure width and height are at least two. Smaller frames can lead to 171 // problems with scaling and video encoding. 172 dst_width = std::max(2, dst_width); 173 dst_height = std::max(2, dst_height); 174 175 return {dst_width, dst_height}; 176 } 177 178 MediaEngineRemoteVideoSource::MediaEngineRemoteVideoSource( 179 const MediaDevice* aMediaDevice) 180 : mCapEngine(CaptureEngine(aMediaDevice->mMediaSource)), 181 mTrackingId(CaptureEngineToTrackingSourceStr(mCapEngine), 0), 182 mMutex("MediaEngineRemoteVideoSource::mMutex"), 183 mRescalingBufferPool(/* zero_initialize */ false, 184 /* max_number_of_buffers */ 1), 185 mSettingsUpdatedByFrame(MakeAndAddRef<media::Refcountable<AtomicBool>>()), 186 mSettings(MakeAndAddRef<media::Refcountable<MediaTrackSettings>>()), 187 mTrackCapabilities( 188 MakeAndAddRef<media::Refcountable<MediaTrackCapabilities>>()), 189 mFirstFramePromise(mFirstFramePromiseHolder.Ensure(__func__)), 190 mCalculation(kFitness), 191 mPrefs(MakeUnique<MediaEnginePrefs>()), 192 mMediaDevice(aMediaDevice), 193 mDeviceUUID(NS_ConvertUTF16toUTF8(aMediaDevice->mRawID)) { 194 LOG("%s", __PRETTY_FUNCTION__); 195 if (mCapEngine == camera::CameraEngine) { 196 // Only cameras can have a facing mode. 
    Maybe<VideoFacingModeEnum> facingMode =
        GetFacingMode(mMediaDevice->mRawName);
    if (facingMode.isSome()) {
      mFacingMode.emplace(
          NS_ConvertASCIItoUTF16(dom::GetEnumString(*facingMode)));
    }
  }
}

// Creates a new source for aMediaDevice, seeded with aSource's current
// settings, capabilities and incoming image size.
/*static*/
already_AddRefed<MediaEngineRemoteVideoSource>
MediaEngineRemoteVideoSource::CreateFrom(
    const MediaEngineRemoteVideoSource* aSource,
    const MediaDevice* aMediaDevice) {
  auto src = MakeRefPtr<MediaEngineRemoteVideoSource>(aMediaDevice);
  *static_cast<MediaTrackSettings*>(src->mSettings) = *aSource->mSettings;
  *static_cast<MediaTrackCapabilities*>(src->mTrackCapabilities) =
      *aSource->mTrackCapabilities;
  {
    // mIncomingImageSize is written on the frame-delivery thread, so read it
    // under the source's mutex.
    MutexAutoLock lock(aSource->mMutex);
    src->mIncomingImageSize = aSource->mIncomingImageSize;
  }
  return src.forget();
}

MediaEngineRemoteVideoSource::~MediaEngineRemoteVideoSource() {
  // If no frame ever arrived, make sure waiters on the first-frame promise
  // are released.
  mFirstFramePromiseHolder.RejectIfExists(NS_ERROR_ABORT, __func__);
}

// Maps a resizeMode to the distance calculation used for capability
// selection: "none" -> fitness, "crop-and-scale" -> feasibility.
static inline DistanceCalculation ToDistanceCalculation(
    VideoResizeModeEnum aMode) {
  switch (aMode) {
    case VideoResizeModeEnum::None:
      return kFitness;
    case VideoResizeModeEnum::Crop_and_scale:
      return kFeasibility;
  }
  MOZ_CRASH("Unexpected resize mode");
}

// Debug-logging helper for DistanceCalculation values.
static inline const char* ToString(DistanceCalculation aMode) {
  switch (aMode) {
    case kFitness:
      return "kFitness";
    case kFeasibility:
      return "kFeasibility";
  }
  MOZ_CRASH("Unexpected distance calculation");
}

// Chooses a capability for the given constraints/prefs, allocates a capture
// in the camera IPC layer, records the chosen state under the mutex, and
// dispatches the resulting settings/capabilities to the main thread.
// On constraint failure, reports the offending constraint via
// aOutBadConstraint (if provided).
nsresult MediaEngineRemoteVideoSource::Allocate(
    const MediaTrackConstraints& aConstraints, const MediaEnginePrefs& aPrefs,
    uint64_t aWindowID, const char** aOutBadConstraint) {
  LOG("%s", __PRETTY_FUNCTION__);
  AssertIsOnOwningThread();

  MOZ_ASSERT(mState == kReleased);

  NormalizedConstraints c(aConstraints);
  const auto resizeMode = MediaConstraintsHelper::GetResizeMode(c, aPrefs);
  const auto distanceMode =
      resizeMode.map(&ToDistanceCalculation).valueOr(kFitness);
  webrtc::CaptureCapability newCapability;
  LOG("ChooseCapability(%s) for mCapability (Allocate) ++",
      ToString(distanceMode));
  if (!ChooseCapability(c, aPrefs, newCapability, distanceMode,
                        aOutBadConstraint)) {
    if (aOutBadConstraint && !*aOutBadConstraint) {
      *aOutBadConstraint =
          MediaConstraintsHelper::FindBadConstraint(c, aPrefs, mMediaDevice);
    }
    return NS_ERROR_FAILURE;
  }
  LOG("ChooseCapability(%s) for mCapability (Allocate) --",
      ToString(distanceMode));

  mCaptureId =
      camera::GetChildAndCall(&camera::CamerasChild::AllocateCapture,
                              mCapEngine, mDeviceUUID.get(), aWindowID);
  if (mCaptureId < 0) {
    return NS_ERROR_FAILURE;
  }

  // Snapshot everything needed for the size calculation under the mutex;
  // compute outside of it.
  DesiredSizeInput input{};
  double framerate = 0.0;
  {
    MutexAutoLock lock(mMutex);
    mState = kAllocated;
    mCapability = newCapability;
    mCalculation = distanceMode;
    mConstraints = Some(c);
    *mPrefs = aPrefs;
    mTrackingId =
        TrackingId(CaptureEngineToTrackingSourceStr(mCapEngine), mCaptureId);
    const int32_t& cw = mCapability.width;
    const int32_t& ch = mCapability.height;
    const double maxFPS = AssertedCast<double>(mCapability.maxFPS);
    input = {
        .mConstraints = c,
        .mCanCropAndScale = resizeMode.map([](auto aRM) {
          return aRM == dom::VideoResizeModeEnum::Crop_and_scale;
        }),
        // Capability dimension 0 means "any"; fall back to the last incoming
        // frame size in that case.
        .mCapabilityWidth = cw ? Some(cw) : Nothing(),
        .mCapabilityHeight = ch ? Some(ch) : Nothing(),
        .mCapEngine = mCapEngine,
        .mInputWidth = cw ? cw : mIncomingImageSize.width,
        .mInputHeight = ch ? ch : mIncomingImageSize.height,
        .mRotation = 0,
    };
    // Only constrain the frame rate when crop-and-scale is in effect; never
    // report more than the capability allows.
    framerate = input.mCanCropAndScale.valueOr(false)
                    ? std::min(mConstraints->mFrameRate.Get(maxFPS), maxFPS)
                    : maxFPS;
  }

  auto dstSize = CalculateDesiredSize(input);

  // mSettings/mTrackCapabilities are main-thread objects; update them there.
  NS_DispatchToMainThread(NS_NewRunnableFunction(
      "MediaEngineRemoteVideoSource::Allocate::MainUpdate",
      [settings = mSettings, caps = mTrackCapabilities, dstSize, framerate,
       facingMode = mFacingMode, resizeMode]() {
        *settings = dom::MediaTrackSettings();

        settings->mWidth.Construct(dstSize.width);
        settings->mHeight.Construct(dstSize.height);
        settings->mFrameRate.Construct(framerate);

        caps->mFacingMode.Reset();
        if (facingMode) {
          settings->mFacingMode.Construct(*facingMode);
          caps->mFacingMode.Construct(nsTArray{*facingMode});
        }

        caps->mResizeMode.Reset();
        if (resizeMode) {
          nsString noneString, cropString;
          noneString.AssignASCII(dom::GetEnumString(VideoResizeModeEnum::None));
          cropString.AssignASCII(
              dom::GetEnumString(VideoResizeModeEnum::Crop_and_scale));
          settings->mResizeMode.Construct(
              *resizeMode == VideoResizeModeEnum::Crop_and_scale ? cropString
                                                                 : noneString);
          caps->mResizeMode.Construct(nsTArray{noneString, cropString});
        }
      }));

  LOG("Video device %d allocated", mCaptureId);
  return NS_OK;
}

// Ends the track, releases local image/buffer resources, and releases the
// capture in the camera IPC layer. An IPC error during parent shutdown still
// counts as success since no more frames can be delivered.
nsresult MediaEngineRemoteVideoSource::Deallocate() {
  LOG("%s", __PRETTY_FUNCTION__);
  AssertIsOnOwningThread();

  MOZ_ASSERT(mState == kStopped || mState == kAllocated);

  if (mTrack) {
    mTrack->End();
  }

  {
    MutexAutoLock lock(mMutex);

    mTrack = nullptr;
    mPrincipal = PRINCIPAL_HANDLE_NONE;
    mState = kReleased;
  }

  // Stop() has stopped capture synchronously on the media thread before we get
  // here, so there are no longer any callbacks on an IPC thread accessing
  // mImageContainer or mRescalingBufferPool.
  mImageContainer = nullptr;
  mRescalingBufferPool.Release();

  LOG("Video device %d deallocated", mCaptureId);

  int error = camera::GetChildAndCall(&camera::CamerasChild::ReleaseCapture,
                                      mCapEngine, mCaptureId);

  if (error == camera::kSuccess) {
    return NS_OK;
  }

  if (error == camera::kIpcError) {
    // Failure can occur when the parent process is shutting down, and the IPC
    // channel is down. We still consider the capturer deallocated in this
    // case, since it cannot deliver frames without the IPC channel open.
    return NS_OK;
  }

  MOZ_ASSERT(error == camera::kError);
  return NS_ERROR_FAILURE;
}

// Associates the allocated source with the track it will feed and the
// principal of the requesting page. Must be called exactly once, after
// Allocate() and before Start().
void MediaEngineRemoteVideoSource::SetTrack(const RefPtr<MediaTrack>& aTrack,
                                            const PrincipalHandle& aPrincipal) {
  LOG("%s", __PRETTY_FUNCTION__);
  AssertIsOnOwningThread();

  MOZ_ASSERT(mState == kAllocated);
  MOZ_ASSERT(!mTrack);
  MOZ_ASSERT(aTrack);
  MOZ_ASSERT(aTrack->AsSourceTrack());

  if (!mImageContainer) {
    mImageContainer = MakeAndAddRef<layers::ImageContainer>(
        layers::ImageUsageType::Webrtc, layers::ImageContainer::ASYNCHRONOUS);
  }

  {
    MutexAutoLock lock(mMutex);
    mTrack = aTrack->AsSourceTrack();
    mPrincipal = aPrincipal;
  }
}

// Starts (or restarts) capture with the currently selected capability and
// constraints, annotating the constraints with the selected resizeMode for
// the parent process.
nsresult MediaEngineRemoteVideoSource::Start() {
  LOG("%s", __PRETTY_FUNCTION__);
  AssertIsOnOwningThread();

  MOZ_ASSERT(mState == kAllocated || mState == kStarted || mState == kStopped);
  MOZ_ASSERT(mTrack);

  NormalizedConstraints constraints;
  {
    MutexAutoLock lock(mMutex);
    mState = kStarted;
    constraints = *mConstraints;
  }

  // Tell CamerasParent what resizeMode was selected, as it doesn't have access
  // to MediaEnginePrefs.
  const auto resizeMode = mCalculation == kFeasibility
                              ? VideoResizeModeEnum::Crop_and_scale
                              : VideoResizeModeEnum::None;
  const auto resizeModeString =
      NS_ConvertASCIItoUTF16(dom::GetEnumString(resizeMode));
  constraints.mResizeMode.mIdeal.clear();
  constraints.mResizeMode.mIdeal.insert(resizeModeString);

  nsresult rv = StartCapture(constraints, resizeMode);
  // The next delivered frame will (re-)publish width/height settings.
  mSettingsUpdatedByFrame->mValue = false;
  return rv;
}

// Issues the StartCapture IPC call with mCapability. On failure the state
// reverts to kStopped.
nsresult MediaEngineRemoteVideoSource::StartCapture(
    const NormalizedConstraints& aConstraints,
    const dom::VideoResizeModeEnum& aResizeMode) {
  LOG("%s", __PRETTY_FUNCTION__);
  AssertIsOnOwningThread();

  MOZ_ASSERT(mState == kStarted);

  if (camera::GetChildAndCall(&camera::CamerasChild::StartCapture, mCapEngine,
                              mCaptureId, mCapability, aConstraints,
                              aResizeMode, this)) {
    LOG("StartCapture failed");
    MutexAutoLock lock(mMutex);
    mState = kStopped;
    return NS_ERROR_FAILURE;
  }

  return NS_OK;
}

// Forwards a focus request for this capture to the camera IPC layer.
nsresult MediaEngineRemoteVideoSource::FocusOnSelectedSource() {
  LOG("%s", __PRETTY_FUNCTION__);
  AssertIsOnOwningThread();

  int result;
  result = camera::GetChildAndCall(&camera::CamerasChild::FocusOnSelectedSource,
                                   mCapEngine, mCaptureId);
  return result == 0 ? NS_OK : NS_ERROR_FAILURE;
}

// Stops capture via IPC. Idempotent when already stopped or merely allocated.
nsresult MediaEngineRemoteVideoSource::Stop() {
  LOG("%s", __PRETTY_FUNCTION__);
  AssertIsOnOwningThread();

  if (mState == kStopped || mState == kAllocated) {
    return NS_OK;
  }

  MOZ_ASSERT(mState == kStarted);

  int error = camera::GetChildAndCall(&camera::CamerasChild::StopCapture,
                                      mCapEngine, mCaptureId);

  if (error == camera::kError) {
    // CamerasParent replied with error. The capturer is still running.
    return NS_ERROR_FAILURE;
  }

  {
    MutexAutoLock lock(mMutex);
    mState = kStopped;
  }

  if (error == camera::kSuccess) {
    return NS_OK;
  }

  MOZ_ASSERT(error == camera::kIpcError);
  // Failure can occur when the parent process is shutting down, and the IPC
  // channel is down. We still consider the capturer stopped in this case,
  // since it cannot deliver frames without the IPC channel open.
  return NS_ERROR_FAILURE;
}

// Applies new constraints/prefs. Re-chooses a capability, restarts capture if
// anything effective changed while started, and publishes updated settings to
// the main thread. Returns NS_ERROR_INVALID_ARG for unsatisfiable
// constraints.
nsresult MediaEngineRemoteVideoSource::Reconfigure(
    const MediaTrackConstraints& aConstraints, const MediaEnginePrefs& aPrefs,
    const char** aOutBadConstraint) {
  LOG("%s", __PRETTY_FUNCTION__);
  AssertIsOnOwningThread();

  NormalizedConstraints c(aConstraints);
  const auto resizeMode = MediaConstraintsHelper::GetResizeMode(c, aPrefs);
  const auto distanceMode =
      resizeMode.map(&ToDistanceCalculation).valueOr(kFitness);
  webrtc::CaptureCapability newCapability;
  LOG("ChooseCapability(%s) for mTargetCapability (Reconfigure) ++",
      ToString(distanceMode));
  if (!ChooseCapability(c, aPrefs, newCapability, distanceMode,
                        aOutBadConstraint)) {
    if (aOutBadConstraint && !*aOutBadConstraint) {
      *aOutBadConstraint =
          MediaConstraintsHelper::FindBadConstraint(c, aPrefs, mMediaDevice);
    }
    return NS_ERROR_INVALID_ARG;
  }
  LOG("ChooseCapability(%s) for mTargetCapability (Reconfigure) --",
      ToString(distanceMode));

  bool needsRestart{};
  DesiredSizeInput input{};
  double framerate = 0.0;
  {
    MutexAutoLock lock(mMutex);

    // Restart only when something the device or parent cares about changed.
    needsRestart = mCapability != newCapability || mConstraints != Some(c) ||
                   !(*mPrefs == aPrefs);

    // StartCapture() applies mCapability on the device.
    mCapability = newCapability;
    mCalculation = distanceMode;
    mConstraints = Some(c);
    *mPrefs = aPrefs;
    const int32_t& cw = mCapability.width;
    const int32_t& ch = mCapability.height;
    input = {
        .mConstraints = c,
        .mCanCropAndScale = resizeMode.map([](auto aRM) {
          return aRM == dom::VideoResizeModeEnum::Crop_and_scale;
        }),
        .mCapabilityWidth = cw ? Some(cw) : Nothing(),
        .mCapabilityHeight = ch ? Some(ch) : Nothing(),
        .mCapEngine = mCapEngine,
        .mInputWidth = cw ? cw : mIncomingImageSize.width,
        .mInputHeight = ch ? ch : mIncomingImageSize.height,
        .mRotation = 0,
    };
    framerate = distanceMode == kFeasibility
                    ? std::min(mConstraints->mFrameRate.Get(mCapability.maxFPS),
                               AssertedCast<double>(mCapability.maxFPS))
                    : mCapability.maxFPS;
  }

  if (mState == kStarted && needsRestart) {
    nsresult rv =
        StartCapture(c, resizeMode.valueOr(dom::VideoResizeModeEnum::None));
    if (NS_WARN_IF(NS_FAILED(rv))) {
      nsAutoCString name;
      GetErrorName(rv, name);
      LOG("Video source %p for video device %d Reconfigure() failed "
          "unexpectedly in Start(). rv=%s",
          this, mCaptureId, name.Data());
      return NS_ERROR_UNEXPECTED;
    }
  }

  mSettingsUpdatedByFrame->mValue = false;
  gfx::IntSize dstSize = CalculateDesiredSize(input);
  NS_DispatchToMainThread(NS_NewRunnableFunction(
      __func__,
      [settings = mSettings, updated = mSettingsUpdatedByFrame, dstSize,
       framerate, resizeModeEnabled = mPrefs->mResizeModeEnabled,
       distanceMode]() mutable {
        const bool cropAndScale = distanceMode == kFeasibility;
        // Skip the size update if a delivered frame already published the
        // real size in the meantime.
        if (!updated->mValue) {
          settings->mWidth.Value() = dstSize.width;
          settings->mHeight.Value() = dstSize.height;
        }
        settings->mFrameRate.Value() = framerate;
        if (resizeModeEnabled) {
          auto resizeMode = cropAndScale ? VideoResizeModeEnum::Crop_and_scale
                                         : VideoResizeModeEnum::None;
          settings->mResizeMode.Reset();
          settings->mResizeMode.Construct(
              NS_ConvertASCIItoUTF16(dom::GetEnumString(resizeMode)));
        }
      }));

  return NS_OK;
}

// Returns the number of capabilities the device exposes, querying the camera
// IPC layer once and caching the result. Devices that report none get a
// single hardcoded "accept anything" capability.
size_t MediaEngineRemoteVideoSource::NumCapabilities() const {
  AssertIsOnOwningThread();

  if (!mCapabilities.IsEmpty()) {
    return mCapabilities.Length();
  }

  int num = camera::GetChildAndCall(&camera::CamerasChild::NumberOfCapabilities,
                                    mCapEngine, mDeviceUUID.get());
  if (num > 0) {
    mCapabilities.SetLength(num);
  } else {
    // The default for devices that don't return discrete capabilities: treat
    // them as supporting all capabilities orthogonally. E.g. screensharing.
    // CaptureCapability defaults key values to 0, which means accept any value.
    mCapabilities.AppendElement(MakeUnique<webrtc::CaptureCapability>());
    mCapabilitiesAreHardcoded = true;
  }

  return mCapabilities.Length();
}

// Lazily fetches and caches capability aIndex from the camera IPC layer.
// aIndex must be < NumCapabilities().
webrtc::CaptureCapability& MediaEngineRemoteVideoSource::GetCapability(
    size_t aIndex) const {
  AssertIsOnOwningThread();
  MOZ_RELEASE_ASSERT(aIndex < mCapabilities.Length());
  if (!mCapabilities[aIndex]) {
    mCapabilities[aIndex] = MakeUnique<webrtc::CaptureCapability>();
    camera::GetChildAndCall(&camera::CamerasChild::GetCaptureCapability,
                            mCapEngine, mDeviceUUID.get(), aIndex,
                            mCapabilities[aIndex].get());
  }
  return *mCapabilities[aIndex];
}

const TrackingId& MediaEngineRemoteVideoSource::GetTrackingId() const {
  AssertIsOnOwningThread();
  MOZ_ASSERT(mState != kReleased);
  return mTrackingId;
}

// Called when the capture ends remotely; rejects the first-frame promise if
// no frame ever arrived and notifies listeners.
void MediaEngineRemoteVideoSource::OnCaptureEnded() {
  mFirstFramePromiseHolder.RejectIfExists(NS_ERROR_UNEXPECTED, __func__);
  mCaptureEndedEvent.Notify();
}

// Receives one raw I420 frame from the capture process, rescales it if
// needed, wraps it in a layers image and appends it to the track.
int MediaEngineRemoteVideoSource::DeliverFrame(
    uint8_t* aBuffer, const camera::VideoFrameProperties& aProps) {
  // Cameras IPC thread - take great care with accessing members!
  // Snapshot state for the size calculation under the mutex.
  DesiredSizeInput input{};
  {
    MutexAutoLock lock(mMutex);
    MOZ_ASSERT(mState == kStarted);
    mIncomingImageSize = {aProps.width(), aProps.height()};
    const int32_t& cw = mCapability.width;
    const int32_t& ch = mCapability.height;

    input = {
        .mConstraints = *mConstraints,
        .mCanCropAndScale = mPrefs->mResizeModeEnabled
                                ? Some(mCalculation == kFeasibility)
                                : Nothing(),
        .mCapabilityWidth = cw ? Some(cw) : Nothing(),
        .mCapabilityHeight = ch ? Some(ch) : Nothing(),
        .mCapEngine = mCapEngine,
        .mInputWidth = aProps.width(),
        .mInputHeight = aProps.height(),
        .mRotation = aProps.rotation(),
    };
    if (!mFrameDeliveringTrackingId) {
      mFrameDeliveringTrackingId = Some(mTrackingId);
    }
  }

  gfx::IntSize dstSize = CalculateDesiredSize(input);

  // Wrap the raw buffer without copying; the no-op callback means we take no
  // ownership of aBuffer.
  std::function<void()> callback_unused = []() {};
  webrtc::scoped_refptr<webrtc::I420BufferInterface> buffer =
      webrtc::WrapI420Buffer(
          aProps.width(), aProps.height(), aBuffer, aProps.yStride(),
          aBuffer + aProps.yAllocatedSize(), aProps.uStride(),
          aBuffer + aProps.yAllocatedSize() + aProps.uAllocatedSize(),
          aProps.vStride(), callback_unused);

  if ((dstSize.width != aProps.width() || dstSize.height != aProps.height()) &&
      dstSize.width <= aProps.width() && dstSize.height <= aProps.height()) {
    PerformanceRecorder<CopyVideoStage> rec("MERVS::CropAndScale"_ns,
                                            *mFrameDeliveringTrackingId,
                                            dstSize.width, dstSize.height);
    // Destination resolution is smaller than source buffer. We'll rescale.
    webrtc::scoped_refptr<webrtc::I420Buffer> scaledBuffer =
        mRescalingBufferPool.CreateI420Buffer(dstSize.width, dstSize.height);
    if (!scaledBuffer) {
      MOZ_ASSERT_UNREACHABLE(
          "We might fail to allocate a buffer, but with this "
          "being a recycling pool that shouldn't happen");
      return 0;
    }
    scaledBuffer->CropAndScaleFrom(*buffer);
    buffer = scaledBuffer;
    rec.Record();
  }

  layers::PlanarYCbCrData data;
  data.mYChannel = const_cast<uint8_t*>(buffer->DataY());
  data.mYStride = buffer->StrideY();
  MOZ_ASSERT(buffer->StrideU() == buffer->StrideV());
  data.mCbCrStride = buffer->StrideU();
  data.mCbChannel = const_cast<uint8_t*>(buffer->DataU());
  data.mCrChannel = const_cast<uint8_t*>(buffer->DataV());
  data.mPictureRect = gfx::IntRect(0, 0, buffer->width(), buffer->height());
  data.mYUVColorSpace = gfx::YUVColorSpace::BT601;
  data.mChromaSubsampling = gfx::ChromaSubsampling::HALF_WIDTH_AND_HEIGHT;

  RefPtr<layers::PlanarYCbCrImage> image;
  {
    PerformanceRecorder<CopyVideoStage> rec("MERVS::Copy"_ns,
                                            *mFrameDeliveringTrackingId,
                                            dstSize.width, dstSize.height);
    image = mImageContainer->CreatePlanarYCbCrImage();
    if (NS_FAILED(image->CopyData(data))) {
      MOZ_ASSERT_UNREACHABLE(
          "We might fail to allocate a buffer, but with this "
          "being a recycling container that shouldn't happen");
      return 0;
    }
    rec.Record();
  }

#ifdef DEBUG
  static uint32_t frame_num = 0;
  LOG_FRAME(
      "frame %d (%dx%d)->(%dx%d); rotation %d, rtpTimeStamp %u, ntpTimeMs "
      "%" PRIu64 ", renderTimeMs %" PRIu64,
      frame_num++, aProps.width(), aProps.height(), dstSize.width,
      dstSize.height, aProps.rotation(), aProps.rtpTimeStamp(),
      aProps.ntpTimeMs(), aProps.renderTimeMs());
#endif

  // Publish the new frame size to the main-thread settings when it changed.
  if (mScaledImageSize != dstSize) {
    NS_DispatchToMainThread(NS_NewRunnableFunction(
        "MediaEngineRemoteVideoSource::FrameSizeChange",
        [settings = mSettings, updated = mSettingsUpdatedByFrame,
         holder = std::move(mFirstFramePromiseHolder), dstSize]() mutable {
          settings->mWidth.Value() = dstSize.width;
          settings->mHeight.Value() = dstSize.height;
          updated->mValue = true;
          // Since mImageSize was initialized to (0,0), we end up here on the
          // arrival of the first frame. We resolve the promise representing
          // arrival of first frame, after correct settings values have been
          // made available (Resolve() is idempotent if already resolved).
          holder.ResolveIfExists(true, __func__);
        }));
  }

  {
    MutexAutoLock lock(mMutex);
    MOZ_ASSERT(mState == kStarted);
    VideoSegment segment;
    mScaledImageSize = image->GetSize();
    segment.AppendWebrtcLocalFrame(image.forget(), mScaledImageSize, mPrincipal,
                                   /* aForceBlack */ false, TimeStamp::Now(),
                                   aProps.captureTime());
    mTrack->AppendData(&segment);
  }

  return 0;
}

// Dispatches to the fitness or feasibility distance depending on aCalculate.
uint32_t MediaEngineRemoteVideoSource::GetDistance(
    const webrtc::CaptureCapability& aCandidate,
    const NormalizedConstraintSet& aConstraints,
    const DistanceCalculation aCalculate) const {
  if (aCalculate == kFeasibility) {
    return GetFeasibilityDistance(aCandidate, aConstraints);
  }
  return GetFitnessDistance(aCandidate, aConstraints);
}

// Sums per-dimension fitness distances (facing mode, width, height, frame
// rate) for a candidate capability, saturating at UINT32_MAX.
uint32_t MediaEngineRemoteVideoSource::GetFitnessDistance(
    const webrtc::CaptureCapability& aCandidate,
    const NormalizedConstraintSet& aConstraints) const {
  AssertIsOnOwningThread();

  // Treat width|height|frameRate == 0 on capability as "can do any".
  // This allows for orthogonal capabilities that are not in discrete steps.

  typedef MediaConstraintsHelper H;
  uint64_t distance =
      uint64_t(H::FitnessDistance(mFacingMode, aConstraints.mFacingMode)) +
      uint64_t(aCandidate.width ? H::FitnessDistance(int32_t(aCandidate.width),
                                                     aConstraints.mWidth)
                                : 0) +
      uint64_t(aCandidate.height
                   ? H::FitnessDistance(int32_t(aCandidate.height),
                                        aConstraints.mHeight)
                   : 0) +
      uint64_t(aCandidate.maxFPS ? H::FitnessDistance(double(aCandidate.maxFPS),
                                                      aConstraints.mFrameRate)
                                 : 0);
  return uint32_t(std::min(distance, uint64_t(UINT32_MAX)));
}

// Like GetFitnessDistance, but uses feasibility distances, which apply when
// crop-and-scale can adapt the capability to the constraints.
uint32_t MediaEngineRemoteVideoSource::GetFeasibilityDistance(
    const webrtc::CaptureCapability& aCandidate,
    const NormalizedConstraintSet& aConstraints) const {
  AssertIsOnOwningThread();

  // Treat width|height|frameRate == 0 on capability as "can do any".
  // This allows for orthogonal capabilities that are not in discrete steps.

  typedef MediaConstraintsHelper H;
  uint64_t distance =
      uint64_t(H::FitnessDistance(mFacingMode, aConstraints.mFacingMode)) +
      uint64_t(aCandidate.width
                   ? H::FeasibilityDistance(int32_t(aCandidate.width),
                                            aConstraints.mWidth)
                   : 0) +
      uint64_t(aCandidate.height
                   ? H::FeasibilityDistance(int32_t(aCandidate.height),
                                            aConstraints.mHeight)
                   : 0) +
      uint64_t(aCandidate.maxFPS
                   ? H::FeasibilityDistance(double(aCandidate.maxFPS),
                                            aConstraints.mFrameRate)
                   : 0);
  return uint32_t(std::min(distance, uint64_t(UINT32_MAX)));
}
// Find best capability by removing inferiors. May leave >1 of equal distance

// Keeps only the candidates whose distance equals the minimum over aSet.
/* static */
void MediaEngineRemoteVideoSource::TrimLessFitCandidates(
    nsTArray<CapabilityCandidate>& aSet) {
  uint32_t best = UINT32_MAX;
  for (auto& candidate : aSet) {
    if (best > candidate.mDistance) {
      best = candidate.mDistance;
    }
  }
  aSet.RemoveElementsBy(
      [best](const auto& set) { return set.mDistance > best; });
  MOZ_ASSERT(aSet.Length());
}

// Returns the best distance over all device capabilities against the given
// constraint sets, or UINT32_MAX when no capability satisfies them. Only the
// first (required) constraint set determines the recorded distance; later
// (advanced) sets merely filter candidates out.
uint32_t MediaEngineRemoteVideoSource::GetBestFitnessDistance(
    const nsTArray<const NormalizedConstraintSet*>& aConstraintSets,
    const MediaEnginePrefs& aPrefs) const {
  AssertIsOnOwningThread();

  size_t num = NumCapabilities();
  nsTArray<CapabilityCandidate> candidateSet;
  for (size_t i = 0; i < num; i++) {
    candidateSet.AppendElement(CapabilityCandidate(GetCapability(i)));
  }

  bool first = true;
  for (const NormalizedConstraintSet* ns : aConstraintSets) {
    auto mode = MediaConstraintsHelper::GetResizeMode(*ns, aPrefs)
                    .map(&ToDistanceCalculation)
                    .valueOr(kFitness);
    for (size_t i = 0; i < candidateSet.Length();) {
      auto& candidate = candidateSet[i];
      uint32_t distance = GetDistance(candidate.mCapability, *ns, mode);
      if (distance == UINT32_MAX) {
        candidateSet.RemoveElementAt(i);
      } else {
        ++i;
        if (first) {
          candidate.mDistance = distance;
        }
      }
    }
    first = false;
  }
  if (!candidateSet.Length()) {
    return UINT32_MAX;
  }
  TrimLessFitCandidates(candidateSet);
  return candidateSet[0].mDistance;
}

// Maps a webrtc::VideoType to a FOURCC-style string for logging.
static const char* ConvertVideoTypeToCStr(webrtc::VideoType aType) {
  switch (aType) {
    case webrtc::VideoType::kI420:
      return "I420";
    case webrtc::VideoType::kIYUV:
    case webrtc::VideoType::kYV12:
      return "YV12";
    case webrtc::VideoType::kRGB24:
      return "24BG";
    case webrtc::VideoType::kABGR:
      return "ABGR";
    case webrtc::VideoType::kARGB:
      return "ARGB";
    case webrtc::VideoType::kARGB4444:
      return "R444";
    case webrtc::VideoType::kRGB565:
      return "RGBP";
    case webrtc::VideoType::kARGB1555:
      return "RGBO";
    case webrtc::VideoType::kYUY2:
      return "YUY2";
    case webrtc::VideoType::kUYVY:
      return "UYVY";
    case webrtc::VideoType::kMJPEG:
      return "MJPG";
    case webrtc::VideoType::kNV21:
      return "NV21";
    case webrtc::VideoType::kNV12:
      return "NV12";
    case webrtc::VideoType::kBGRA:
      return "BGRA";
    case webrtc::VideoType::kUnknown:
    default:
      return "unknown";
  }
}

// Debug-logs one capability together with its computed distance.
static void LogCapability(const char* aHeader,
                          const webrtc::CaptureCapability& aCapability,
                          uint32_t aDistance) {
  LOG("%s: %4u x %4u x %2u maxFps, %s. Distance = %" PRIu32, aHeader,
      aCapability.width, aCapability.height, aCapability.maxFPS,
      ConvertVideoTypeToCStr(aCapability.videoType), aDistance);
}

// Selects the capability to capture with, writing it to aCapability. For
// desktop-capture engines only the frame rate is validated and chosen; for
// cameras the device's capability list is searched by distance. Returns false
// (optionally naming the bad constraint) when constraints are unsatisfiable.
bool MediaEngineRemoteVideoSource::ChooseCapability(
    const NormalizedConstraints& aConstraints, const MediaEnginePrefs& aPrefs,
    webrtc::CaptureCapability& aCapability,
    const DistanceCalculation aCalculate, const char** aOutBadConstraint) {
  LOG("%s", __PRETTY_FUNCTION__);
  AssertIsOnOwningThread();

  if (MOZ_LOG_TEST(gMediaManagerLog, LogLevel::Debug)) {
    LOG("ChooseCapability: prefs: %dx%d @%dfps", aPrefs.GetWidth(),
        aPrefs.GetHeight(), aPrefs.mFPS);
    MediaConstraintsHelper::LogConstraints(aConstraints);
    if (!aConstraints.mAdvanced.empty()) {
      LOG("Advanced array[%zu]:", aConstraints.mAdvanced.size());
      for (const auto& advanced : aConstraints.mAdvanced) {
        MediaConstraintsHelper::LogConstraints(advanced);
      }
    }
  }

  switch (mCapEngine) {
    case camera::ScreenEngine:
    case camera::WinEngine:
    case camera::BrowserEngine: {
      MOZ_ASSERT_IF(aOutBadConstraint, !*aOutBadConstraint);
      FlattenedConstraints c(aConstraints);
      // A constraint range is valid when non-empty and with a positive max.
      const auto checkConstraint = [](const auto& aConstraint) {
        return aConstraint.mMin <= aConstraint.mMax && aConstraint.mMax > 0;
      };
      if (!checkConstraint(c.mWidth)) {
        if (aOutBadConstraint) {
          *aOutBadConstraint = "width";
        }
        return false;
      }
      if (!checkConstraint(c.mHeight)) {
        if (aOutBadConstraint) {
          *aOutBadConstraint = "height";
        }
        return false;
      }
      if (!checkConstraint(c.mFrameRate)) {
        if (aOutBadConstraint) {
          *aOutBadConstraint = "frameRate";
        }
        return false;
      }
      // DesktopCaptureImpl polls for frames and so must know the framerate to
      // capture at. This is signaled through CamerasParent as the capability's
      // maxFPS. Note that DesktopCaptureImpl does not expose any capabilities.
      constexpr int32_t probablyNativeFramerate = 60;
      constexpr int32_t cap = 120;
      const int32_t constrainedFramerate =
          SaturatingCast<int32_t>(std::lround(c.mFrameRate.Get(aPrefs.mFPS)));
      aCapability.maxFPS =
          aCalculate == kFeasibility
              ? std::min(constrainedFramerate, cap)
              : std::clamp(constrainedFramerate, probablyNativeFramerate, cap);
      return true;
    }
    default:
      break;
  }

  // Camera path: gather all capabilities, tracking the maxima for the
  // capabilities object exposed to content.
  nsTArray<CapabilityCandidate> candidateSet;
  size_t num = NumCapabilities();
  int32_t maxHeight = 0, maxWidth = 0, maxFps = 0;
  for (size_t i = 0; i < num; i++) {
    auto capability = GetCapability(i);
    if (capability.height > maxHeight) {
      maxHeight = capability.height;
    }
    if (capability.width > maxWidth) {
      maxWidth = capability.width;
    }
    if (capability.maxFPS > maxFps) {
      maxFps = capability.maxFPS;
    }
    candidateSet.AppendElement(CapabilityCandidate(capability));
  }

  // mTrackCapabilities is a main-thread object; update it there.
  NS_DispatchToMainThread(NS_NewRunnableFunction(
      "MediaEngineRemoteVideoSource::ChooseCapability",
      [capabilities = mTrackCapabilities, maxHeight, maxWidth,
       maxFps]() mutable {
        dom::ULongRange widthRange;
        widthRange.mMax.Construct(maxWidth);
        widthRange.mMin.Construct(2);
        capabilities->mWidth.Reset();
        capabilities->mWidth.Construct(widthRange);

        dom::ULongRange heightRange;
        heightRange.mMax.Construct(maxHeight);
        heightRange.mMin.Construct(2);
        capabilities->mHeight.Reset();
        capabilities->mHeight.Construct(heightRange);

        dom::DoubleRange frameRateRange;
        frameRateRange.mMax.Construct(maxFps);
        frameRateRange.mMin.Construct(0);
        capabilities->mFrameRate.Reset();
        capabilities->mFrameRate.Construct(frameRateRange);
      }));

  if (mCapabilitiesAreHardcoded && mCapEngine == camera::CameraEngine) {
    // We have a hardcoded capability, which means this camera didn't report
    // discrete capabilities. It might still allow a ranged capability, so we
    // add a couple of default candidates based on prefs and constraints.
    // The chosen candidate will be propagated to StartCapture() which will fail
    // for an invalid candidate.
    MOZ_DIAGNOSTIC_ASSERT(mCapabilities.Length() == 1);
    MOZ_DIAGNOSTIC_ASSERT(candidateSet.Length() == 1);
    candidateSet.Clear();

    FlattenedConstraints c(aConstraints);
    // Reuse the code across both the low-definition (`false`) pref and
    // the high-definition (`true`) pref.
    // If there are constraints we try to satisfy them but we default to prefs.
    // Note that since constraints are from content and can literally be
    // anything we put (rather generous) caps on them.
1048 for (bool isHd : {false, true}) { 1049 webrtc::CaptureCapability cap; 1050 int32_t prefWidth = aPrefs.GetWidth(isHd); 1051 int32_t prefHeight = aPrefs.GetHeight(isHd); 1052 1053 cap.width = c.mWidth.Get(prefWidth); 1054 cap.width = std::clamp(cap.width, 0, 7680); 1055 1056 cap.height = c.mHeight.Get(prefHeight); 1057 cap.height = std::clamp(cap.height, 0, 4320); 1058 1059 cap.maxFPS = 1060 SaturatingCast<int32_t>(std::lround(c.mFrameRate.Get(aPrefs.mFPS))); 1061 cap.maxFPS = std::clamp(cap.maxFPS, 0, 480); 1062 1063 if (cap.width != prefWidth) { 1064 // Width was affected by constraints. 1065 // We'll adjust the height too so the aspect ratio is retained. 1066 cap.height = cap.width * prefHeight / prefWidth; 1067 } else if (cap.height != prefHeight) { 1068 // Height was affected by constraints but not width. 1069 // We'll adjust the width too so the aspect ratio is retained. 1070 cap.width = cap.height * prefWidth / prefHeight; 1071 } 1072 1073 if (candidateSet.Contains(cap, CapabilityComparator())) { 1074 continue; 1075 } 1076 LogCapability("Hardcoded capability", cap, 0); 1077 candidateSet.AppendElement(cap); 1078 } 1079 } 1080 1081 // First, filter capabilities by required constraints (min, max, exact). 1082 1083 for (size_t i = 0; i < candidateSet.Length();) { 1084 auto& candidate = candidateSet[i]; 1085 candidate.mDistance = 1086 GetDistance(candidate.mCapability, aConstraints, aCalculate); 1087 LogCapability("Capability", candidate.mCapability, candidate.mDistance); 1088 if (candidate.mDistance == UINT32_MAX) { 1089 candidateSet.RemoveElementAt(i); 1090 } else { 1091 ++i; 1092 } 1093 } 1094 1095 if (candidateSet.IsEmpty()) { 1096 LOG("failed to find capability match from %zu choices", 1097 candidateSet.Length()); 1098 return false; 1099 } 1100 1101 // Filter further with all advanced constraints (that don't overconstrain). 
1102 1103 for (const auto& cs : aConstraints.mAdvanced) { 1104 nsTArray<CapabilityCandidate> rejects; 1105 for (size_t i = 0; i < candidateSet.Length();) { 1106 if (GetDistance(candidateSet[i].mCapability, cs, aCalculate) == 1107 UINT32_MAX) { 1108 rejects.AppendElement(candidateSet[i]); 1109 candidateSet.RemoveElementAt(i); 1110 } else { 1111 ++i; 1112 } 1113 } 1114 if (!candidateSet.Length()) { 1115 candidateSet.AppendElements(std::move(rejects)); 1116 } 1117 } 1118 MOZ_ASSERT( 1119 candidateSet.Length(), 1120 "advanced constraints filtering step can't reduce candidates to zero"); 1121 1122 // Remaining algorithm is up to the UA. 1123 1124 TrimLessFitCandidates(candidateSet); 1125 1126 // Any remaining multiples all have the same distance. A common case of this 1127 // occurs when no ideal is specified. Lean toward defaults. 1128 uint32_t sameDistance = candidateSet[0].mDistance; 1129 { 1130 MediaTrackConstraintSet prefs; 1131 prefs.mWidth.Construct().SetAsLong() = aPrefs.GetWidth(); 1132 prefs.mHeight.Construct().SetAsLong() = aPrefs.GetHeight(); 1133 prefs.mFrameRate.Construct().SetAsDouble() = aPrefs.mFPS; 1134 NormalizedConstraintSet normPrefs(prefs, false); 1135 1136 for (auto& candidate : candidateSet) { 1137 candidate.mDistance = 1138 GetDistance(candidate.mCapability, normPrefs, aCalculate); 1139 } 1140 TrimLessFitCandidates(candidateSet); 1141 } 1142 1143 aCapability = candidateSet[0].mCapability; 1144 1145 LogCapability("Chosen capability", aCapability, sameDistance); 1146 return true; 1147 } 1148 1149 void MediaEngineRemoteVideoSource::GetSettings( 1150 MediaTrackSettings& aOutSettings) const { 1151 aOutSettings = *mSettings; 1152 } 1153 1154 void MediaEngineRemoteVideoSource::GetCapabilities( 1155 dom::MediaTrackCapabilities& aOutCapabilities) const { 1156 aOutCapabilities = *mTrackCapabilities; 1157 } 1158 1159 } // namespace mozilla