commit 38bbd8dd7935793d3229931b96d32e40a784d992 parent 6bf0d8e481e92c220f70c3d98f912156c6051772 Author: Ashley Hale <ahale@mozilla.com> Date: Wed, 7 Jan 2026 06:48:05 +0000 Bug 1977746 - display HDR video on Windows using RGB10A2 r=bradwerth,gfx-reviewers,media-playback-reviewers,sotaro,alwu This adds support for MFVideoFormat_P010 and MFVideoFormat_A2R10G10B10 in a number of places (both WMF and DXVA2 code paths with ffmpeg). This advertises that we support HDR to JavaScript so video players can detect our support if the window is on an HDR display at the time. This adds comments for several poorly documented parts of the code that I had to figure out. It changes fallback logic for selecting a WMF video subtype to do what the code was clearly meant to do (e.g. fall back from P010 to NV12 or YV12 depending on hw vs sw decode). Zero copy decode is not supported by this patch, but can be implemented in a follow-up bug. HDR videos are now automatically flagged as promoted surfaces more aggressively than for video in general, because WebRender does not currently have the ability to do regular CSS/SVG compositing of HDR video, so we definitely do want it to be a compositor surface if possible - this is still subject to the MAX_COMPOSITOR_SURFACES limit in WebRender, so a document with more than 2 or 3 HDR videos will have a bad time, that is worth fixing in a follow-up bug, and we still don't have HDR CSS rendering in WebRender as mentioned so any clipmasks will usually prevent surface promotion for HDR videos, the same goes for any CSS filter effects or SVG filter primitives applied to a video. 
IsHDRTransferFunction() was added to check if a gfx::TransferFunction is one of the HDR ones (PQ or HLG), but currently TextureHost does not have any easy way to get the gfx::TransferFunction to DCLayerTree to make a decision to display it as HDR, so DCLayerTree code is assuming that any video with BT2020 colorspace is HDR (this is the same assumption we made on macOS, it has proven surprisingly robust) - it would be good to clean that up by properly plumbing TransferFunction through TextureHost, which could be useful for future code paths like HDR canvas and images. Tests are not added by this patch because properly testing HDR video would rely heavily on features not currently implemented in WebRender and ImageLib for reftests to work with high dynamic range colors, we have already been shipping HDR video on macOS for a while without tests, so this is not meaningfully different. A proper test suite is planned in 2026 as a major project that depends on those other moving parts. Differential Revision: https://phabricator.services.mozilla.com/D268499 Diffstat:
32 files changed, 748 insertions(+), 165 deletions(-)
diff --git a/dom/canvas/ImageUtils.cpp b/dom/canvas/ImageUtils.cpp @@ -37,6 +37,9 @@ static Maybe<ImageBitmapFormat> GetImageBitmapFormatFromSurfaceFromat( case SurfaceFormat::A8: return Some(ImageBitmapFormat::GRAY8); case SurfaceFormat::R5G6B5_UINT16: + case SurfaceFormat::R10G10B10A2_UINT32: + case SurfaceFormat::R10G10B10X2_UINT32: + case SurfaceFormat::R16G16B16A16F: case SurfaceFormat::YUV420: case SurfaceFormat::NV12: case SurfaceFormat::P010: diff --git a/dom/media/ipc/MFMediaEngineUtils.cpp b/dom/media/ipc/MFMediaEngineUtils.cpp @@ -138,6 +138,7 @@ const char* MFVideoTransferFunctionToStr(MFVideoTransferFunction aFunc) { ENUM_TO_STR(MFVideoTransFunc_709); ENUM_TO_STR(MFVideoTransFunc_2020); ENUM_TO_STR(MFVideoTransFunc_sRGB); + ENUM_TO_STR(MFVideoTransFunc_2084); default: return "Unsupported MFVideoTransferFunction"; } diff --git a/dom/media/platforms/ffmpeg/FFmpegVideoDecoder.cpp b/dom/media/platforms/ffmpeg/FFmpegVideoDecoder.cpp @@ -2333,9 +2333,15 @@ MediaResult FFmpegVideoDecoder<LIBAV_VER>::CreateImageD3D11( MOZ_DIAGNOSTIC_ASSERT(mFrame); MOZ_DIAGNOSTIC_ASSERT(mDXVA2Manager); + gfx::TransferFunction transferFunction = + mInfo.mTransferFunction.refOr(gfx::TransferFunction::BT709); + bool isHDR = transferFunction == gfx::TransferFunction::PQ || + transferFunction == gfx::TransferFunction::HLG; HRESULT hr = mDXVA2Manager->ConfigureForSize( GetSurfaceFormat(), GetFrameColorSpace(), GetFrameColorRange(), - mInfo.mColorDepth, mFrame->width, mFrame->height); + mInfo.mColorDepth, + mInfo.mTransferFunction.refOr(gfx::TransferFunction::BT709), + mFrame->width, mFrame->height); if (FAILED(hr)) { nsPrintfCString msg("Failed to configure DXVA2Manager, hr=%lx", hr); FFMPEG_LOG("%s", msg.get()); @@ -2368,6 +2374,9 @@ MediaResult FFmpegVideoDecoder<LIBAV_VER>::CreateImageD3D11( if (desc.Format == DXGI_FORMAT_P016) { return gfx::SurfaceFormat::P016; } + if (isHDR) { + return gfx::SurfaceFormat::P010; + } MOZ_ASSERT(desc.Format == DXGI_FORMAT_NV12); return 
gfx::SurfaceFormat::NV12; }(); @@ -2377,10 +2386,13 @@ MediaResult FFmpegVideoDecoder<LIBAV_VER>::CreateImageD3D11( mInfo.ScaledImageRect(mFrame->width, mFrame->height); UINT index = (uintptr_t)mFrame->data[1]; - if (CanUseZeroCopyVideoFrame()) { + // TODO(https://bugzilla.mozilla.org/show_bug.cgi?id=2008886) + // Currently the zero-copy path supports NV12 but not P010 so it can't do HDR + // yet, this can be implemented in future. + if (format == gfx::SurfaceFormat::NV12 && CanUseZeroCopyVideoFrame()) { mNumOfHWTexturesInUse++; - FFMPEGV_LOG("CreateImageD3D11, zero copy, index=%u (texInUse=%u)", index, - mNumOfHWTexturesInUse.load()); + FFMPEGV_LOG("CreateImageD3D11, zero copy, index=%u (texInUse=%u), isHDR=%u", + index, mNumOfHWTexturesInUse.load(), (unsigned int)isHDR); hr = mDXVA2Manager->WrapTextureWithImage( new D3D11TextureWrapper( mFrame, mLib, texture, format, index, @@ -2390,7 +2402,8 @@ MediaResult FFmpegVideoDecoder<LIBAV_VER>::CreateImageD3D11( }), pictureRegion, getter_AddRefs(image)); } else { - FFMPEGV_LOG("CreateImageD3D11, copy output to a shared texture"); + FFMPEGV_LOG("CreateImageD3D11, copy output to a shared texture, isHDR=%u", + (unsigned int)isHDR); hr = mDXVA2Manager->CopyToImage(texture, index, pictureRegion, getter_AddRefs(image)); } diff --git a/dom/media/platforms/wmf/DXVA2Manager.cpp b/dom/media/platforms/wmf/DXVA2Manager.cpp @@ -357,13 +357,15 @@ class D3D11DXVA2Manager : public DXVA2Manager { HRESULT ConfigureForSize(IMFMediaType* aInputType, gfx::YUVColorSpace aColorSpace, gfx::ColorRange aColorRange, - gfx::ColorDepth aColorDepth, uint32_t aWidth, - uint32_t aHeight) override; + gfx::ColorDepth aColorDepth, + gfx::TransferFunction aTransferFunction, + uint32_t aWidth, uint32_t aHeight) override; HRESULT ConfigureForSize(gfx::SurfaceFormat aSurfaceFormat, gfx::YUVColorSpace aColorSpace, gfx::ColorRange aColorRange, - gfx::ColorDepth aColorDepth, uint32_t aWidth, - uint32_t aHeight) override; + gfx::ColorDepth aColorDepth, + 
gfx::TransferFunction aTransferFunction, + uint32_t aWidth, uint32_t aHeight) override; bool IsD3D11() override { return true; } @@ -423,6 +425,7 @@ class D3D11DXVA2Manager : public DXVA2Manager { gfx::YUVColorSpace mYUVColorSpace; gfx::ColorRange mColorRange = gfx::ColorRange::LIMITED; gfx::ColorDepth mColorDepth = gfx::ColorDepth::COLOR_8; + gfx::TransferFunction mTransferFunction = gfx::TransferFunction::BT709; gfx::SurfaceFormat mSurfaceFormat; std::list<ThreadSafeWeakPtr<layers::IMFSampleWrapper>> mIMFSampleWrappers; RefPtr<layers::ZeroCopyUsageInfo> mZeroCopyUsageInfo; @@ -575,6 +578,10 @@ bool D3D11DXVA2Manager::SupportsConfig(const VideoInfo& aInfo, desc.OutputFormat = DXGI_FORMAT_P010; } else if (subtype == MFVideoFormat_P016) { desc.OutputFormat = DXGI_FORMAT_P016; + } else if (subtype == MFVideoFormat_A2R10G10B10) { + desc.OutputFormat = DXGI_FORMAT_R10G10B10A2_UNORM; + } else if (subtype == MFVideoFormat_A16B16G16R16F) { + desc.OutputFormat = DXGI_FORMAT_R16G16B16A16_FLOAT; } } @@ -699,6 +706,7 @@ D3D11DXVA2Manager::InitInternal(layers::KnowsCompositor* aKnowsCompositor, return hr; } + // TODO(https://bugzilla.mozilla.org/show_bug.cgi?id=2008874) // The IMFTransform interface used by MFTDecoder is documented to require to // run on an MTA thread. 
// https://msdn.microsoft.com/en-us/library/windows/desktop/ee892371(v=vs.85).aspx#components @@ -924,19 +932,89 @@ void D3D11DXVA2Manager::BeforeShutdownVideoMFTDecoder() { ReleaseAllIMFSamples(); } +// Glossary of RGB formats for display: +// DXGI_FORMAT_B8G8R8A8_UNORM <-> MFVideoFormat_ARGB32 +// DXGI_FORMAT_R16G16B16A16_FLOAT <-> MFVideoFormat_A16B16G16R16F +// DXGI_FORMAT_R10G10B10A2_UNORM <-> MFVideoFormat_A2R10G10B10 +// (Yes MFVideoFormat_A2R10G10B10 is actually R10G10B10A2) +// +// YUV formats for decoder output: +// DXGI_FORMAT_NV12 <-> MFVideoFormat_NV12 +// DXGI_FORMAT_YV12 <-> MFVideoFormat_YV12 +// DXGI_FORMAT_P010 <-> MFVideoFormat_P010 +// DXGI_FORMAT_P016 <-> MFVideoFormat_P016 +// +// Format pairings for SDR colorspaces: +// {DXGI_FORMAT_NV12, DXGI_COLOR_SPACE_YCBCR_STUDIO_G22_LEFT_P709} +// {DXGI_FORMAT_B8G8R8A8_UNORM, DXGI_COLOR_SPACE_RGB_STUDIO_G22_NONE_P709} +// +// Format pairings for HDR colorspaces: +// {DXGI_FORMAT_R10G10B10A2_UNORM, DXGI_COLOR_SPACE_RGB_FULL_G2084_NONE_P2020} +// {DXGI_FORMAT_R16G16B16A16_FLOAT, DXGI_COLOR_SPACE_RGB_FULL_G10_NONE_P709} +// +// A few notes: +// * NV12 can be directly displayed as an overlay, whereas all other +// formats need to be converted to RGB for display. +// * YV12 is only used for SW decode (and we have no SurfaceFormat to represent +// it, gfx::SurfaceFormat::YUV420 has a different plane order). +// * P010 and P016 are for HDR video, testing these as an overlay showed that +// they do not seem to honor the HDR G2084 transfer function so we can +// consider them simply not displayable. +// * If a video has alpha channel the decoder would have to output RGBA because +// Windows has no enums for YUV with alpha, we don't currently support RGBA +// output from decoders so alpha is not currently relevant. 
+// * Windows seems to do some magic HDR tonemapping on SDR displays without even +// telling it to do anything, it's not clear what we can rely on about this, +// but testing in the wild is probably the best answer for now. + +// Convert a Media Foundation subtype GUID to a gfx::SurfaceFormat. +static gfx::SurfaceFormat SurfaceFormatFromSubType(const GUID& aSubType) { + // We can't use a switch on subType because GUID is a struct. + if (aSubType == MFVideoFormat_ARGB32) { + return gfx::SurfaceFormat::B8G8R8A8; + } + if (aSubType == MFVideoFormat_A16B16G16R16F) { + return gfx::SurfaceFormat::R16G16B16A16F; + } + if (aSubType == MFVideoFormat_A2R10G10B10) { + return gfx::SurfaceFormat::R10G10B10X2_UINT32; + } + if (aSubType == MFVideoFormat_NV12) { + return gfx::SurfaceFormat::NV12; + } + if (aSubType == MFVideoFormat_YUY2) { + return gfx::SurfaceFormat::YUY2; + } + if (aSubType == MFVideoFormat_P010) { + return gfx::SurfaceFormat::P010; + } + if (aSubType == MFVideoFormat_P016) { + return gfx::SurfaceFormat::P016; + } + MOZ_ASSERT_UNREACHABLE("Unknown subtype"); + return gfx::SurfaceFormat::UNKNOWN; +} + HRESULT D3D11DXVA2Manager::ConfigureForSize(IMFMediaType* aInputType, gfx::YUVColorSpace aColorSpace, gfx::ColorRange aColorRange, gfx::ColorDepth aColorDepth, + gfx::TransferFunction aTransferFunction, uint32_t aWidth, uint32_t aHeight) { GUID subType = {0}; HRESULT hr = aInputType->GetGUID(MF_MT_SUBTYPE, &subType); NS_ENSURE_TRUE(SUCCEEDED(hr), hr); + // The aInputType subType here is one of the following: + // * MFVideoFormat_NV12 + // * MFVideoFormat_YV12 + // * MFVideoFormat_P010 + // * MFVideoFormat_P016 + if (subType == mInputSubType && aWidth == mWidth && aHeight == mHeight && mYUVColorSpace == aColorSpace && mColorRange == aColorRange && - mColorDepth == aColorDepth) { + mColorDepth == aColorDepth && mTransferFunction == aTransferFunction) { // If the media type hasn't changed, don't reconfigure. 
return S_OK; } @@ -969,13 +1047,24 @@ D3D11DXVA2Manager::ConfigureForSize(IMFMediaType* aInputType, hr = outputType->SetGUID(MF_MT_MAJOR_TYPE, MFMediaType_Video); NS_ENSURE_TRUE(SUCCEEDED(hr), hr); - hr = outputType->SetGUID(MF_MT_SUBTYPE, MFVideoFormat_ARGB32); - NS_ENSURE_TRUE(SUCCEEDED(hr), hr); + // Pick an RGB format based on the required color depth. + if (aColorDepth > gfx::ColorDepth::COLOR_10) { + hr = outputType->SetGUID(MF_MT_SUBTYPE, MFVideoFormat_A16B16G16R16F); + NS_ENSURE_TRUE(SUCCEEDED(hr), hr); + } else if (aColorDepth > gfx::ColorDepth::COLOR_8) { + hr = outputType->SetGUID(MF_MT_SUBTYPE, MFVideoFormat_A2R10G10B10); + NS_ENSURE_TRUE(SUCCEEDED(hr), hr); + } else { + hr = outputType->SetGUID(MF_MT_SUBTYPE, MFVideoFormat_ARGB32); + NS_ENSURE_TRUE(SUCCEEDED(hr), hr); + } + // TODO(https://bugzilla.mozilla.org/show_bug.cgi?id=2008874) hr = E_FAIL; mozilla::mscom::EnsureMTA([&]() -> void { hr = mTransform->SetMediaTypes( - inputType, outputType, [aWidth, aHeight](IMFMediaType* aOutput) { + inputType, outputType, MFVideoFormat_ARGB32, + [aWidth, aHeight](IMFMediaType* aOutput) { HRESULT hr = aOutput->SetUINT32(MF_MT_INTERLACE_MODE, MFVideoInterlace_Progressive); NS_ENSURE_TRUE(SUCCEEDED(hr), hr); @@ -997,19 +1086,9 @@ D3D11DXVA2Manager::ConfigureForSize(IMFMediaType* aInputType, mYUVColorSpace = aColorSpace; mColorRange = aColorRange; mColorDepth = aColorDepth; + mTransferFunction = aTransferFunction; if (mTextureClientAllocator) { - mSurfaceFormat = [&]() { - if (subType == MFVideoFormat_NV12) { - return gfx::SurfaceFormat::NV12; - } else if (subType == MFVideoFormat_P010) { - return gfx::SurfaceFormat::P010; - } else if (subType == MFVideoFormat_P016) { - return gfx::SurfaceFormat::P016; - } else { - MOZ_ASSERT_UNREACHABLE("Unexpected texture type"); - return gfx::SurfaceFormat::NV12; - } - }(); + mSurfaceFormat = SurfaceFormatFromSubType(subType); mTextureClientAllocator->SetPreferredSurfaceFormat(mSurfaceFormat); } // Reconfig video processor as well 
@@ -1018,9 +1097,10 @@ D3D11DXVA2Manager::ConfigureForSize(IMFMediaType* aInputType, NS_ENSURE_TRUE(SUCCEEDED(hr), hr); } LOG("Configured D3D11DXVA2Manager, size=[%u,%u], colorSpace=%hhu, " - "colorRange=%hhu, colorDepth=%hhu", + "colorRange=%hhu, colorDepth=%hhu, transferFunction=%hhu", mWidth, mHeight, static_cast<uint8_t>(mYUVColorSpace), - static_cast<uint8_t>(mColorRange), static_cast<uint8_t>(mColorDepth)); + static_cast<uint8_t>(mColorRange), static_cast<uint8_t>(mColorDepth), + static_cast<uint8_t>(mTransferFunction)); return S_OK; } @@ -1029,10 +1109,11 @@ D3D11DXVA2Manager::ConfigureForSize(gfx::SurfaceFormat aSurfaceFormat, gfx::YUVColorSpace aColorSpace, gfx::ColorRange aColorRange, gfx::ColorDepth aColorDepth, + gfx::TransferFunction aTransferFunction, uint32_t aWidth, uint32_t aHeight) { if (aWidth == mWidth && aHeight == mHeight && mYUVColorSpace == aColorSpace && mColorRange == aColorRange && aSurfaceFormat == mSurfaceFormat && - mColorDepth == aColorDepth) { + mColorDepth == aColorDepth && mTransferFunction == aTransferFunction) { // No need to reconfigure if nothing changes. 
return S_OK; } @@ -1044,7 +1125,13 @@ D3D11DXVA2Manager::ConfigureForSize(gfx::SurfaceFormat aSurfaceFormat, mColorRange = aColorRange; mColorDepth = aColorDepth; mSurfaceFormat = aSurfaceFormat; + mTransferFunction = aTransferFunction; if (mTextureClientAllocator) { + // mSurfaceFormat here is one of the following: + // * SurfaceFormat::NV12 + // * SurfaceFormat::YV12 + // * SurfaceFormat::P010 + // * SurfaceFormat::P016 mTextureClientAllocator->SetPreferredSurfaceFormat(mSurfaceFormat); } // Reconfig video processor as well @@ -1052,9 +1139,11 @@ D3D11DXVA2Manager::ConfigureForSize(gfx::SurfaceFormat aSurfaceFormat, mProcessor->Init(gfx::IntSize(mWidth, mHeight)); } LOG("Configured D3D11DXVA2Manager, size=[%u,%u], colorSpace=%hhu, " - "colorRange=%hhu, colorDepth=%hhu, surfaceFormat=%hhd", + "colorRange=%hhu, colorDepth=%hhu, transferFunction=%hhu, " + "surfaceFormat=%hhd", mWidth, mHeight, static_cast<uint8_t>(mYUVColorSpace), static_cast<uint8_t>(mColorRange), static_cast<uint8_t>(mColorDepth), + static_cast<uint8_t>(mTransferFunction), static_cast<uint8_t>(mSurfaceFormat)); return S_OK; } @@ -1159,6 +1248,11 @@ HRESULT D3D11DXVA2Manager::CopyTextureToImage( D3D11_TEXTURE2D_DESC inDesc; aInTexture.mTexture->GetDesc(&inDesc); + LOG("CopyTextureToImage, inDesc.Format=%d, mYUVColorSpace=%d, " + "mColorRange=%d, mColorDepth=%d", + inDesc.Format, static_cast<int>(mYUVColorSpace), + static_cast<int>(mColorRange), static_cast<int>(mColorDepth)); + RefPtr<D3D11ShareHandleImage> image = new D3D11ShareHandleImage( gfx::IntSize(mWidth, mHeight), aInTexture.mRegion, ToColorSpace2(mYUVColorSpace), mColorRange, mColorDepth); diff --git a/dom/media/platforms/wmf/DXVA2Manager.h b/dom/media/platforms/wmf/DXVA2Manager.h @@ -63,15 +63,17 @@ class DXVA2Manager { virtual HRESULT ConfigureForSize(IMFMediaType* aInputType, gfx::YUVColorSpace aColorSpace, gfx::ColorRange aColorRange, - gfx::ColorDepth aColorDepth, uint32_t aWidth, - uint32_t aHeight) { + gfx::ColorDepth aColorDepth, + 
gfx::TransferFunction aTransferFunction, + uint32_t aWidth, uint32_t aHeight) { return S_OK; } virtual HRESULT ConfigureForSize(gfx::SurfaceFormat aSurfaceFormat, gfx::YUVColorSpace aColorSpace, gfx::ColorRange aColorRange, - gfx::ColorDepth aColorDepth, uint32_t aWidth, - uint32_t aHeight) { + gfx::ColorDepth aColorDepth, + gfx::TransferFunction aTransferFunction, + uint32_t aWidth, uint32_t aHeight) { // Not implemented! MOZ_CRASH("ConfigureForSize not implemented on this manager."); return E_FAIL; diff --git a/dom/media/platforms/wmf/MFTDecoder.cpp b/dom/media/platforms/wmf/MFTDecoder.cpp @@ -109,6 +109,7 @@ MFTDecoder::Create(const GUID& aCategory, const GUID& aInSubtype, HRESULT MFTDecoder::SetMediaTypes(IMFMediaType* aInputType, IMFMediaType* aOutputType, + const GUID& aFallbackSubType, std::function<HRESULT(IMFMediaType*)>&& aCallback) { MOZ_ASSERT(mscom::IsCurrentThreadMTA()); @@ -117,8 +118,8 @@ MFTDecoder::SetMediaTypes(IMFMediaType* aInputType, IMFMediaType* aOutputType, GUID currentSubtype = {0}; RETURN_IF_FAILED(aOutputType->GetGUID(MF_MT_SUBTYPE, ¤tSubtype)); - RETURN_IF_FAILED( - SetDecoderOutputType(currentSubtype, aOutputType, std::move(aCallback))); + RETURN_IF_FAILED(SetDecoderOutputType(currentSubtype, aFallbackSubType, + aOutputType, std::move(aCallback))); RETURN_IF_FAILED(mDecoder->GetInputStreamInfo(0, &mInputStreamInfo)); RETURN_IF_FAILED(SendMFTMessage(MFT_MESSAGE_NOTIFY_BEGIN_STREAMING, 0)); RETURN_IF_FAILED(SendMFTMessage(MFT_MESSAGE_NOTIFY_START_OF_STREAM, 0)); @@ -142,22 +143,41 @@ already_AddRefed<IMFAttributes> MFTDecoder::GetOutputStreamAttributes() { } HRESULT -MFTDecoder::FindDecoderOutputType() { +MFTDecoder::FindDecoderOutputType(const GUID& aFallbackSubType) { MOZ_ASSERT(mscom::IsCurrentThreadMTA()); MOZ_ASSERT(mOutputType, "SetDecoderTypes must have been called once"); - return FindDecoderOutputTypeWithSubtype(mOutputSubType); + return FindDecoderOutputTypeWithSubtype(mOutputSubType, aFallbackSubType); } HRESULT 
-MFTDecoder::FindDecoderOutputTypeWithSubtype(const GUID& aSubType) { - return SetDecoderOutputType(aSubType, nullptr, +MFTDecoder::FindDecoderOutputTypeWithSubtype(const GUID& aSubType, + const GUID& aFallbackSubType) { + return SetDecoderOutputType(aSubType, aFallbackSubType, nullptr, [](IMFMediaType*) { return S_OK; }); } +// This method first attempts to find the provided aSubType in the compatible +// list reported by the decoder, if found it will be set up. +// +// If aSubType is not found in the compatible list, and aFallbackSubType is +// GUID_NULL, this will promptly return E_FAIL instead of attempting fallbacks. +// +// If aSubType is not found in the compatible list, and aFallbackSubType is not +// GUID_NULL, this method will attempt to find aFallbackSubType in the +// compatible list, if found it will be set up. +// +// If aSubType is not found in the compatible list, and aFallbackSubType is not +// GUID_NULL, but aFallbackSubType is also not found in the compatible list, +// this method will set up the last available compatible type reported by the +// decoder. +// +// Callers that do not want a fallback behavior must pass GUID_NULL as +// aFallbackSubType. HRESULT MFTDecoder::SetDecoderOutputType( - const GUID& aSubType, IMFMediaType* aTypeToUse, + const GUID& aSubType, const GUID& aFallbackSubType, + IMFMediaType* aTypeToUse, std::function<HRESULT(IMFMediaType*)>&& aCallback) { MOZ_ASSERT(mscom::IsCurrentThreadMTA()); NS_ENSURE_TRUE(mDecoder != nullptr, E_POINTER); @@ -197,16 +217,14 @@ MFTDecoder::SetDecoderOutputType( } if (foundType == Result::eNotFound) { + if (aFallbackSubType == GUID_NULL) { + // The caller specifically did not want a fallback, so just return. 
+ return E_FAIL; + } typeIndex = 0; LOG("Can't find a compatible output type, searching with the preferred " "type instead"); - auto getPreferredSubtype = [](const GUID& aMajor) -> GUID { - if (aMajor == MFMediaType_Audio) { - return MFAudioFormat_Float; - } - return MFVideoFormat_NV12; - }; - const GUID preferredSubtype = getPreferredSubtype(mMajorType); + const GUID preferredSubtype = aFallbackSubType; while (SUCCEEDED(mDecoder->GetOutputAvailableType( 0, typeIndex++, getter_AddRefs(outputType)))) { GUID outSubtype = {0}; diff --git a/dom/media/platforms/wmf/MFTDecoder.h b/dom/media/platforms/wmf/MFTDecoder.h @@ -47,8 +47,14 @@ class MFTDecoder final { // - aOutputType needs at least major and minor types set. // This is used to select the matching output type out // of all the available output types of the MFT. + // - aFallbackSubType is a preferred subtype to fall back to if the currently + // selected subtype in aOutputType is unavailable, if this is GUID_NULL + // then no attempt to fallback will occur, if it is not GUID_NULL then it + // will be searched for as a preferred fallback, and if not found the last + // subtype available will be chosen as a final fallback. HRESULT SetMediaTypes( IMFMediaType* aInputType, IMFMediaType* aOutputType, + const GUID& aFallbackSubType, std::function<HRESULT(IMFMediaType*)>&& aCallback = [](IMFMediaType* aOutput) { return S_OK; }); @@ -100,14 +106,30 @@ class MFTDecoder final { // Sends a message to the MFT. HRESULT SendMFTMessage(MFT_MESSAGE_TYPE aMsg, ULONG_PTR aData); - HRESULT FindDecoderOutputTypeWithSubtype(const GUID& aSubType); - HRESULT FindDecoderOutputType(); + // This method first attempts to find the provided aSubType in the compatible + // list reported by the decoder, if found it will be set up, otherwise it will + // search for the preferred subtype aFallbackSubType, and if that is also not + // found the last available subtype is set up. 
+ // + // aFallbackSubType can be GUID_NULL to cause this to return E_FAIL when + // aSubType is not found, avoiding fallback behaviors. HRESULT FindDecoderOutputTypeWithSubtype(const GUID& aSubType, + const GUID& aFallbackSubType); + HRESULT FindDecoderOutputType(const GUID& aFallbackSubType); private: // Will search a suitable MediaType using aTypeToUse if set, if not will // use the current mOutputType. + // + // When aSubType (or the current mOutputType) is not found, it will search for + // aFallbackSubType instead, and if that is not found it will use the last + // available compatible type reported by the decoder. + // + // aFallbackSubType can be GUID_NULL to cause this to return E_FAIL when + // aSubType (or the current mOutputType) is not found, avoiding fallbacks. HRESULT SetDecoderOutputType( - const GUID& aSubType, IMFMediaType* aTypeToUse, + const GUID& aSubType, const GUID& aFallbackSubType, + IMFMediaType* aTypeToUse, std::function<HRESULT(IMFMediaType*)>&& aCallback); HRESULT CreateOutputSample(RefPtr<IMFSample>* aOutSample); diff --git a/dom/media/platforms/wmf/WMFAudioMFTManager.cpp b/dom/media/platforms/wmf/WMFAudioMFTManager.cpp @@ -130,7 +130,7 @@ bool WMFAudioMFTManager::Init() { hr = outputType->SetUINT32(MF_MT_AUDIO_BITS_PER_SAMPLE, 32); NS_ENSURE_TRUE(SUCCEEDED(hr), false); - hr = decoder->SetMediaTypes(inputType, outputType); + hr = decoder->SetMediaTypes(inputType, outputType, MFAudioFormat_Float); NS_ENSURE_TRUE(SUCCEEDED(hr), false); mDecoder = decoder; @@ -195,7 +195,7 @@ WMFAudioMFTManager::Output(int64_t aStreamOffset, RefPtr<MediaData>& aOutput) { return hr; } if (hr == MF_E_TRANSFORM_STREAM_CHANGE) { - hr = mDecoder->FindDecoderOutputType(); + hr = mDecoder->FindDecoderOutputType(MFAudioFormat_Float); NS_ENSURE_TRUE(SUCCEEDED(hr), hr); hr = UpdateOutputType(); NS_ENSURE_TRUE(SUCCEEDED(hr), hr); diff --git a/dom/media/platforms/wmf/WMFUtils.cpp b/dom/media/platforms/wmf/WMFUtils.cpp @@ -131,6 +131,10 @@ nsCString 
GetSubTypeStr(const GUID& aSubtype) { ENUM_TO_STR(MFVideoFormat_P016) ENUM_TO_STR(MFVideoFormat_ARGB32) ENUM_TO_STR(MFVideoFormat_RGB32) + ENUM_TO_STR(MFVideoFormat_A2R10G10B10) + ENUM_TO_STR(MFVideoFormat_A16B16G16R16F) + ENUM_TO_STR(MFVideoFormat_I420) + ENUM_TO_STR(MFVideoFormat_YUY2) // codec ENUM_TO_STR(MFAudioFormat_MP3) ENUM_TO_STR(MFAudioFormat_AAC) diff --git a/dom/media/platforms/wmf/WMFVideoMFTManager.cpp b/dom/media/platforms/wmf/WMFVideoMFTManager.cpp @@ -258,9 +258,13 @@ MediaResult WMFVideoMFTManager::InitInternal() { } } + // TODO(https://bugzilla.mozilla.org/show_bug.cgi?id=2008886) + // The zero-copy implementation doesn't support P010 for HDR video yet, only + // NV12 - change this when it is implemented. if (gfx::gfxVars::HwDecodedVideoZeroCopy() && mKnowsCompositor && mKnowsCompositor->UsingHardwareWebRender() && mDXVA2Manager && - mDXVA2Manager->SupportsZeroCopyNV12Texture()) { + mDXVA2Manager->SupportsZeroCopyNV12Texture() && + mColorDepth == gfx::ColorDepth::COLOR_8 && !IsHDR()) { mZeroCopyNV12Texture = true; const int kOutputBufferSize = 10; @@ -322,13 +326,18 @@ MediaResult WMFVideoMFTManager::InitInternal() { uint32_t(media::MediaDecoderBackend::WMFSoftware)); } - LOG("Created a video decoder, useDxva=%s, streamType=%s, outputSubType=%s", + // Note that some HDR videos are 8bit, and end up decoding to NV12/YV12, + // rather than the more obvious P010, and the decoder won't let us force P010. + // See https://bugzilla.mozilla.org/show_bug.cgi?id=2008887 + const GUID& outputSubType = GetOutputSubtype(); + LOG("Created a video decoder, useDxva=%s, streamType=%s, outputSubType=%s, " + "isHDR=%u", mUseHwAccel ? 
"Yes" : "No", EnumValueToString(mStreamType), - GetSubTypeStr(GetOutputSubtype()).get()); + GetSubTypeStr(outputSubType).get(), (unsigned int)IsHDR()); mDecoder = decoder; RETURN_PARAM_IF_FAILED( - SetDecoderMediaTypes(), + SetDecoderMediaTypes(outputSubType), MediaResult(NS_ERROR_DOM_MEDIA_FATAL_ERR, RESULT_DETAIL("Fail to set the decoder media types"))); @@ -356,14 +365,16 @@ MediaResult WMFVideoMFTManager::InitInternal() { LOG("Video Decoder initialized, Using DXVA: %s", (mUseHwAccel ? "Yes" : "No")); + // Now we need to convert the video decode output to a display format. if (mUseHwAccel) { RETURN_PARAM_IF_FAILED( mDXVA2Manager->ConfigureForSize( outputType, mColorSpace.refOr( DefaultColorSpace({mImageSize.width, mImageSize.height})), - mColorRange, mColorDepth, mVideoInfo.ImageRect().width, - mVideoInfo.ImageRect().height), + mColorRange, mColorDepth, + mVideoInfo.mTransferFunction.refOr(gfx::TransferFunction::BT709), + mVideoInfo.ImageRect().width, mVideoInfo.ImageRect().height), MediaResult(NS_ERROR_DOM_MEDIA_FATAL_ERR, RESULT_DETAIL("Fail to configure image size for " "DXVA2Manager."))); @@ -386,7 +397,7 @@ MediaResult WMFVideoMFTManager::InitInternal() { } HRESULT -WMFVideoMFTManager::SetDecoderMediaTypes() { +WMFVideoMFTManager::SetDecoderMediaTypes(const GUID& aFallbackSubType) { // Setup the input/output media types. RefPtr<IMFMediaType> inputType; RETURN_IF_FAILED(wmf::MFCreateMediaType(getter_AddRefs(inputType))); @@ -428,7 +439,7 @@ WMFVideoMFTManager::SetDecoderMediaTypes() { D3D11_BIND_SHADER_RESOURCE | D3D11_BIND_DECODER)); } } - return mDecoder->SetMediaTypes(inputType, outputType); + return mDecoder->SetMediaTypes(inputType, outputType, aFallbackSubType); } HRESULT @@ -804,31 +815,56 @@ WMFVideoMFTManager::Output(int64_t aStreamOffset, RefPtr<MediaData>& aOutData) { // pixel type. // We must reconfigure the decoder output type. 
- // Attempt to find an appropriate OutputType, trying in order: - // if HW accelerated: NV12, P010, P016 - // if SW: YV12, P010, P016 - if (FAILED( - (hr = (mDecoder->FindDecoderOutputTypeWithSubtype( - mUseHwAccel ? MFVideoFormat_NV12 : MFVideoFormat_YV12)))) && - FAILED((hr = mDecoder->FindDecoderOutputTypeWithSubtype( - MFVideoFormat_P010))) && - FAILED((hr = mDecoder->FindDecoderOutputTypeWithSubtype( - MFVideoFormat_P016)))) { - LOG("No suitable output format found"); - return hr; + // Attempt to find an appropriate SubType for video decoding: + // * If the video is SDR we prefer decoding in 8bit formats (NV12 for HW, + // YV12 for SW decode), if that decoder is unavailable we can use the + // 10bit formats but they are more memory bandwidth intensive. + // * If the video is HDR, we want to prefer the 10bit formats (P010/P016) + // because HDR videos typically use PQ transfer function which requires + // 10bit to avoid severe banding artifacts, this probably matters less + // for HLG transfer function but that seems to be uncommon. + // + // Note that we deliberately pass GUID_NULL for aFallbackSubType to avoid + // the full fallback logic - on the final attempt we specify two preferred + // subtypes which will pick anything if both fail to be found; see + // MFTDecoder::SetDecoderOutputType for the full logic. + // + // Conversion from this subtype to a display-ready format (e.g. BGRA8) + // will be handled in DXVA2Manager below. + const GUID& SDRSubType = + mUseHwAccel ? 
MFVideoFormat_NV12 : MFVideoFormat_YV12; + bool preferP010 = mColorDepth > gfx::ColorDepth::COLOR_8 || IsHDR(); + if (preferP010) { + if (FAILED((hr = mDecoder->FindDecoderOutputTypeWithSubtype( + MFVideoFormat_P010, GUID_NULL))) && + FAILED((hr = mDecoder->FindDecoderOutputTypeWithSubtype( + MFVideoFormat_P016, SDRSubType)))) { + LOG("No suitable output format found"); + return hr; + } + } else { + if (FAILED((hr = (mDecoder->FindDecoderOutputTypeWithSubtype( + SDRSubType, GUID_NULL)))) && + FAILED((hr = mDecoder->FindDecoderOutputTypeWithSubtype( + MFVideoFormat_P010, MFVideoFormat_P016)))) { + LOG("No suitable output format found"); + return hr; + } } RefPtr<IMFMediaType> outputType; hr = mDecoder->GetOutputMediaType(outputType); NS_ENSURE_TRUE(SUCCEEDED(hr), hr); + // Now we need to convert the video decode output to a display format. if (mUseHwAccel) { hr = mDXVA2Manager->ConfigureForSize( outputType, mColorSpace.refOr( DefaultColorSpace({mImageSize.width, mImageSize.height})), - mColorRange, mColorDepth, mVideoInfo.ImageRect().width, - mVideoInfo.ImageRect().height); + mColorRange, mColorDepth, + mVideoInfo.mTransferFunction.refOr(gfx::TransferFunction::BT709), + mVideoInfo.ImageRect().width, mVideoInfo.ImageRect().height); NS_ENSURE_TRUE(SUCCEEDED(hr), hr); } else { // The stride may have changed, recheck for it. 
@@ -987,13 +1023,13 @@ nsCString WMFVideoMFTManager::GetDescriptionName() const { } if (format == MFVideoFormat_P010) { if (!gfx::DeviceManagerDx::Get()->CanUseP010()) { - return "p010->argb32"; + return "p010->a2rgb10"; } return "p010"; } if (format == MFVideoFormat_P016) { if (!gfx::DeviceManagerDx::Get()->CanUseP016()) { - return "p016->argb32"; + return "p016->argb16f"; } return "p016"; } diff --git a/dom/media/platforms/wmf/WMFVideoMFTManager.h b/dom/media/platforms/wmf/WMFVideoMFTManager.h @@ -69,7 +69,7 @@ class WMFVideoMFTManager : public MFTManager { HRESULT CreateD3DVideoFrame(IMFSample* aSample, int64_t aStreamOffset, VideoData** aOutVideoData); - HRESULT SetDecoderMediaTypes(); + HRESULT SetDecoderMediaTypes(const GUID& aFallbackSubType); bool CanUseDXVA(IMFMediaType* aInputType, IMFMediaType* aOutputType); @@ -86,6 +86,11 @@ class WMFVideoMFTManager : public MFTManager { media::TimeUnit GetSampleDurationOrLastKnownDuration( IMFSample* aSample) const; + bool IsHDR() const { + return gfx::IsHDRTransferFunction( + mVideoInfo.mTransferFunction.refOr(gfx::TransferFunction::BT709)); + } + // Video frame geometry. const VideoInfo mVideoInfo; const gfx::IntSize mImageSize; diff --git a/gfx/2d/Types.cpp b/gfx/2d/Types.cpp @@ -47,6 +47,9 @@ std::ostream& operator<<(std::ostream& aOut, const SurfaceFormat& aFormat) { Emit(SurfaceFormat::R8G8B8); Emit(SurfaceFormat::B8G8R8); Emit(SurfaceFormat::R5G6B5_UINT16); + Emit(SurfaceFormat::R10G10B10A2_UINT32); + Emit(SurfaceFormat::R10G10B10X2_UINT32); + Emit(SurfaceFormat::R16G16B16A16F); Emit(SurfaceFormat::A8); Emit(SurfaceFormat::A16); Emit(SurfaceFormat::R8G8); diff --git a/gfx/2d/Types.h b/gfx/2d/Types.h @@ -96,6 +96,14 @@ enum class SurfaceFormat : int8_t { Lab, Depth, + // LE packed 10bit per channel format primarily associated with HDR10 video. + R10G10B10A2_UINT32, // 0bAARRRRRRRRRRGGGGGGGGGGBBBBBBBBBB + // Same as R10G10B10A2_UINT32 but with the alpha channel ignored. 
+ R10G10B10X2_UINT32, // 0b00RRRRRRRRRRGGGGGGGGGGBBBBBBBBBB + // 4 half-float (f16) components in RGBA order for HDR rendering, each is + // machine endian. + R16G16B16A16F, + // This represents the unknown format. UNKNOWN, // TODO: Replace uses with Maybe<SurfaceFormat>. @@ -141,6 +149,8 @@ inline std::optional<SurfaceFormatInfo> Info(const SurfaceFormat aFormat) { case SurfaceFormat::B8G8R8A8: case SurfaceFormat::R8G8B8A8: case SurfaceFormat::A8R8G8B8: + case SurfaceFormat::R10G10B10A2_UINT32: + case SurfaceFormat::R16G16B16A16F: info.hasColor = true; info.hasAlpha = true; break; @@ -151,6 +161,7 @@ inline std::optional<SurfaceFormatInfo> Info(const SurfaceFormat aFormat) { case SurfaceFormat::R8G8B8: case SurfaceFormat::B8G8R8: case SurfaceFormat::R5G6B5_UINT16: + case SurfaceFormat::R10G10B10X2_UINT32: case SurfaceFormat::R8G8: case SurfaceFormat::R16G16: case SurfaceFormat::HSV: @@ -218,6 +229,15 @@ inline std::optional<SurfaceFormatInfo> Info(const SurfaceFormat aFormat) { info.bytesPerPixel = 1; break; + case SurfaceFormat::R10G10B10A2_UINT32: + case SurfaceFormat::R10G10B10X2_UINT32: + info.bytesPerPixel = 4; + break; + + case SurfaceFormat::R16G16B16A16F: + info.bytesPerPixel = 8; + break; + case SurfaceFormat::HSV: case SurfaceFormat::Lab: info.bytesPerPixel = 3 * sizeof(float); @@ -302,9 +322,40 @@ static inline int BytesPerPixel(SurfaceFormat aFormat) { return 3 * sizeof(float); case SurfaceFormat::Depth: return sizeof(uint16_t); - default: + case SurfaceFormat::B8G8R8A8: + case SurfaceFormat::B8G8R8X8: + case SurfaceFormat::R8G8B8A8: + case SurfaceFormat::R8G8B8X8: + case SurfaceFormat::A8R8G8B8: + case SurfaceFormat::X8R8G8B8: + case SurfaceFormat::R10G10B10A2_UINT32: + case SurfaceFormat::R10G10B10X2_UINT32: + case SurfaceFormat::R16G16: + return 4; + case SurfaceFormat::R16G16B16A16F: + return 8; + case SurfaceFormat::R8G8: + return 2; + case SurfaceFormat::YUV420: + case SurfaceFormat::YUV420P10: + case SurfaceFormat::YUV422P10: + case 
SurfaceFormat::NV12: + case SurfaceFormat::NV16: + case SurfaceFormat::YUY2: + // These formats are not easily described in terms of bytes per pixel, + // technically 1.5 bytes per pixel on average, which is guaranteed by the + // width and height being multiples of 2. + return 0; + case SurfaceFormat::P016: + case SurfaceFormat::P010: + // Similar to NV12 but uint16 pixels. + return 0; + case SurfaceFormat::UNKNOWN: + MOZ_ASSERT_UNREACHABLE("unhandled gfx::SurfaceFormat::UNKNOWN"); return 4; } + MOZ_ASSERT_UNREACHABLE("unhandled enum value for gfx::SurfaceFormat"); + return 4; } inline bool IsOpaque(SurfaceFormat aFormat) { @@ -314,6 +365,7 @@ inline bool IsOpaque(SurfaceFormat aFormat) { case SurfaceFormat::R8G8B8X8: case SurfaceFormat::X8R8G8B8: case SurfaceFormat::R5G6B5_UINT16: + case SurfaceFormat::R10G10B10X2_UINT32: case SurfaceFormat::R8G8B8: case SurfaceFormat::B8G8R8: case SurfaceFormat::R8G8: @@ -326,9 +378,22 @@ inline bool IsOpaque(SurfaceFormat aFormat) { case SurfaceFormat::P016: case SurfaceFormat::YUY2: return true; - default: + case SurfaceFormat::B8G8R8A8: + case SurfaceFormat::R8G8B8A8: + case SurfaceFormat::A8R8G8B8: + case SurfaceFormat::R10G10B10A2_UINT32: + case SurfaceFormat::R16G16B16A16F: + case SurfaceFormat::A8: + case SurfaceFormat::A16: + case SurfaceFormat::R16G16: + case SurfaceFormat::YUV420P10: + case SurfaceFormat::YUV422P10: + case SurfaceFormat::NV16: + case SurfaceFormat::UNKNOWN: return false; } + MOZ_ASSERT_UNREACHABLE("unhandled enum value for gfx::SurfaceFormat"); + return false; } // These are standardized Coding-independent Code Points @@ -705,6 +770,19 @@ static inline uint32_t RescalingFactorForColorDepth(ColorDepth aColorDepth) { return factor; } +static inline bool IsHDRTransferFunction( + gfx::TransferFunction aTransferFunction) { + switch (aTransferFunction) { + case gfx::TransferFunction::PQ: + case gfx::TransferFunction::HLG: + return true; + case gfx::TransferFunction::BT709: + case gfx::TransferFunction::SRGB: 
+ return false; + } + MOZ_CRASH("bad TransferFunction"); +} + enum class ChromaSubsampling : uint8_t { FULL, HALF_WIDTH, diff --git a/gfx/config/gfxVars.h b/gfx/config/gfxVars.h @@ -118,7 +118,8 @@ class MOZ_STACK_CLASS gfxVarsCollectUpdates; _(DMABufModifiersP010, ArrayOfuint64_t, nsTArray<uint64_t>()) \ _(DMABufModifiersNV12, ArrayOfuint64_t, nsTArray<uint64_t>()) \ _(AllowGLNorm16Textures, bool, false) \ - _(WebRenderLayerCompositorDCompTexture, bool, false) + _(WebRenderLayerCompositorDCompTexture, bool, false) \ + _(WebRenderOverlayHDR, bool, false) /* Add new entries above this line. */ diff --git a/gfx/gl/GLBlitHelperD3D.cpp b/gfx/gl/GLBlitHelperD3D.cpp @@ -242,6 +242,9 @@ bool GLBlitHelper::BlitDescriptor(const layers::SurfaceDescriptorD3D10& desc, case gfx::SurfaceFormat::B8G8R8X8: case gfx::SurfaceFormat::R8G8B8A8: case gfx::SurfaceFormat::R8G8B8X8: + case gfx::SurfaceFormat::R10G10B10A2_UINT32: + case gfx::SurfaceFormat::R10G10B10X2_UINT32: + case gfx::SurfaceFormat::R16G16B16A16F: yuv = false; break; case gfx::SurfaceFormat::NV12: diff --git a/gfx/ipc/OverlayInfo.h b/gfx/ipc/OverlayInfo.h @@ -31,9 +31,11 @@ struct OverlayInfo { OverlaySupportType mYuy2Overlay = OverlaySupportType::None; OverlaySupportType mBgra8Overlay = OverlaySupportType::None; OverlaySupportType mRgb10a2Overlay = OverlaySupportType::None; + OverlaySupportType mRgba16fOverlay = OverlaySupportType::None; bool mSupportsVpSuperResolution = false; bool mSupportsVpAutoHDR = false; + bool mSupportsHDR = false; friend struct IPC::ParamTraits<OverlayInfo>; }; diff --git a/gfx/layers/D3D11ShareHandleImage.cpp b/gfx/layers/D3D11ShareHandleImage.cpp @@ -51,8 +51,11 @@ bool D3D11ShareHandleImage::AllocateTexture(D3D11RecycleAllocator* aAllocator, return false; } else { MOZ_ASSERT(aDevice); + auto format = mColorDepth > gfx::ColorDepth::COLOR_8 + ? 
DXGI_FORMAT_R16G16B16A16_FLOAT + : DXGI_FORMAT_B8G8R8A8_UNORM; CD3D11_TEXTURE2D_DESC newDesc( - DXGI_FORMAT_B8G8R8A8_UNORM, mSize.width, mSize.height, 1, 1, + format, mSize.width, mSize.height, 1, 1, D3D11_BIND_RENDER_TARGET | D3D11_BIND_SHADER_RESOURCE); newDesc.MiscFlags = D3D11_RESOURCE_MISC_SHARED_NTHANDLE | D3D11_RESOURCE_MISC_SHARED; @@ -175,7 +178,10 @@ void D3D11RecycleAllocator::SetPreferredSurfaceFormat( gfx::SurfaceFormat aPreferredFormat) { if ((aPreferredFormat == gfx::SurfaceFormat::NV12 && mCanUseNV12) || (aPreferredFormat == gfx::SurfaceFormat::P010 && mCanUseP010) || - (aPreferredFormat == gfx::SurfaceFormat::P016 && mCanUseP016)) { + (aPreferredFormat == gfx::SurfaceFormat::P016 && mCanUseP016) || + (aPreferredFormat == gfx::SurfaceFormat::R10G10B10A2_UINT32) || + (aPreferredFormat == gfx::SurfaceFormat::R10G10B10X2_UINT32) || + (aPreferredFormat == gfx::SurfaceFormat::R16G16B16A16F)) { mUsableSurfaceFormat = aPreferredFormat; return; } diff --git a/gfx/layers/DcompSurfaceImage.cpp b/gfx/layers/DcompSurfaceImage.cpp @@ -64,7 +64,8 @@ DcompSurfaceImage::DcompSurfaceImage(HANDLE aHandle, gfx::IntSize aSize, // Dcomp surface supports DXGI_FORMAT_B8G8R8A8_UNORM, // DXGI_FORMAT_R8G8B8A8_UNORM and DXGI_FORMAT_R16G16B16A16_FLOAT MOZ_ASSERT(aFormat == gfx::SurfaceFormat::B8G8R8A8 || - aFormat == gfx::SurfaceFormat::R8G8B8A8); + aFormat == gfx::SurfaceFormat::R8G8B8A8 || + aFormat == gfx::SurfaceFormat::R16G16B16A16F); } TextureClient* DcompSurfaceImage::GetTextureClient( diff --git a/gfx/layers/NativeLayerCA.mm b/gfx/layers/NativeLayerCA.mm @@ -931,7 +931,8 @@ void NativeLayerCA::AttachExternalImage(wr::RenderTextureHost* aExternalImage) { bool isHDR = false; MacIOSurface* macIOSurface = texture->GetSurface(); - if (macIOSurface->GetYUVColorSpace() == gfx::YUVColorSpace::BT2020) { + if (macIOSurface->GetYUVColorSpace() == gfx::YUVColorSpace::BT2020 && + StaticPrefs::gfx_color_management_hdr_video_assume_rec2020_uses_pq()) { // BT2020 colorSpace is a 
signifier of HDR. isHDR = true; } @@ -940,7 +941,7 @@ void NativeLayerCA::AttachExternalImage(wr::RenderTextureHost* aExternalImage) { // 10-bit color is a signifier of HDR. isHDR = true; } - mIsHDR = isHDR; + mIsHDR = isHDR && StaticPrefs::gfx_color_management_hdr_video(); bool specializeVideo = ShouldSpecializeVideo(lock); bool changedSpecializeVideo = (mSpecializeVideo != specializeVideo); diff --git a/gfx/layers/NativeLayerRemoteMac.mm b/gfx/layers/NativeLayerRemoteMac.mm @@ -79,7 +79,8 @@ void NativeLayerRemoteMac::AttachExternalImage( bool isHDR = false; MacIOSurface* macIOSurface = texture->GetSurface(); - if (macIOSurface->GetYUVColorSpace() == gfx::YUVColorSpace::BT2020) { + if (macIOSurface->GetYUVColorSpace() == gfx::YUVColorSpace::BT2020 && + StaticPrefs::gfx_color_management_hdr_video_assume_rec2020_uses_pq()) { // BT2020 colorSpace is a signifier of HDR. isHDR = true; } @@ -88,7 +89,7 @@ void NativeLayerRemoteMac::AttachExternalImage( // 10-bit color is a signifier of HDR. isHDR = true; } - mIsHDR = isHDR; + mIsHDR = isHDR && StaticPrefs::gfx_color_management_hdr_video(); mDirtyLayerInfo |= changedDisplayRect; mSnapshotLayer.mMutatedFrontSurface = true; diff --git a/gfx/layers/d3d11/TextureD3D11.cpp b/gfx/layers/d3d11/TextureD3D11.cpp @@ -477,12 +477,55 @@ D3D11TextureData* D3D11TextureData::Create(IntSize aSize, SurfaceFormat aFormat, DXGI_FORMAT_B8G8R8A8_UNORM, aSize.width, aSize.height, 1, 1, D3D11_BIND_RENDER_TARGET | D3D11_BIND_SHADER_RESOURCE); - if (aFormat == SurfaceFormat::NV12) { - newDesc.Format = DXGI_FORMAT_NV12; - } else if (aFormat == SurfaceFormat::P010) { - newDesc.Format = DXGI_FORMAT_P010; - } else if (aFormat == SurfaceFormat::P016) { - newDesc.Format = DXGI_FORMAT_P016; + // This supported formats list matches DXGITextureHostD3D11::PushDisplayItems. 
+ switch (aFormat) { + case gfx::SurfaceFormat::B8G8R8X8: + case gfx::SurfaceFormat::R8G8B8X8: + newDesc.Format = DXGI_FORMAT_B8G8R8X8_UNORM; + break; + case gfx::SurfaceFormat::B8G8R8A8: + case gfx::SurfaceFormat::R8G8B8A8: + newDesc.Format = DXGI_FORMAT_B8G8R8A8_UNORM; + break; + case gfx::SurfaceFormat::R10G10B10A2_UINT32: + case gfx::SurfaceFormat::R10G10B10X2_UINT32: + newDesc.Format = DXGI_FORMAT_R10G10B10A2_UNORM; + break; + case gfx::SurfaceFormat::R16G16B16A16F: + newDesc.Format = DXGI_FORMAT_R16G16B16A16_FLOAT; + break; + case gfx::SurfaceFormat::NV12: + newDesc.Format = DXGI_FORMAT_NV12; + break; + case gfx::SurfaceFormat::P010: + newDesc.Format = DXGI_FORMAT_P010; + break; + case gfx::SurfaceFormat::P016: + newDesc.Format = DXGI_FORMAT_P016; + break; + case gfx::SurfaceFormat::A8R8G8B8: + case gfx::SurfaceFormat::X8R8G8B8: + case gfx::SurfaceFormat::R8G8B8: + case gfx::SurfaceFormat::B8G8R8: + case gfx::SurfaceFormat::R5G6B5_UINT16: + case gfx::SurfaceFormat::A8: + case gfx::SurfaceFormat::A16: + case gfx::SurfaceFormat::R8G8: + case gfx::SurfaceFormat::R16G16: + case gfx::SurfaceFormat::YUV420: + case gfx::SurfaceFormat::YUV420P10: + case gfx::SurfaceFormat::YUV422P10: + case gfx::SurfaceFormat::NV16: + case gfx::SurfaceFormat::YUY2: + case gfx::SurfaceFormat::HSV: + case gfx::SurfaceFormat::Lab: + case gfx::SurfaceFormat::Depth: + case gfx::SurfaceFormat::UNKNOWN: + // Per advice from Sotaro, these formats are not supported for video. 
+ gfxCriticalNoteOnce + << "D3D11TextureData::Create: Unsupported SurfaceFormat %u" + << static_cast<unsigned int>(aFormat); + return nullptr; } newDesc.MiscFlags = @@ -1069,7 +1112,10 @@ uint32_t DXGITextureHostD3D11::NumSubTextures() { case gfx::SurfaceFormat::R8G8B8X8: case gfx::SurfaceFormat::R8G8B8A8: case gfx::SurfaceFormat::B8G8R8A8: - case gfx::SurfaceFormat::B8G8R8X8: { + case gfx::SurfaceFormat::B8G8R8X8: + case gfx::SurfaceFormat::R10G10B10A2_UINT32: + case gfx::SurfaceFormat::R10G10B10X2_UINT32: + case gfx::SurfaceFormat::R16G16B16A16F: { return 1; } case gfx::SurfaceFormat::NV12: @@ -1100,7 +1146,10 @@ void DXGITextureHostD3D11::PushResourceUpdates( case gfx::SurfaceFormat::R8G8B8X8: case gfx::SurfaceFormat::R8G8B8A8: case gfx::SurfaceFormat::B8G8R8A8: - case gfx::SurfaceFormat::B8G8R8X8: { + case gfx::SurfaceFormat::B8G8R8X8: + case gfx::SurfaceFormat::R10G10B10A2_UINT32: + case gfx::SurfaceFormat::R10G10B10X2_UINT32: + case gfx::SurfaceFormat::R16G16B16A16F: { MOZ_ASSERT(aImageKeys.length() == 1); wr::ImageDescriptor descriptor(mSize, GetFormat()); @@ -1170,7 +1219,20 @@ void DXGITextureHostD3D11::PushDisplayItems( preferExternalCompositing = false; } + // This supported format list matches D3D11TextureData::Create. switch (GetFormat()) { + case gfx::SurfaceFormat::R10G10B10A2_UINT32: + case gfx::SurfaceFormat::R10G10B10X2_UINT32: + case gfx::SurfaceFormat::R16G16B16A16F: { + // WebRender isn't HDR ready so we have to push to the compositor. 
+ preferCompositorSurface = preferExternalCompositing = true; + MOZ_ASSERT(aImageKeys.length() == 1); + aBuilder.PushImage(aBounds, aClip, true, false, aFilter, aImageKeys[0], + !(mFlags & TextureFlags::NON_PREMULTIPLIED), + wr::ColorF{1.0f, 1.0f, 1.0f, 1.0f}, + preferCompositorSurface, preferExternalCompositing); + break; + } case gfx::SurfaceFormat::R8G8B8X8: case gfx::SurfaceFormat::R8G8B8A8: case gfx::SurfaceFormat::B8G8R8A8: @@ -1183,14 +1245,23 @@ void DXGITextureHostD3D11::PushDisplayItems( break; } case gfx::SurfaceFormat::P010: - case gfx::SurfaceFormat::P016: - case gfx::SurfaceFormat::NV12: { + case gfx::SurfaceFormat::P016: { // DXGI_FORMAT_P010 stores its 10 bit value in the most significant bits // of each 16 bit word with the unused lower bits cleared to zero so that // it may be handled as if it was DXGI_FORMAT_P016. This is approximately // perceptually correct. However, due to rounding error, the precise // quantized value after sampling may be off by 1. MOZ_ASSERT(aImageKeys.length() == 2); + aBuilder.PushP010Image( + aBounds, aClip, true, aImageKeys[0], aImageKeys[1], + wr::ColorDepth::Color16, + wr::ToWrYuvColorSpace(ToYUVColorSpace(mColorSpace)), + wr::ToWrColorRange(mColorRange), aFilter, preferCompositorSurface, + preferExternalCompositing); + break; + } + case gfx::SurfaceFormat::NV12: { + MOZ_ASSERT(aImageKeys.length() == 2); aBuilder.PushNV12Image( aBounds, aClip, true, aImageKeys[0], aImageKeys[1], GetFormat() == gfx::SurfaceFormat::NV12 ? 
wr::ColorDepth::Color8 @@ -1200,7 +1271,25 @@ void DXGITextureHostD3D11::PushDisplayItems( preferExternalCompositing); break; } - default: { + case gfx::SurfaceFormat::A8R8G8B8: + case gfx::SurfaceFormat::X8R8G8B8: + case gfx::SurfaceFormat::R8G8B8: + case gfx::SurfaceFormat::B8G8R8: + case gfx::SurfaceFormat::R5G6B5_UINT16: + case gfx::SurfaceFormat::A8: + case gfx::SurfaceFormat::A16: + case gfx::SurfaceFormat::R8G8: + case gfx::SurfaceFormat::R16G16: + case gfx::SurfaceFormat::YUV420: + case gfx::SurfaceFormat::YUV420P10: + case gfx::SurfaceFormat::YUV422P10: + case gfx::SurfaceFormat::NV16: + case gfx::SurfaceFormat::YUY2: + case gfx::SurfaceFormat::HSV: + case gfx::SurfaceFormat::Lab: + case gfx::SurfaceFormat::Depth: + case gfx::SurfaceFormat::UNKNOWN: { + // Per advice from Sotaro, these formats are not supported for video. MOZ_ASSERT_UNREACHABLE("unexpected to be called"); } } @@ -1211,9 +1300,9 @@ bool DXGITextureHostD3D11::SupportsExternalCompositing( if (aBackend == WebRenderBackend::SOFTWARE) { return true; } - // XXX Add P010 and P016 support. 
if (GetFormat() == gfx::SurfaceFormat::NV12 || - GetFormat() == gfx::SurfaceFormat::P010) { + GetFormat() == gfx::SurfaceFormat::P010 || + GetFormat() == gfx::SurfaceFormat::P016) { if ((mFlags & TextureFlags::SOFTWARE_DECODED_VIDEO) && (gfx::gfxVars::UseWebRenderDCompVideoSwOverlayWin())) { return true; diff --git a/gfx/layers/d3d11/VideoProcessorD3D11.cpp b/gfx/layers/d3d11/VideoProcessorD3D11.cpp @@ -19,8 +19,8 @@ namespace layers { // TODO: Replace with YUVRangedColorSpace static Maybe<DXGI_COLOR_SPACE_TYPE> GetSourceDXGIColorSpace( - const gfx::YUVColorSpace aYUVColorSpace, - const gfx::ColorRange aColorRange) { + const gfx::YUVColorSpace aYUVColorSpace, const gfx::ColorRange aColorRange, + const bool aContentIsHDR) { if (aYUVColorSpace == gfx::YUVColorSpace::BT601) { if (aColorRange == gfx::ColorRange::FULL) { return Some(DXGI_COLOR_SPACE_YCBCR_FULL_G22_LEFT_P601); @@ -35,10 +35,22 @@ static Maybe<DXGI_COLOR_SPACE_TYPE> GetSourceDXGIColorSpace( } } else if (aYUVColorSpace == gfx::YUVColorSpace::BT2020) { if (aColorRange == gfx::ColorRange::FULL) { - // XXX Add SMPTEST2084 handling. HDR content is not handled yet - return Some(DXGI_COLOR_SPACE_YCBCR_FULL_G22_LEFT_P2020); + if (aContentIsHDR) { + // DXGI doesn't have a full range PQ YCbCr format, hopefully we won't + // have to deal with this case. 
+ gfxCriticalNoteOnce + << "GetSourceDXGIColorSpace: DXGI has no full range " + "BT2020 PQ YCbCr format, using studio range instead"; + return Some(DXGI_COLOR_SPACE_YCBCR_STUDIO_G2084_LEFT_P2020); + } else { + return Some(DXGI_COLOR_SPACE_YCBCR_FULL_G22_LEFT_P2020); + } } else { - return Some(DXGI_COLOR_SPACE_YCBCR_STUDIO_G22_LEFT_P2020); + if (aContentIsHDR) { + return Some(DXGI_COLOR_SPACE_YCBCR_STUDIO_G2084_LEFT_P2020); + } else { + return Some(DXGI_COLOR_SPACE_YCBCR_STUDIO_G22_LEFT_P2020); + } } } @@ -46,9 +58,9 @@ static Maybe<DXGI_COLOR_SPACE_TYPE> GetSourceDXGIColorSpace( } static Maybe<DXGI_COLOR_SPACE_TYPE> GetSourceDXGIColorSpace( - const gfx::YUVRangedColorSpace aYUVColorSpace) { + const gfx::YUVRangedColorSpace aYUVColorSpace, const bool aContentIsHDR) { const auto info = FromYUVRangedColorSpace(aYUVColorSpace); - return GetSourceDXGIColorSpace(info.space, info.range); + return GetSourceDXGIColorSpace(info.space, info.range, aContentIsHDR); } /* static */ @@ -167,7 +179,8 @@ bool VideoProcessorD3D11::CallVideoProcessorBlt( auto yuvRangedColorSpace = gfx::ToYUVRangedColorSpace( gfx::ToYUVColorSpace(aTextureInfo.mColorSpace), aTextureInfo.mColorRange); - auto sourceColorSpace = GetSourceDXGIColorSpace(yuvRangedColorSpace); + auto sourceColorSpace = + GetSourceDXGIColorSpace(yuvRangedColorSpace, mContentIsHDR); if (sourceColorSpace.isNothing()) { gfxCriticalNoteOnce << "Unsupported color space"; return false; diff --git a/gfx/layers/d3d11/VideoProcessorD3D11.h b/gfx/layers/d3d11/VideoProcessorD3D11.h @@ -75,6 +75,7 @@ class VideoProcessorD3D11 { gfx::IntSize mSize; RefPtr<ID3D11VideoProcessor> mVideoProcessor; RefPtr<ID3D11VideoProcessorEnumerator> mVideoProcessorEnumerator; + bool mContentIsHDR = false; }; } // namespace layers diff --git a/gfx/layers/ipc/LayersMessageUtils.h b/gfx/layers/ipc/LayersMessageUtils.h @@ -1054,8 +1054,10 @@ struct ParamTraits<mozilla::layers::OverlayInfo> { WriteParam(aWriter, aParam.mYuy2Overlay); WriteParam(aWriter, 
aParam.mBgra8Overlay); WriteParam(aWriter, aParam.mRgb10a2Overlay); + WriteParam(aWriter, aParam.mRgba16fOverlay); WriteParam(aWriter, aParam.mSupportsVpSuperResolution); WriteParam(aWriter, aParam.mSupportsVpAutoHDR); + WriteParam(aWriter, aParam.mSupportsHDR); } static bool Read(MessageReader* aReader, paramType* aResult) { @@ -1064,8 +1066,10 @@ struct ParamTraits<mozilla::layers::OverlayInfo> { ReadParam(aReader, &aResult->mYuy2Overlay) && ReadParam(aReader, &aResult->mBgra8Overlay) && ReadParam(aReader, &aResult->mRgb10a2Overlay) && + ReadParam(aReader, &aResult->mRgba16fOverlay) && ReadParam(aReader, &aResult->mSupportsVpSuperResolution) && - ReadParam(aReader, &aResult->mSupportsVpAutoHDR); + ReadParam(aReader, &aResult->mSupportsVpAutoHDR) && + ReadParam(aReader, &aResult->mSupportsHDR); } }; diff --git a/gfx/layers/opengl/DMABUFTextureHostOGL.cpp b/gfx/layers/opengl/DMABUFTextureHostOGL.cpp @@ -135,7 +135,8 @@ void DMABUFTextureHostOGL::PushResourceUpdates( /* aNormalizedUvs */ false); break; } - case gfx::SurfaceFormat::P010: { + case gfx::SurfaceFormat::P010: + case gfx::SurfaceFormat::P016: { MOZ_ASSERT(aImageKeys.length() == 2); MOZ_ASSERT(mSurface->GetTextureCount() == 2); wr::ImageDescriptor descriptor0( @@ -205,7 +206,8 @@ void DMABUFTextureHostOGL::PushDisplayItems( supportsDirectComposition); break; } - case gfx::SurfaceFormat::P010: { + case gfx::SurfaceFormat::P010: + case gfx::SurfaceFormat::P016: { MOZ_ASSERT(aImageKeys.length() == 2); MOZ_ASSERT(mSurface->GetTextureCount() == 2); aBuilder.PushP010Image( diff --git a/gfx/layers/opengl/MacIOSurfaceTextureHostOGL.cpp b/gfx/layers/opengl/MacIOSurfaceTextureHostOGL.cpp @@ -165,7 +165,8 @@ void MacIOSurfaceTextureHostOGL::PushResourceUpdates( /* aNormalizedUvs */ false); break; } - case gfx::SurfaceFormat::P010: { + case gfx::SurfaceFormat::P010: + case gfx::SurfaceFormat::P016: { MOZ_ASSERT(aImageKeys.length() == 2); MOZ_ASSERT(mSurface->GetPlaneCount() == 2); wr::ImageDescriptor descriptor0( @@ 
-247,7 +248,8 @@ void MacIOSurfaceTextureHostOGL::PushDisplayItems( /* aSupportsExternalCompositing */ true); break; } - case gfx::SurfaceFormat::P010: { + case gfx::SurfaceFormat::P010: + case gfx::SurfaceFormat::P016: { MOZ_ASSERT(aImageKeys.length() == 2); MOZ_ASSERT(mSurface->GetPlaneCount() == 2); aBuilder.PushP010Image( diff --git a/gfx/thebes/gfxPlatform.cpp b/gfx/thebes/gfxPlatform.cpp @@ -2905,6 +2905,10 @@ void gfxPlatform::InitWebRenderConfig() { gfxVars::SetWebRenderOverlayVpAutoHDR(true); } + if (StaticPrefs::gfx_webrender_overlay_hdr_AtStartup()) { + gfxVars::SetWebRenderOverlayHDR(true); + } + bool allowOverlayVpSuperResolution = false; if (StaticPrefs::gfx_webrender_overlay_vp_super_resolution_AtStartup()) { allowOverlayVpSuperResolution = true; @@ -3776,13 +3780,16 @@ void gfxPlatform::GetOverlayInfo(mozilla::widget::InfoObject& aObj) { }; nsPrintfCString value( - "NV12=%s YUV2=%s BGRA8=%s RGB10A2=%s VpSR=%s VpAutoHDR=%s", + "NV12=%s YUV2=%s BGRA8=%s RGB10A2=%s RGBA16F=%s VpSR=%s VpAutoHDR=%s " + "HDR=%s", toString(mOverlayInfo.ref().mNv12Overlay), toString(mOverlayInfo.ref().mYuy2Overlay), toString(mOverlayInfo.ref().mBgra8Overlay), toString(mOverlayInfo.ref().mRgb10a2Overlay), + toString(mOverlayInfo.ref().mRgba16fOverlay), toStringBool(mOverlayInfo.ref().mSupportsVpSuperResolution), - toStringBool(mOverlayInfo.ref().mSupportsVpAutoHDR)); + toStringBool(mOverlayInfo.ref().mSupportsVpAutoHDR), + toStringBool(mOverlayInfo.ref().mSupportsHDR)); aObj.DefineProperty("OverlaySupport", NS_ConvertUTF8toUTF16(value)); } diff --git a/gfx/webrender_bindings/DCLayerTree.cpp b/gfx/webrender_bindings/DCLayerTree.cpp @@ -419,8 +419,19 @@ bool DCLayerTree::InitializeVideoOverlaySupport() { &info->mBgra8OverlaySupportFlags); output3->CheckOverlaySupport(DXGI_FORMAT_R10G10B10A2_UNORM, mDevice, &info->mRgb10a2OverlaySupportFlags); + output3->CheckOverlaySupport(DXGI_FORMAT_R16G16B16A16_FLOAT, mDevice, + &info->mRgba16fOverlaySupportFlags); - if 
(FlagsSupportsOverlays(info->mNv12OverlaySupportFlags)) { + if (FlagsSupportsOverlays(info->mRgb10a2OverlaySupportFlags)) { + info->mSupportsHDR = true; + } + + if (FlagsSupportsOverlays(info->mRgba16fOverlaySupportFlags)) { + info->mSupportsHDR = true; + } + + if (!info->mSupportsHardwareOverlays && + FlagsSupportsOverlays(info->mNv12OverlaySupportFlags)) { // NV12 format is preferred if it's supported. info->mOverlayFormatUsed = DXGI_FORMAT_NV12; info->mSupportsHardwareOverlays = true; @@ -433,16 +444,6 @@ bool DCLayerTree::InitializeVideoOverlaySupport() { info->mSupportsHardwareOverlays = true; } - // RGB10A2 overlay is used for displaying HDR content. In Intel's - // platform, RGB10A2 overlay is enabled only when - // DXGI_COLOR_SPACE_RGB_FULL_G2084_NONE_P2020 is supported. - if (FlagsSupportsOverlays(info->mRgb10a2OverlaySupportFlags)) { - if (!CheckOverlayColorSpaceSupport( - DXGI_FORMAT_R10G10B10A2_UNORM, - DXGI_COLOR_SPACE_RGB_FULL_G2084_NONE_P2020, output, mDevice)) - info->mRgb10a2OverlaySupportFlags = 0; - } - // Early out after the first output that reports overlay support. 
All // outputs are expected to report the same overlay support according to // Microsoft's WDDM documentation: @@ -1361,9 +1362,13 @@ layers::OverlayInfo DCLayerTree::GetOverlayInfo() { info.mRgb10a2Overlay = FlagsToOverlaySupportType(sGpuOverlayInfo->mRgb10a2OverlaySupportFlags, /* aSoftwareOverlaySupported */ false); + info.mRgba16fOverlay = + FlagsToOverlaySupportType(sGpuOverlayInfo->mRgba16fOverlaySupportFlags, + /* aSoftwareOverlaySupported */ false); info.mSupportsVpSuperResolution = sGpuOverlayInfo->mSupportsVpSuperResolution; info.mSupportsVpAutoHDR = sGpuOverlayInfo->mSupportsVpAutoHDR; + info.mSupportsHDR = sGpuOverlayInfo->mSupportsHDR; return info; } @@ -2169,10 +2174,15 @@ DCSurfaceVideo::~DCSurfaceVideo() { } bool IsYUVSwapChainFormat(DXGI_FORMAT aFormat) { - if (aFormat == DXGI_FORMAT_NV12 || aFormat == DXGI_FORMAT_YUY2) { - return true; + switch (aFormat) { + case DXGI_FORMAT_P010: + case DXGI_FORMAT_P016: + case DXGI_FORMAT_NV12: + case DXGI_FORMAT_YUY2: + return true; + default: + return false; } - return false; } void DCSurfaceVideo::AttachExternalImage(wr::ExternalImageId aExternalImage) { @@ -2193,11 +2203,31 @@ void DCSurfaceVideo::AttachExternalImage(wr::ExternalImageId aExternalImage) { return; } + // If the content format is HDR, we will want to use more than 8bit. + mContentIsHDR = false; + if (texture) { + const auto format = texture->GetFormat(); + nsPrintfCString str("AttachExternalImage: SurfaceFormat %d", (int)format); + PROFILER_MARKER_TEXT("DCSurfaceVideo", GRAPHICS, {}, str); + switch (format) { + case gfx::SurfaceFormat::R10G10B10A2_UINT32: + case gfx::SurfaceFormat::R10G10B10X2_UINT32: + case gfx::SurfaceFormat::R16G16B16A16F: + case gfx::SurfaceFormat::P010: + case gfx::SurfaceFormat::P016: + mContentIsHDR = true; + break; + default: + break; + } + } + // XXX if software decoded video frame format is nv12, it could be used as // video overlay. 
if (!texture || !texture->AsRenderDXGITextureHost() || ((texture->GetFormat() != gfx::SurfaceFormat::NV12) && - (texture->GetFormat() != gfx::SurfaceFormat::P010))) { + (texture->GetFormat() != gfx::SurfaceFormat::P010) && + (texture->GetFormat() != gfx::SurfaceFormat::P016))) { gfxCriticalNote << "Unsupported RenderTexture for overlay: " << gfx::hexa(texture); return; @@ -2265,7 +2295,7 @@ bool DCSurfaceVideo::CalculateSwapChainSize(gfx::Matrix& aTransform) { const bool driverSupportsAutoHDR = GetVpAutoHDRSupported(vendorId, mDCLayerTree->GetVideoContext(), mDCLayerTree->GetVideoProcessor()); - const bool contentIsHDR = false; // XXX for now, only non-HDR is supported. + const bool contentIsHDR = mContentIsHDR; const bool monitorIsHDR = gfx::DeviceManagerDx::Get()->WindowHDREnabled(mDCLayerTree->GetHwnd()); const bool powerIsCharging = RenderThread::Get()->GetPowerIsCharging(); @@ -2274,6 +2304,9 @@ bool DCSurfaceVideo::CalculateSwapChainSize(gfx::Matrix& aTransform) { !contentIsHDR && monitorIsHDR && driverSupportsAutoHDR && powerIsCharging && !mVpAutoHDRFailed; + bool useHDR = + gfx::gfxVars::WebRenderOverlayHDR() && contentIsHDR && monitorIsHDR; + if (profiler_thread_is_being_profiled_for_markers()) { nsPrintfCString str( "useVpAutoHDR %d gfxVars %d contentIsHDR %d monitor %d driver %d " @@ -2291,10 +2324,13 @@ bool DCSurfaceVideo::CalculateSwapChainSize(gfx::Matrix& aTransform) { mSwapChainSize = swapChainSize; mIsDRM = isDRM; - auto swapChainFormat = GetSwapChainFormat(useVpAutoHDR); + auto swapChainFormat = GetSwapChainFormat(useVpAutoHDR, useHDR); bool useYUVSwapChain = IsYUVSwapChainFormat(swapChainFormat); if (useYUVSwapChain) { // Tries to create YUV SwapChain + nsPrintfCString str("Creating video swapchain for YUV as DXGI format %d", + (int)swapChainFormat); + PROFILER_MARKER_TEXT("DCSurfaceVideo", GRAPHICS, {}, str); CreateVideoSwapChain(swapChainFormat); if (!mVideoSwapChain) { mFailedYuvSwapChain = true; @@ -2305,6 +2341,9 @@ bool 
DCSurfaceVideo::CalculateSwapChainSize(gfx::Matrix& aTransform) { } // Tries to create RGB SwapChain if (!mVideoSwapChain) { + nsPrintfCString str("Creating video swapchain for RGB as DXGI format %d", + (int)swapChainFormat); + PROFILER_MARKER_TEXT("DCSurfaceVideo", GRAPHICS, {}, str); CreateVideoSwapChain(swapChainFormat); } if (!mVideoSwapChain && useVpAutoHDR) { @@ -2313,13 +2352,19 @@ bool DCSurfaceVideo::CalculateSwapChainSize(gfx::Matrix& aTransform) { // Disable VpAutoHDR useVpAutoHDR = false; - swapChainFormat = GetSwapChainFormat(useVpAutoHDR); + swapChainFormat = GetSwapChainFormat(useVpAutoHDR, useHDR); + nsPrintfCString str( + "Creating video swapchain for RGB as DXGI format %d after fallback " + "from VpAutoHDR", + (int)swapChainFormat); + PROFILER_MARKER_TEXT("DCSurfaceVideo", GRAPHICS, {}, str); CreateVideoSwapChain(swapChainFormat); } } aTransform = transform; mUseVpAutoHDR = useVpAutoHDR; + mUseHDR = useHDR; return needsToPresent; } @@ -2449,10 +2494,14 @@ void DCSurfaceVideo::OnCompositorEndFrame(int aFrameId, uint32_t aDurationMs) { mRenderTextureHostUsageInfo->OnCompositorEndFrame(aFrameId, aDurationMs); } -DXGI_FORMAT DCSurfaceVideo::GetSwapChainFormat(bool aUseVpAutoHDR) { +DXGI_FORMAT DCSurfaceVideo::GetSwapChainFormat(bool aUseVpAutoHDR, + bool aUseHDR) { if (aUseVpAutoHDR) { return DXGI_FORMAT_R16G16B16A16_FLOAT; } + if (aUseHDR) { + return DXGI_FORMAT_R16G16B16A16_FLOAT; + } if (mFailedYuvSwapChain || !mDCLayerTree->SupportsHardwareOverlays()) { return DXGI_FORMAT_B8G8R8A8_UNORM; } @@ -2520,8 +2569,8 @@ bool DCSurfaceVideo::CreateVideoSwapChain(DXGI_FORMAT aSwapChainFormat) { // TODO: Replace with YUVRangedColorSpace static Maybe<DXGI_COLOR_SPACE_TYPE> GetSourceDXGIColorSpace( - const gfx::YUVColorSpace aYUVColorSpace, - const gfx::ColorRange aColorRange) { + const gfx::YUVColorSpace aYUVColorSpace, const gfx::ColorRange aColorRange, + const bool aContentIsHDR) { if (aYUVColorSpace == gfx::YUVColorSpace::BT601) { if (aColorRange == 
gfx::ColorRange::FULL) { return Some(DXGI_COLOR_SPACE_YCBCR_FULL_G22_LEFT_P601); @@ -2535,12 +2584,35 @@ static Maybe<DXGI_COLOR_SPACE_TYPE> GetSourceDXGIColorSpace( return Some(DXGI_COLOR_SPACE_YCBCR_STUDIO_G22_LEFT_P709); } } else if (aYUVColorSpace == gfx::YUVColorSpace::BT2020) { + // This is a probably-temporary internal workaround for the lack of access + // to mTransferFunction - BT2020 seems to always be used with PQ transfer + // function defined by BT2100 and SMPTE 2084, we've been making this same + // assumption on macOS for quite some time, so if it was not universally + // true, hopefully bugs would have been filed. + // + // But ideally we'd plumb mTransferFunction through the various structs + // instead, which is a more delicate refactor. + if (StaticPrefs::gfx_color_management_hdr_video_assume_rec2020_uses_pq() && + StaticPrefs::gfx_color_management_hdr_video()) { + return Some(DXGI_COLOR_SPACE_YCBCR_STUDIO_G2084_LEFT_P2020); + } if (aColorRange == gfx::ColorRange::FULL) { - // XXX Add SMPTEST2084 handling. HDR content is not handled yet by - // video overlay. - return Some(DXGI_COLOR_SPACE_YCBCR_FULL_G22_LEFT_P2020); + if (aContentIsHDR && StaticPrefs::gfx_color_management_hdr_video()) { + // DXGI doesn't have a full range PQ YCbCr format, hopefully we won't + // have to deal with this case. 
+ gfxCriticalNoteOnce + << "GetSourceDXGIColorSpace: DXGI has no full range " + "BT2020 PQ YCbCr format, using studio range instead"; + return Some(DXGI_COLOR_SPACE_YCBCR_STUDIO_G2084_LEFT_P2020); + } else { + return Some(DXGI_COLOR_SPACE_YCBCR_FULL_G22_LEFT_P2020); + } } else { - return Some(DXGI_COLOR_SPACE_YCBCR_STUDIO_G22_LEFT_P2020); + if (aContentIsHDR && StaticPrefs::gfx_color_management_hdr_video()) { + return Some(DXGI_COLOR_SPACE_YCBCR_STUDIO_G2084_LEFT_P2020); + } else { + return Some(DXGI_COLOR_SPACE_YCBCR_STUDIO_G22_LEFT_P2020); + } } } @@ -2548,9 +2620,49 @@ static Maybe<DXGI_COLOR_SPACE_TYPE> GetSourceDXGIColorSpace( } static Maybe<DXGI_COLOR_SPACE_TYPE> GetSourceDXGIColorSpace( - const gfx::YUVRangedColorSpace aYUVColorSpace) { + const gfx::YUVRangedColorSpace aYUVColorSpace, const bool aContentIsHDR) { const auto info = FromYUVRangedColorSpace(aYUVColorSpace); - return GetSourceDXGIColorSpace(info.space, info.range); + return GetSourceDXGIColorSpace(info.space, info.range, aContentIsHDR); +} + +static Maybe<DXGI_COLOR_SPACE_TYPE> GetOutputDXGIColorSpace( + DXGI_FORMAT aSwapChainFormat, DXGI_COLOR_SPACE_TYPE aInputColorSpace, + bool aUseVpAutoHDR) { + switch (aSwapChainFormat) { + case DXGI_FORMAT_NV12: + case DXGI_FORMAT_YUY2: + return Some(aInputColorSpace); + case DXGI_FORMAT_P010: + case DXGI_FORMAT_P016: + return Some(DXGI_COLOR_SPACE_RGB_FULL_G2084_NONE_P2020); + case DXGI_FORMAT_R16G16B16A16_FLOAT: + return Some(DXGI_COLOR_SPACE_RGB_FULL_G10_NONE_P709); + case DXGI_FORMAT_R10G10B10A2_UNORM: + if (aInputColorSpace == DXGI_COLOR_SPACE_YCBCR_STUDIO_G2084_LEFT_P2020) { + // YCbCr BT2100 PQ HDR video being converted to RGB10A2 + return Some(DXGI_COLOR_SPACE_RGB_FULL_G2084_NONE_P2020); + } else if (aInputColorSpace == + DXGI_COLOR_SPACE_YCBCR_FULL_G22_LEFT_P2020 || + aInputColorSpace == + DXGI_COLOR_SPACE_YCBCR_STUDIO_G22_LEFT_P2020) { + return Some(DXGI_COLOR_SPACE_RGB_FULL_G22_NONE_P2020); + } + return 
Some(DXGI_COLOR_SPACE_RGB_FULL_G22_NONE_P709); + case DXGI_FORMAT_R8G8B8A8_UNORM: + case DXGI_FORMAT_R8G8B8A8_UNORM_SRGB: + case DXGI_FORMAT_B8G8R8A8_UNORM: + case DXGI_FORMAT_B8G8R8A8_UNORM_SRGB: + case DXGI_FORMAT_B8G8R8X8_UNORM: + case DXGI_FORMAT_B8G8R8X8_UNORM_SRGB: + // Refactor note - not sure if mUseVpAutoHDR is ever true here, + // it may only ever use DXGI_FORMAT_R16G16B16A16_FLOAT. + if (aUseVpAutoHDR) { + return Some(DXGI_COLOR_SPACE_RGB_FULL_G2084_NONE_P2020); + } + return Some(DXGI_COLOR_SPACE_RGB_FULL_G22_NONE_P709); + default: + return Nothing(); + } } bool DCSurfaceVideo::CallVideoProcessorBlt() { @@ -2563,7 +2675,7 @@ bool DCSurfaceVideo::CallVideoProcessorBlt() { const auto texture = mRenderTextureHost->AsRenderDXGITextureHost(); Maybe<DXGI_COLOR_SPACE_TYPE> sourceColorSpace = - GetSourceDXGIColorSpace(texture->GetYUVColorSpace()); + GetSourceDXGIColorSpace(texture->GetYUVColorSpace(), mContentIsHDR); if (sourceColorSpace.isNothing()) { gfxCriticalNote << "Unsupported color space"; return false; @@ -2609,18 +2721,16 @@ bool DCSurfaceVideo::CallVideoProcessorBlt() { videoContext1->VideoProcessorSetStreamColorSpace1(videoProcessor, 0, inputColorSpace); - DXGI_COLOR_SPACE_TYPE outputColorSpace = - IsYUVSwapChainFormat(mSwapChainFormat) - ? inputColorSpace - : DXGI_COLOR_SPACE_RGB_FULL_G22_NONE_P709; - - if (mUseVpAutoHDR) { - outputColorSpace = mSwapChainFormat == DXGI_FORMAT_R16G16B16A16_FLOAT - ? 
DXGI_COLOR_SPACE_RGB_FULL_G10_NONE_P709 - : DXGI_COLOR_SPACE_RGB_FULL_G2084_NONE_P2020; + Maybe<DXGI_COLOR_SPACE_TYPE> outputColorSpace = + GetOutputDXGIColorSpace(mSwapChainFormat, inputColorSpace, mUseVpAutoHDR); + if (outputColorSpace.isNothing()) { + gfxCriticalNoteOnce << "Unrecognized DXGI mSwapChainFormat, unsure of " + "correct DXGI colorspace: " + << gfx::hexa(mSwapChainFormat); + return false; } - hr = swapChain3->SetColorSpace1(outputColorSpace); + hr = swapChain3->SetColorSpace1(outputColorSpace.ref()); if (FAILED(hr)) { gfxCriticalNoteOnce << "SetColorSpace1 failed: " << gfx::hexa(hr); RenderThread::Get()->NotifyWebRenderError( @@ -2628,7 +2738,7 @@ bool DCSurfaceVideo::CallVideoProcessorBlt() { return false; } videoContext1->VideoProcessorSetOutputColorSpace1(videoProcessor, - outputColorSpace); + outputColorSpace.ref()); D3D11_VIDEO_PROCESSOR_INPUT_VIEW_DESC inputDesc = {}; inputDesc.ViewDimension = D3D11_VPIV_DIMENSION_TEXTURE2D; @@ -2754,6 +2864,7 @@ void DCSurfaceVideo::ReleaseDecodeSwapChainResources() { mSwapChainSurfaceHandle = 0; } mUseVpAutoHDR = false; + mUseHDR = false; } DCSurfaceHandle::DCSurfaceHandle(bool aIsOpaque, DCLayerTree* aDCLayerTree) diff --git a/gfx/webrender_bindings/DCLayerTree.h b/gfx/webrender_bindings/DCLayerTree.h @@ -77,14 +77,16 @@ struct GpuOverlayInfo { bool mSupportsOverlays = false; bool mSupportsHardwareOverlays = false; DXGI_FORMAT mOverlayFormatUsed = DXGI_FORMAT_B8G8R8A8_UNORM; - DXGI_FORMAT mOverlayFormatUsedHdr = DXGI_FORMAT_R10G10B10A2_UNORM; + DXGI_FORMAT mOverlayFormatUsedHdr = DXGI_FORMAT_R16G16B16A16_FLOAT; UINT mNv12OverlaySupportFlags = 0; UINT mYuy2OverlaySupportFlags = 0; UINT mBgra8OverlaySupportFlags = 0; UINT mRgb10a2OverlaySupportFlags = 0; + UINT mRgba16fOverlaySupportFlags = 0; bool mSupportsVpSuperResolution = false; bool mSupportsVpAutoHDR = false; + bool mSupportsHDR = false; }; // - @@ -573,7 +575,7 @@ class DCSurfaceVideo : public DCSurface { protected: virtual ~DCSurfaceVideo(); - 
DXGI_FORMAT GetSwapChainFormat(bool aUseVpAutoHDR); + DXGI_FORMAT GetSwapChainFormat(bool aUseVpAutoHDR, bool aUseHDR); bool CreateVideoSwapChain(DXGI_FORMAT aFormat); bool CallVideoProcessorBlt(); void ReleaseDecodeSwapChainResources(); @@ -596,6 +598,8 @@ class DCSurfaceVideo : public DCSurface { bool mUseVpAutoHDR = false; bool mVpAutoHDRFailed = false; bool mVpSuperResolutionFailed = false; + bool mContentIsHDR = false; + bool mUseHDR = false; }; /** diff --git a/gfx/webrender_bindings/RenderD3D11TextureHost.cpp b/gfx/webrender_bindings/RenderD3D11TextureHost.cpp @@ -74,11 +74,40 @@ ID3D11Texture2D* RenderDXGITextureHost::GetD3D11Texture2DWithGL() { } size_t RenderDXGITextureHost::GetPlaneCount() const { - if (mFormat == gfx::SurfaceFormat::NV12 || - mFormat == gfx::SurfaceFormat::P010 || - mFormat == gfx::SurfaceFormat::P016) { - return 2; - } + switch (mFormat) { + case gfx::SurfaceFormat::NV12: + case gfx::SurfaceFormat::P010: + case gfx::SurfaceFormat::P016: { + return 2; + } + case gfx::SurfaceFormat::B8G8R8A8: + case gfx::SurfaceFormat::B8G8R8X8: + case gfx::SurfaceFormat::R8G8B8A8: + case gfx::SurfaceFormat::R8G8B8X8: + case gfx::SurfaceFormat::A8R8G8B8: + case gfx::SurfaceFormat::X8R8G8B8: + case gfx::SurfaceFormat::R8G8B8: + case gfx::SurfaceFormat::B8G8R8: + case gfx::SurfaceFormat::R5G6B5_UINT16: + case gfx::SurfaceFormat::R10G10B10A2_UINT32: + case gfx::SurfaceFormat::R10G10B10X2_UINT32: + case gfx::SurfaceFormat::R16G16B16A16F: + case gfx::SurfaceFormat::A8: + case gfx::SurfaceFormat::A16: + case gfx::SurfaceFormat::R8G8: + case gfx::SurfaceFormat::R16G16: + case gfx::SurfaceFormat::YUV420: + case gfx::SurfaceFormat::YUV420P10: + case gfx::SurfaceFormat::YUV422P10: + case gfx::SurfaceFormat::NV16: + case gfx::SurfaceFormat::YUY2: + case gfx::SurfaceFormat::HSV: + case gfx::SurfaceFormat::Lab: + case gfx::SurfaceFormat::Depth: + case gfx::SurfaceFormat::UNKNOWN: + return 1; + } + MOZ_ASSERT_UNREACHABLE("unhandled enum value for gfx::SurfaceFormat"); 
return 1; } diff --git a/gfx/webrender_bindings/RenderTextureHostSWGL.cpp b/gfx/webrender_bindings/RenderTextureHostSWGL.cpp @@ -69,6 +69,10 @@ bool RenderTextureHostSWGL::UpdatePlanes(RenderCompositor* aCompositor) { MOZ_ASSERT(colorDepth == gfx::ColorDepth::COLOR_10); internalFormat = i > 0 ? LOCAL_GL_RG16 : LOCAL_GL_R16; break; + case gfx::SurfaceFormat::P016: + MOZ_ASSERT(colorDepth == gfx::ColorDepth::COLOR_16); + internalFormat = i > 0 ? LOCAL_GL_RG16 : LOCAL_GL_R16; + break; case gfx::SurfaceFormat::YUY2: MOZ_ASSERT(colorDepth == gfx::ColorDepth::COLOR_8); internalFormat = LOCAL_GL_RGB_RAW_422_APPLE; @@ -176,6 +180,7 @@ bool RenderTextureHostSWGL::LockSWGLCompositeSurface( case gfx::SurfaceFormat::YUV420: case gfx::SurfaceFormat::NV12: case gfx::SurfaceFormat::P010: + case gfx::SurfaceFormat::P016: case gfx::SurfaceFormat::YUY2: { aInfo->yuv_planes = mPlanes.size(); auto colorSpace = GetYUVColorSpace(); diff --git a/modules/libpref/init/StaticPrefList.yaml b/modules/libpref/init/StaticPrefList.yaml @@ -6997,6 +6997,26 @@ value: true # Match naive behavior, but hopefully we can stop soon! mirror: always +# Enable HDR video support. +- name: gfx.color_management.hdr_video + type: RelaxedAtomicBool +#if defined(XP_WIN) + value: @IS_EARLY_BETA_OR_EARLIER@ +#else + value: true +#endif + mirror: always + +# Pending a better refactor we assume BT2020 colorspace is used with PQ transfer +# function (HDR) rather than assuming it is BT709/BT1886 transfer function, +# we've been making this assumption for a while on macOS HDR videos and it seems +# to be the norm. We should refactor to get mTransferFunction to compositor code +# instead... +- name: gfx.color_management.hdr_video_assume_rec2020_uses_pq + type: RelaxedAtomicBool + value: true + mirror: always + #ifdef XP_MACOSX # Whether GL contexts can be migrated to a different GPU (to match the one the # OS is using for composition). 
@@ -8149,6 +8169,12 @@ #endif mirror: once +# Enable overlays for HDR content +- name: gfx.webrender.overlay-hdr + type: bool + value: true + mirror: once + # Enable a dedicated arena on frame builder threads. - name: gfx.webrender.frame-builder-thread-local-arena type: bool @@ -11064,11 +11090,7 @@ # Is matching video-dynamic-range: high allowed? - name: layout.css.video-dynamic-range.allows-high type: RelaxedAtomicBool -#if defined(XP_MACOSX) || defined(MOZ_WAYLAND) value: true -#else - value: false -#endif mirror: always # Is support for CSS Typed Object Model API enabled?