tor-browser

The Tor Browser
git clone https://git.dasho.dev/tor-browser.git
Log | Files | Refs | README | LICENSE

commit 09c39febaf7f372b827241c4579b1701c6a9952a
parent ff3e3b050763acbb0af2da96eef47e12c9f6d930
Author: agoloman <agoloman@mozilla.com>
Date:   Tue,  7 Oct 2025 20:06:17 +0300

Revert (Bug 1917965, Bug 1984198, Bug 1917970, Bug 1978496, Bug 1917974, Bug 1988076, Bug 1918741, Bug 1984157, Bug 1979087, Bug 1978494, Bug 1917976, Bug 1950965, Bug 1978495, Bug 1949981, Bug 1950972, Bug 1917975, Bug 1918013, Bug 1975014, Bug 1917982, Bug 1917963, Bug 1975024) - for causing build bustages @CacheIndex.cpp

This reverts commit 26c4a68ff792488e72b2f1fb36b4d3ce6c273d3e.

Revert "Bug 1978495: Make clear-site-data synchronous r=necko-reviewers,valentin"

This reverts commit 19e809294cafc7ef9643211d6b09a17f314a4b56.

Revert "Bug 1917965: Bug Fix for AddStandardHeaders and an await in TRR r=necko-reviewers,valentin"

This reverts commit 4c4a17eaeb33a194c56c6fd33e42246aabd69b1f.

Revert "Bug 1984198: Fix for ipv6 clear by principal for Compression Dictionaries r=edgul"

This reverts commit da9fc176ae56f1a079e297a4de2e5fc16252f8ec.

Revert "Bug 1917970: Update handling of removing encodings for CompressionDictionaries r=necko-reviewers,valentin,kershaw"

This reverts commit 4c7312620865aee9739abccef2c90f8dab6d8cd6.

Revert "Bug 1978496: Add markers for Compression Dictionaries #necko-reviewers! r=necko-reviewers,valentin"

This reverts commit d7cb9a5e0b91481697d6f1ebff33aba1b8e0a916.

Revert "Bug 1917974: Move CompressionDictionaries AddAcceptAndDictionaryHeaders to run earlier r=necko-reviewers,kershaw,devtools-reviewers,bomsy"

This reverts commit df63d60f8fea02540dc294965bc7d7c5b2d84251.

Revert "Bug 1917965: Update cache listener changes to ensure data is cached in compressed form r=necko-reviewers,kershaw"

This reverts commit 46c6d977d7012c66a40ff5391faa0a2b302e399d.

Revert "Bug 1917970: only remove dcb and dcz Content-Encodings r=necko-reviewers,kershaw"

This reverts commit bc645c1e248b60662445630fe66ccf811fa2239c.

Revert "Bug 1988076: Shutdown cleanup issues for Compression Dictionaries r=necko-reviewers,valentin"

This reverts commit 22c04bad814bdd93fcd6fc4fb546fab5af80195e.

Revert "Bug 1918741: Add unit tests for Compression Dictionaries r=necko-reviewers,kershaw,valentin"

This reverts commit b09a89245a3eee0311341b15598258e94895fb5d.

Revert "Bug 1917974: Clean up call to GetDictionaryFor() r=valentin,necko-reviewers"

This reverts commit b4d8ad3a34044330708f97b4b2ea8ed7057c8e16.

Revert "Bug 1917965: Minor Compression Dictionary cleanup r=necko-reviewers,valentin"

This reverts commit 9ef82139ca5cc4c9b5374e6c4355995b0cad5b55.

Revert "Bug 1978496: Add more logs for Compression Dictionaries r=necko-reviewers,valentin"

This reverts commit a85f97ac8c0de6f0e88a0996a27bee311ad297a1.

Revert "Bug 1984157: Vary support for Compression Dictionaries r=necko-reviewers,valentin"

This reverts commit 780bddc9453d9837862c57e653ba56fc2fa71e94.

Revert "Bug 1917965: Add design doc r=necko-reviewers,kershaw"

This reverts commit cf09ea9257deacb3468106cc47864ce61ea5c659.

Revert "Bug 1979087: Use URLPattern for matching Compression Dictionary entries r=edgul,necko-reviewers,kershaw"

This reverts commit 69720d6199ba4319dbc4486e51c1a42f077d394b.

Revert "Bug 1978494: Fix GetOriginalHeader to account for new header type r=necko-reviewers,valentin"

This reverts commit 0939a45626b680618594f6e4d69595c6594207e8.

Revert "Bug 1978494: Don't use stale dictionaries, more logs, improve handling of dictionary origin reading r=necko-reviewers,valentin"

This reverts commit a2c00deb4d2d113ecf9d1e6c25de7dca65c8da2a.

Revert "Bug 1917976: Compression Dictionary match-dest support r=necko-reviewers,valentin,kershaw"

This reverts commit bf874cae0dc56c292e7e3a5658eeb33bdce5be5e.

Revert "Bug 1950965: about:cache support for Compression Dictionaries and cleanup r=necko-reviewers,valentin"

This reverts commit 115be236bf2b8aff98f8c885dcf2ea3886f174a7.

Revert "Bug 1978495: Compression Dictionaries clear-site and clear cookies support r=necko-reviewers,valentin"

This reverts commit 62d7f7168e4bb8d973fc73fff02ecf36f68347a7.

Revert "Bug 1917974: Add OPEN_COMPLETE_ONLY and use it for Compression Dictionary Prefetch r=necko-reviewers,valentin"

This reverts commit 5076f7a779f569f2ef6921f7a565a78c182aab00.

Revert "Bug 1917974: Hook in Prefetching of cache entries for Compression Dictionaries r=necko-reviewers,valentin"

This reverts commit 77f0cd19123f76b1fe85ad3150a44009ea0e38c3.

Revert "Bug 1949981: Add code to clear Compression Dictionary entries r=necko-reviewers,valentin"

This reverts commit 515eb81e07f69c38a7d98a84d7f80dd42937b036.

Revert "Bug 1978494: Read and write of per-origin compression dictionary cache entries/metadata r=necko-reviewers,valentin"

This reverts commit 32fb24708cfbf7a526b858c2600a2d44a4a82c49.

Revert "Bug 1950972: Allow changing Content-Encoding header when we decompress for cache storage r=necko-reviewers,valentin"

This reverts commit c7c5e2b80072563dc6ef9044a796b21ab2a3b2e3.

Revert "Bug 1917974: Add OnCacheEntryCallbacks for Compression Dictionaries r=necko-reviewers,valentin"

This reverts commit e599730050428ff9ba6b10cf88cb3048ee7c4b8b.

Revert "Bug 1917974: Make GetDictionaryFor async since we need to query the cache r=necko-reviewers,valentin"

This reverts commit 33cb96b72fdf243a20a9b0ba7887b3bdc3611141.

Revert "Bug 1917975: Compression Dictionary initial metadata parsing r=necko-reviewers,valentin"

This reverts commit 2f04f4d43c3ea35fd5f1f3f0cfb52eb2a8fb0663.

Revert "Bug 1917975: Read dictionaries from cache for Compression Dictionaries r=necko-reviewers,valentin"

This reverts commit 23e72a7efc2215073fc1866e21917e51c38669cb.

Revert "Bug 1918013: Compression Dictionaries link rel= support r=smaug,necko-reviewers,kershaw"

This reverts commit f45f89e18858f77d0679126017d425f8eb14897c.

Revert "Bug 1975014: Add a pref for Compression Dictionaries r=necko-reviewers,kershaw"

This reverts commit 00429283549a36638f5e7b7e5c222925b7970e34.

Revert "Bug 1978496: Add logs for CompressionDictionaries r=necko-reviewers,sunil,kershaw"

This reverts commit 15dec1802619b153ecd1292d8ffe19b5e3bc835f.

Revert "Bug 1917970: Support decoding brotli & zstd dictionary-encoded streams r=necko-reviewers,valentin"

This reverts commit 3dfc96583b392d294604f0755cb507574254052f.

Revert "Bug 1917965: Add support for dcb and dcz content-encoding r=necko-reviewers,valentin"

This reverts commit fec1a197f44cb4e260ccd86069182491d70b7d42.

Revert "Bug 1917974: Accumulate Dictionary hash values r=necko-reviewers,sunil"

This reverts commit 5cdbe885bc5e31877e42acc272f3312681168f31.

Revert "Bug 1917982: Support use_as_dictionary response headers r=necko-reviewers,kershaw"

This reverts commit 642c049998bea84acecf6dd61165768fc8aff115.

Revert "Bug 1917965: Add initial Dictionary in-memory structures r=necko-reviewers,kershaw"

This reverts commit 19ab2d5af81085a112717f5a65baa17c4af205d0.

Revert "Bug 1917963: Add Cache flags for Compression Dictionaries r=necko-reviewers,kershaw"

This reverts commit e31fee540044734ce7f0f20fcc3f1c67e8f4137d.

Revert "Bug 1917963: Break out adding Accept-Encoding headers r=necko-reviewers,sunil,kershaw"

This reverts commit 93bd84a6183a234896adcbe257db12848046a5e8.

Revert "Bug 1918741: Change Compression Dictionary WPTs to assume pass r=necko-reviewers,kershaw"

This reverts commit e83caa2c66d28e216a204c6402bf68267ba393bc.

Revert "Bug 1975024: Cleanup Cache code and locks in prep for Compression Dictionaries #necko-reviewers r=necko-reviewers,kershaw"

This reverts commit 5dd4896f18ff7cb7fe9ddc08dfe2553f733ac07d.

Diffstat:
Mdom/base/LinkStyle.cpp | 2--
Mdom/base/LinkStyle.h | 3+--
Mdom/base/nsContentSink.cpp | 43++++++++++++++++++-------------------------
Mdom/base/nsContentSink.h | 9++++-----
Mdom/fetch/InternalRequest.cpp | 58----------------------------------------------------------
Mdom/fetch/InternalRequest.h | 2--
Mdom/html/HTMLLinkElement.cpp | 19+++----------------
Mdom/tests/browser/browser_scriptCache_redirect.js | 19+++++++++++++++++++
Mmodules/libpref/init/StaticPrefList.yaml | 6------
Mmodules/libpref/init/all.js | 3---
Mnetwerk/base/nsNetUtil.cpp | 122-------------------------------------------------------------------------------
Mnetwerk/base/nsNetUtil.h | 7-------
Mnetwerk/cache2/CacheEntry.cpp | 20+-------------------
Mnetwerk/cache2/CacheEntry.h | 12------------
Mnetwerk/cache2/CacheFile.cpp | 25+------------------------
Mnetwerk/cache2/CacheFile.h | 5-----
Mnetwerk/cache2/CacheFileContextEvictor.cpp | 20+++++++++-----------
Mnetwerk/cache2/CacheFileIOManager.cpp | 54++++++++++++------------------------------------------
Mnetwerk/cache2/CacheFileIOManager.h | 7+------
Mnetwerk/cache2/CacheFileMetadata.cpp | 3+--
Mnetwerk/cache2/CacheFileOutputStream.cpp | 6------
Mnetwerk/cache2/CacheFileOutputStream.h | 4----
Mnetwerk/cache2/CacheIndex.cpp | 50+++-----------------------------------------------
Mnetwerk/cache2/CacheIndex.h | 28++++------------------------
Mnetwerk/cache2/CacheStorage.cpp | 33+++++++++++++--------------------
Mnetwerk/cache2/CacheStorageService.cpp | 126+++++++++++++++++++++++++++++++------------------------------------------------
Mnetwerk/cache2/CacheStorageService.h | 33++++-----------------------------
Dnetwerk/cache2/Dictionary.cpp | 1375-------------------------------------------------------------------------------
Dnetwerk/cache2/Dictionary.h | 368-------------------------------------------------------------------------------
Mnetwerk/cache2/moz.build | 6+-----
Mnetwerk/cache2/nsICacheEntry.idl | 19-------------------
Mnetwerk/cache2/nsICacheStorage.idl | 34+---------------------------------
Mnetwerk/cache2/nsICacheStorageService.idl | 14--------------
Mnetwerk/docs/cache2/doc.rst | 66+-----------------------------------------------------------------
Mnetwerk/protocol/http/EarlyHintsService.cpp | 7-------
Mnetwerk/protocol/http/HttpBaseChannel.cpp | 56+++++---------------------------------------------------
Mnetwerk/protocol/http/HttpChannelChild.h | 9---------
Mnetwerk/protocol/http/HttpChannelParent.cpp | 2+-
Mnetwerk/protocol/http/HttpLog.h | 4----
Mnetwerk/protocol/http/InterceptedHttpChannel.h | 10----------
Mnetwerk/protocol/http/NullHttpChannel.cpp | 13-------------
Mnetwerk/protocol/http/ObliviousHttpChannel.cpp | 11-----------
Mnetwerk/protocol/http/PHttpChannelParams.h | 8--------
Mnetwerk/protocol/http/TRRServiceChannel.h | 9---------
Mnetwerk/protocol/http/nsHttpAtomList.h | 3---
Mnetwerk/protocol/http/nsHttpChannel.cpp | 486+++++++++++++------------------------------------------------------------------
Mnetwerk/protocol/http/nsHttpChannel.h | 32++------------------------------
Mnetwerk/protocol/http/nsHttpHandler.cpp | 138+++++++++++--------------------------------------------------------------------
Mnetwerk/protocol/http/nsHttpHandler.h | 15++-------------
Mnetwerk/protocol/http/nsHttpHeaderArray.cpp | 16+++++-----------
Mnetwerk/protocol/http/nsHttpHeaderArray.h | 3+--
Mnetwerk/protocol/http/nsHttpRequestHead.cpp | 6------
Mnetwerk/protocol/http/nsHttpRequestHead.h | 6------
Mnetwerk/protocol/http/nsHttpResponseHead.cpp | 13-------------
Mnetwerk/protocol/http/nsHttpResponseHead.h | 2--
Mnetwerk/protocol/http/nsIHttpChannel.idl | 9---------
Mnetwerk/protocol/viewsource/nsViewSourceChannel.cpp | 13-------------
Mnetwerk/streamconv/converters/nsHTTPCompressConv.cpp | 153++++++++-----------------------------------------------------------------------
Mnetwerk/streamconv/converters/nsHTTPCompressConv.h | 4----
Dnetwerk/test/gtest/TestUseAsDictionary.cpp | 226-------------------------------------------------------------------------------
Mnetwerk/test/gtest/moz.build | 1-
Mnetwerk/test/httpserver/NodeServer.sys.mjs | 1-
Mnetwerk/test/unit/head_cache.js | 25++++++-------------------
Dnetwerk/test/unit/test_cache2_compression_dictionary.js | 132-------------------------------------------------------------------------------
Dnetwerk/test/unit/test_dictionary_compression_dcb.js | 1113-------------------------------------------------------------------------------
Dnetwerk/test/unit/test_dictionary_retrieval.js | 510-------------------------------------------------------------------------------
Dnetwerk/test/unit/test_dictionary_storage.js | 612-------------------------------------------------------------------------------
Mnetwerk/test/unit/xpcshell.toml | 8--------
Atesting/web-platform/meta/fetch/compression-dictionary/dictionary-clear-site-data-cache.tentative.https.html.ini | 3+++
Atesting/web-platform/meta/fetch/compression-dictionary/dictionary-clear-site-data-cookies.tentative.https.html.ini | 3+++
Atesting/web-platform/meta/fetch/compression-dictionary/dictionary-clear-site-data-storage.tentative.https.html.ini | 3+++
Mtesting/web-platform/meta/fetch/compression-dictionary/dictionary-compressed.tentative.https.html.ini | 15+++++++++++++++
Atesting/web-platform/meta/fetch/compression-dictionary/dictionary-decompression.tentative.https.html.ini | 9+++++++++
Mtesting/web-platform/meta/fetch/compression-dictionary/dictionary-fetch-no-cors.tentative.https.html.ini | 3+++
Atesting/web-platform/meta/fetch/compression-dictionary/dictionary-fetch-with-link-element.tentative.https.html.ini | 9+++++++++
Atesting/web-platform/meta/fetch/compression-dictionary/dictionary-fetch-with-link-header.tentative.https.html.ini | 3+++
Atesting/web-platform/meta/fetch/compression-dictionary/dictionary-registration.tentative.https.html.ini | 15+++++++++++++++
Mtoolkit/components/cleardata/ClearDataService.sys.mjs | 22----------------------
Mtoolkit/components/clearsitedata/ClearSiteData.cpp | 9---------
Mtoolkit/components/extensions/test/mochitest/head_webrequest.js | 4----
Muriloader/preload/PreloadService.cpp | 42+++++++++++-------------------------------
Muriloader/preload/PreloadService.h | 19+++++++++----------
82 files changed, 364 insertions(+), 6052 deletions(-)

diff --git a/dom/base/LinkStyle.cpp b/dom/base/LinkStyle.cpp @@ -149,8 +149,6 @@ static uint32_t ToLinkMask(const nsAString& aLink) { mask = LinkStyle::ePRELOAD; } else if (aLink.EqualsLiteral("modulepreload")) { mask = LinkStyle::eMODULE_PRELOAD; - } else if (aLink.EqualsLiteral("compression-dictionary")) { - mask = LinkStyle::eCOMPRESSION_DICTIONARY; } return mask; diff --git a/dom/base/LinkStyle.h b/dom/base/LinkStyle.h @@ -117,8 +117,7 @@ class LinkStyle { ePRECONNECT = 0x00000020, // NOTE: 0x40 is unused ePRELOAD = 0x00000080, - eMODULE_PRELOAD = 0x00000100, - eCOMPRESSION_DICTIONARY = 0x00000200 + eMODULE_PRELOAD = 0x00000100 }; // The return value is a bitwise or of 0 or more RelValues. diff --git a/dom/base/nsContentSink.cpp b/dom/base/nsContentSink.cpp @@ -316,19 +316,10 @@ nsresult nsContentSink::ProcessLinkFromHeader(const net::LinkHeader& aHeader, } if (linkTypes & LinkStyle::ePRELOAD) { - PreloadHref(aHeader.mHref, aHeader.mAs, aHeader.mRel, aHeader.mType, - aHeader.mMedia, aHeader.mNonce, aHeader.mIntegrity, - aHeader.mSrcset, aHeader.mSizes, aHeader.mCrossOrigin, - aHeader.mReferrerPolicy, aEarlyHintPreloaderId, - aHeader.mFetchPriority); - } - - if (linkTypes & LinkStyle::eCOMPRESSION_DICTIONARY) { - PreloadHref(aHeader.mHref, u"fetch"_ns, aHeader.mRel, aHeader.mType, - aHeader.mMedia, aHeader.mNonce, aHeader.mIntegrity, - aHeader.mSrcset, aHeader.mSizes, aHeader.mCrossOrigin, - aHeader.mReferrerPolicy, aEarlyHintPreloaderId, - aHeader.mFetchPriority); + PreloadHref(aHeader.mHref, aHeader.mAs, aHeader.mType, aHeader.mMedia, + aHeader.mNonce, aHeader.mIntegrity, aHeader.mSrcset, + aHeader.mSizes, aHeader.mCrossOrigin, aHeader.mReferrerPolicy, + aEarlyHintPreloaderId, aHeader.mFetchPriority); } if ((linkTypes & LinkStyle::eMODULE_PRELOAD) && @@ -440,13 +431,15 @@ void nsContentSink::PrefetchHref(const nsAString& aHref, const nsAString& aAs, } } -void nsContentSink::PreloadHref( - const nsAString& aHref, const nsAString& aAs, const nsAString& aRel, - 
const nsAString& aType, const nsAString& aMedia, const nsAString& aNonce, - const nsAString& aIntegrity, const nsAString& aSrcset, - const nsAString& aSizes, const nsAString& aCORS, - const nsAString& aReferrerPolicy, uint64_t aEarlyHintPreloaderId, - const nsAString& aFetchPriority) { +void nsContentSink::PreloadHref(const nsAString& aHref, const nsAString& aAs, + const nsAString& aType, const nsAString& aMedia, + const nsAString& aNonce, + const nsAString& aIntegrity, + const nsAString& aSrcset, + const nsAString& aSizes, const nsAString& aCORS, + const nsAString& aReferrerPolicy, + uint64_t aEarlyHintPreloaderId, + const nsAString& aFetchPriority) { auto encoding = mDocument->GetDocumentCharacterSet(); nsCOMPtr<nsIURI> uri; NS_NewURI(getter_AddRefs(uri), aHref, encoding, mDocument->GetDocBaseURI()); @@ -471,8 +464,8 @@ void nsContentSink::PreloadHref( } mDocument->Preloads().PreloadLinkHeader( - uri, aHref, policyType, aAs, aRel, aType, aNonce, aIntegrity, aSrcset, - aSizes, aCORS, aReferrerPolicy, aEarlyHintPreloaderId, aFetchPriority); + uri, aHref, policyType, aAs, aType, aNonce, aIntegrity, aSrcset, aSizes, + aCORS, aReferrerPolicy, aEarlyHintPreloaderId, aFetchPriority); } void nsContentSink::PreloadModule( @@ -513,9 +506,9 @@ void nsContentSink::PreloadModule( moduleLoader->DisallowImportMaps(); mDocument->Preloads().PreloadLinkHeader( - uri, aHref, nsIContentPolicy::TYPE_SCRIPT, u"script"_ns, - u"modulepreload"_ns, u"module"_ns, aNonce, aIntegrity, u""_ns, u""_ns, - aCORS, aReferrerPolicy, aEarlyHintPreloaderId, aFetchPriority); + uri, aHref, nsIContentPolicy::TYPE_SCRIPT, u"script"_ns, u"module"_ns, + aNonce, aIntegrity, u""_ns, u""_ns, aCORS, aReferrerPolicy, + aEarlyHintPreloaderId, aFetchPriority); } void nsContentSink::PrefetchDNS(const nsAString& aHref) { diff --git a/dom/base/nsContentSink.h b/dom/base/nsContentSink.h @@ -141,11 +141,10 @@ class nsContentSink : public nsICSSLoaderObserver, void PrefetchHref(const nsAString& aHref, const nsAString& 
aAs, const nsAString& aType, const nsAString& aMedia); void PreloadHref(const nsAString& aHref, const nsAString& aAs, - const nsAString& aRel, const nsAString& aType, - const nsAString& aMedia, const nsAString& aNonce, - const nsAString& aIntegrity, const nsAString& aSrcset, - const nsAString& aSizes, const nsAString& aCORS, - const nsAString& aReferrerPolicy, + const nsAString& aType, const nsAString& aMedia, + const nsAString& aNonce, const nsAString& aIntegrity, + const nsAString& aSrcset, const nsAString& aSizes, + const nsAString& aCORS, const nsAString& aReferrerPolicy, uint64_t aEarlyHintPreloaderId, const nsAString& aFetchPriority); diff --git a/dom/fetch/InternalRequest.cpp b/dom/fetch/InternalRequest.cpp @@ -260,7 +260,6 @@ void InternalRequest::SetInterceptionContentPolicyType( } /* static */ -/* static */ RequestDestination InternalRequest::MapContentPolicyTypeToRequestDestination( nsContentPolicyType aContentPolicyType) { switch (aContentPolicyType) { @@ -369,63 +368,6 @@ RequestDestination InternalRequest::MapContentPolicyTypeToRequestDestination( return RequestDestination::_empty; } -/* static */ -RequestDestination InternalRequest::MapContentPolicyTypeToRequestDestination( - ExtContentPolicyType aContentPolicyType) { - switch (aContentPolicyType) { - case ExtContentPolicyType::TYPE_INVALID: - case ExtContentPolicyType::TYPE_OTHER: - return RequestDestination::_empty; - case ExtContentPolicyType::TYPE_SCRIPT: - return RequestDestination::Script; - case ExtContentPolicyType::TYPE_IMAGE: - return RequestDestination::Image; - case ExtContentPolicyType::TYPE_STYLESHEET: - return RequestDestination::Style; - case ExtContentPolicyType::TYPE_OBJECT: - return RequestDestination::Object; - case ExtContentPolicyType::TYPE_DOCUMENT: - return RequestDestination::Document; - case ExtContentPolicyType::TYPE_SUBDOCUMENT: - return RequestDestination::Iframe; - case ExtContentPolicyType::TYPE_PING: - case ExtContentPolicyType::TYPE_XMLHTTPREQUEST: - case 
ExtContentPolicyType::TYPE_DTD: - return RequestDestination::_empty; - case ExtContentPolicyType::TYPE_FONT: - return RequestDestination::Font; - case ExtContentPolicyType::TYPE_MEDIA: - case ExtContentPolicyType::TYPE_WEBSOCKET: - return RequestDestination::_empty; - case ExtContentPolicyType::TYPE_CSP_REPORT: - return RequestDestination::Report; - case ExtContentPolicyType::TYPE_XSLT: - return RequestDestination::Xslt; - case ExtContentPolicyType::TYPE_BEACON: - case ExtContentPolicyType::TYPE_FETCH: - return RequestDestination::_empty; - case ExtContentPolicyType::TYPE_IMAGESET: - return RequestDestination::Image; - case ExtContentPolicyType::TYPE_WEB_MANIFEST: - return RequestDestination::Manifest; - case ExtContentPolicyType::TYPE_SAVEAS_DOWNLOAD: - case ExtContentPolicyType::TYPE_SPECULATIVE: - return RequestDestination::_empty; - case ExtContentPolicyType::TYPE_UA_FONT: - return RequestDestination::Font; - case ExtContentPolicyType::TYPE_PROXIED_WEBRTC_MEDIA: - case ExtContentPolicyType::TYPE_WEB_IDENTITY: - case ExtContentPolicyType::TYPE_WEB_TRANSPORT: - return RequestDestination::_empty; - case ExtContentPolicyType::TYPE_JSON: - return RequestDestination::Json; - // Do not add default: so that compilers can catch the missing case. - } - - MOZ_ASSERT(false, "Unhandled ExContentPolicyType value"); - return RequestDestination::_empty; -} - // static bool InternalRequest::IsNavigationContentPolicy( nsContentPolicyType aContentPolicyType) { diff --git a/dom/fetch/InternalRequest.h b/dom/fetch/InternalRequest.h @@ -426,8 +426,6 @@ class InternalRequest final : public AtomicSafeRefCounted<InternalRequest> { // destination. 
static RequestDestination MapContentPolicyTypeToRequestDestination( nsContentPolicyType aContentPolicyType); - static RequestDestination MapContentPolicyTypeToRequestDestination( - ExtContentPolicyType aContentPolicyType); private: static bool IsNavigationContentPolicy(nsContentPolicyType aContentPolicyType); diff --git a/dom/html/HTMLLinkElement.cpp b/dom/html/HTMLLinkElement.cpp @@ -323,23 +323,17 @@ void HTMLLinkElement::AfterSetAttr(int32_t aNameSpaceID, nsAtom* aName, "preload", "prefetch", "dns-prefetch", "stylesheet", "next", "alternate", \ "preconnect", "icon", "search", nullptr -static const DOMTokenListSupportedToken sSupportedRelValueCombinations[][13] = { +static const DOMTokenListSupportedToken sSupportedRelValueCombinations[][12] = { {SUPPORTED_REL_VALUES_BASE}, {"manifest", SUPPORTED_REL_VALUES_BASE}, {"modulepreload", SUPPORTED_REL_VALUES_BASE}, - {"modulepreload", "manifest", SUPPORTED_REL_VALUES_BASE}, - {"compression-dictionary", SUPPORTED_REL_VALUES_BASE}, - {"compression-dictionary", "manifest", SUPPORTED_REL_VALUES_BASE}, - {"compression-dictionary", "modulepreload", SUPPORTED_REL_VALUES_BASE}, - {"compression-dictionary", "modulepreload", "manifest", - SUPPORTED_REL_VALUES_BASE}}; + {"modulepreload", "manifest", SUPPORTED_REL_VALUES_BASE}}; #undef SUPPORTED_REL_VALUES_BASE nsDOMTokenList* HTMLLinkElement::RelList() { if (!mRelList) { int index = (StaticPrefs::dom_manifest_enabled() ? 1 : 0) | - (StaticPrefs::network_modulepreload() ? 2 : 0) | - (StaticPrefs::network_http_dictionaries_enable() ? 4 : 0); + (StaticPrefs::network_modulepreload() ? 
2 : 0); mRelList = new nsDOMTokenList(this, nsGkAtoms::rel, sSupportedRelValueCombinations[index]); @@ -471,13 +465,6 @@ void HTMLLinkElement:: } } - if (linkTypes & eCOMPRESSION_DICTIONARY) { - if (nsCOMPtr<nsIURI> uri = GetURI()) { - StartPreload(nsIContentPolicy::TYPE_INTERNAL_FETCH_PRELOAD); - return; - } - } - if (linkTypes & ePRELOAD) { if (nsCOMPtr<nsIURI> uri = GetURI()) { nsContentPolicyType policyType; diff --git a/dom/tests/browser/browser_scriptCache_redirect.js b/dom/tests/browser/browser_scriptCache_redirect.js @@ -49,6 +49,25 @@ add_task(async function test_redirectCache() { ",script=not-cacheable", ].join(""), }, + { + query: "?redirect=not-cacheable&script=cacheable", + cachedCounter: true, + log: [ + ",redirect=not-cacheable&script=cacheable", + ",script=cacheable", + ",redirect=not-cacheable&script=cacheable", + ].join(""), + }, + { + query: "?redirect=not-cacheable&script=not-cacheable", + cachedCounter: false, + log: [ + ",redirect=not-cacheable&script=not-cacheable", + ",script=not-cacheable", + ",redirect=not-cacheable&script=not-cacheable", + ",script=not-cacheable", + ].join(""), + }, ]; for (const { query, cachedCounter, log } of tests) { diff --git a/modules/libpref/init/StaticPrefList.yaml b/modules/libpref/init/StaticPrefList.yaml @@ -15740,12 +15740,6 @@ value: false mirror: always -# Enable HTTP Compression Dictionary support -- name: network.http.dictionaries.enable - type: RelaxedAtomicBool - value: true - mirror: always - #--------------------------------------------------------------------------- # Prefs starting with "nglayout." 
#--------------------------------------------------------------------------- diff --git a/modules/libpref/init/all.js b/modules/libpref/init/all.js @@ -1167,9 +1167,6 @@ pref("network.http.redirection-limit", 20); // NOTE: separate values with comma+space (", "): see bug 576033 pref("network.http.accept-encoding", "gzip, deflate"); pref("network.http.accept-encoding.secure", "gzip, deflate, br, zstd"); -// dictionary compression is always only for secure connections -// Added to network.http.accept-encoding.secure -pref("network.http.accept-encoding.dictionary", "dcb, dcz"); // Prompt for redirects resulting in unsafe HTTP requests pref("network.http.prompt-temp-redirect", false); diff --git a/netwerk/base/nsNetUtil.cpp b/netwerk/base/nsNetUtil.cpp @@ -4044,128 +4044,6 @@ void WarnIgnoredPreload(const mozilla::dom::Document& aDoc, nsIURI& aURI) { "PreloadIgnoredInvalidAttr", params); } -bool NS_ParseUseAsDictionary(const nsACString& aValue, nsACString& aMatch, - nsACString& aMatchId, - nsTArray<nsCString>& aMatchDestItems, - nsACString& aType) { - // Note: match= is required - // Use-As-Dictionary = %s"match" / - // %il"match-dest" / - // %s"id" / - // %t"type" ; case-sensitive - - nsCOMPtr<nsISFVService> sfv = GetSFVService(); - - nsCOMPtr<nsISFVDictionary> parsedHeader; - nsresult rv; - if (NS_FAILED( - rv = sfv->ParseDictionary(aValue, getter_AddRefs(parsedHeader)))) { - return false; - } - - nsCOMPtr<nsISFVItemOrInnerList> match; - rv = parsedHeader->Get("match"_ns, getter_AddRefs(match)); - if (NS_FAILED(rv)) { - return false; // match is required, fail if not found - } - if (nsCOMPtr<nsISFVItem> listItem = do_QueryInterface(match)) { - nsCOMPtr<nsISFVBareItem> value; - rv = listItem->GetValue(getter_AddRefs(value)); - if (NS_FAILED(rv)) { - return false; - } - if (nsCOMPtr<nsISFVString> stringVal = do_QueryInterface(value)) { - if (NS_FAILED(stringVal->GetValue(aMatch))) { - return false; - } - if (aMatch.IsEmpty()) { - return false; // match is required, 
fail if not found - } - } else { - return false; - } - } else { - return false; - } - - nsCOMPtr<nsISFVItemOrInnerList> matchdest; - rv = parsedHeader->Get("match-dest"_ns, getter_AddRefs(matchdest)); - if (NS_SUCCEEDED(rv)) { - if (nsCOMPtr<nsISFVInnerList> innerList = do_QueryInterface(matchdest)) { - // Extract the first entry of each inner list, which should contain the - // endpoint's URL string - nsTArray<RefPtr<nsISFVItem>> items; - if (NS_FAILED(innerList->GetItems(items))) { - return false; - } - // Don't check items.IsEmpty() because an empty list is valid - - for (auto& item : items) { - nsCOMPtr<nsISFVBareItem> value; - if (NS_FAILED(item->GetValue(getter_AddRefs(value)))) { - return false; - } - if (nsCOMPtr<nsISFVString> stringVal = do_QueryInterface(value)) { - nsAutoCString string; - if (NS_FAILED(stringVal->GetValue(string))) { - return false; - } - aMatchDestItems.AppendElement(string); - } else { - return false; // match-dest is an inner list of strings - } - } - } - } - - nsCOMPtr<nsISFVItemOrInnerList> matchid; - rv = parsedHeader->Get("id"_ns, getter_AddRefs(matchid)); - if (NS_SUCCEEDED(rv)) { - if (nsCOMPtr<nsISFVItem> listItem = do_QueryInterface(matchid)) { - nsCOMPtr<nsISFVBareItem> value; - rv = listItem->GetValue(getter_AddRefs(value)); - if (NS_FAILED(rv)) { - return false; - } - if (nsCOMPtr<nsISFVString> stringVal = do_QueryInterface(value)) { - if (NS_FAILED(stringVal->GetValue(aMatchId))) { - return false; - } - } else { - return false; - } - } else { - return false; - } - } - - nsCOMPtr<nsISFVItemOrInnerList> type; - rv = parsedHeader->Get("type"_ns, getter_AddRefs(type)); - if (NS_SUCCEEDED(rv)) { - if (nsCOMPtr<nsISFVItem> listItem = do_QueryInterface(type)) { - nsCOMPtr<nsISFVBareItem> value; - rv = listItem->GetValue(getter_AddRefs(value)); - if (NS_FAILED(rv)) { - return false; - } - if (nsCOMPtr<nsISFVToken> tokenVal = do_QueryInterface(value)) { - if (NS_FAILED(tokenVal->GetValue(aType))) { - return false; - } - if 
(!aType.Equals("raw"_ns)) { - return false; - } - } else { - return false; - } - } else { - return false; - } - } - - return true; -} - nsresult HasRootDomain(const nsACString& aInput, const nsACString& aHost, bool* aResult) { if (NS_WARN_IF(!aResult)) { diff --git a/netwerk/base/nsNetUtil.h b/netwerk/base/nsNetUtil.h @@ -1165,13 +1165,6 @@ bool CheckPreloadAttrs(const nsAttrValue& aAs, const nsAString& aType, mozilla::dom::Document* aDocument); void WarnIgnoredPreload(const mozilla::dom::Document&, nsIURI&); -// Implements parsing of Use-As-Dictionary headers for Compression Dictionary -// support. -bool NS_ParseUseAsDictionary(const nsACString& aValue, nsACString& aMatch, - nsACString& aMatchId, - nsTArray<nsCString>& aMatchDestItems, - nsACString& aType); - /** * Returns true if the |aInput| in is part of the root domain of |aHost|. * For example, if |aInput| is "www.mozilla.org", and we pass in diff --git a/netwerk/cache2/CacheEntry.cpp b/netwerk/cache2/CacheEntry.cpp @@ -266,12 +266,6 @@ nsresult CacheEntry::HashingKey(const nsACString& aStorageID, return HashingKey(aStorageID, aEnhanceID, spec, aResult); } -// The hash key (which is also the filename) is: -// A[~B]:C -// Where A is the storage ID ([O<oa>,][a,][p,]), B is the optional 'id', -// and C is the URI 'oa' are the OriginAttributes in suffix form -// (i.e. 
|^key=value&key2=value2|) - // static nsresult CacheEntry::HashingKey(const nsACString& aStorageID, const nsACString& aEnhanceID, @@ -295,12 +289,6 @@ nsresult CacheEntry::HashingKey(const nsACString& aStorageID, return NS_OK; } -nsresult CacheEntry::SetDictionary(DictionaryCacheEntry* aDict) { - mDict = aDict; - mFile->SetDictionary(aDict); - return NS_OK; -} - void CacheEntry::AsyncOpen(nsICacheEntryOpenCallback* aCallback, uint32_t aFlags) { bool readonly = aFlags & nsICacheStorage::OPEN_READONLY; @@ -533,7 +521,7 @@ already_AddRefed<CacheEntryHandle> CacheEntry::ReopenTruncated( mLock.AssertCurrentThreadOwns(); // Hold callbacks invocation, AddStorageEntry would invoke from doom - // prematurely + // prematurly mPreventCallbacks = true; RefPtr<CacheEntryHandle> handle; @@ -1095,12 +1083,6 @@ nsresult CacheEntry::GetOnStopTime(uint64_t* aTime) { return mFile->GetOnStopTime(aTime); } -nsresult CacheEntry::GetReadyOrRevalidating(bool* aReady) { - mozilla::MutexAutoLock lock(mLock); - *aReady = (mState == READY || mState == REVALIDATING); - return NS_OK; -} - nsresult CacheEntry::SetNetworkTimes(uint64_t aOnStartTime, uint64_t aOnStopTime) { if (NS_SUCCEEDED(mFileStatus)) { diff --git a/netwerk/cache2/CacheEntry.h b/netwerk/cache2/CacheEntry.h @@ -24,7 +24,6 @@ #include "mozilla/Attributes.h" #include "mozilla/Mutex.h" #include "mozilla/TimeStamp.h" -#include "Dictionary.h" static inline uint32_t PRTimeToSeconds(PRTime t_usec) { return uint32_t(t_usec / PR_USEC_PER_SEC); @@ -76,7 +75,6 @@ class CacheEntry final : public nsIRunnable, nsresult SetExpirationTime(uint32_t expirationTime); nsresult GetOnStartTime(uint64_t* aTime); nsresult GetOnStopTime(uint64_t* aTime); - nsresult GetReadyOrRevalidating(bool* aReady); nsresult SetNetworkTimes(uint64_t onStartTime, uint64_t onStopTime); nsresult SetContentType(uint8_t aContentType); nsresult ForceValidFor(uint32_t aSecondsToTheFuture); @@ -106,8 +104,6 @@ class CacheEntry final : public nsIRunnable, nsIInputStream** 
_retval); nsresult GetLoadContextInfo(nsILoadContextInfo** aInfo); - nsresult SetDictionary(DictionaryCacheEntry* aDict); - public: uint32_t GetMetadataMemoryConsumption(); nsCString const& GetStorageID() const { return mStorageID; } @@ -431,8 +427,6 @@ class CacheEntry final : public nsIRunnable, mozilla::TimeStamp mLoadStart; uint32_t mUseCount{0}; - RefPtr<DictionaryCacheEntry> mDict; - const uint64_t mCacheEntryId; }; @@ -472,9 +466,6 @@ class CacheEntryHandle final : public nsICacheEntry { NS_IMETHOD GetOnStopTime(uint64_t* aOnStopTime) override { return mEntry->GetOnStopTime(aOnStopTime); } - NS_IMETHOD GetReadyOrRevalidating(bool* aReady) override { - return mEntry->GetReadyOrRevalidating(aReady); - } NS_IMETHOD SetNetworkTimes(uint64_t onStartTime, uint64_t onStopTime) override { return mEntry->SetNetworkTimes(onStartTime, onStopTime); @@ -551,9 +542,6 @@ class CacheEntryHandle final : public nsICacheEntry { nsILoadContextInfo** aLoadContextInfo) override { return mEntry->GetLoadContextInfo(aLoadContextInfo); } - NS_IMETHOD SetDictionary(DictionaryCacheEntry* aDict) override { - return mEntry->SetDictionary(aDict); - } // Specific implementation: NS_IMETHOD Dismiss() override; diff --git a/netwerk/cache2/CacheFile.cpp b/netwerk/cache2/CacheFile.cpp @@ -290,14 +290,6 @@ nsresult CacheFile::Init(const nsACString& aKey, bool aCreateNew, return NS_OK; } -void CacheFile::SetDictionary(DictionaryCacheEntry* aDict) { - CacheFileAutoLock lock(this); - mDict = aDict; - if (OutputStreamExists(false)) { - mOutput->SetDictionary(aDict); - } -} - void CacheFile::Key(nsACString& aKey) { CacheFileAutoLock lock(this); aKey = mKey; @@ -491,11 +483,6 @@ nsresult CacheFile::OnFileOpened(CacheFileHandle* aHandle, nsresult aResult) { mOpeningFile = false; - if (mDict && OutputStreamExists(false)) { - mOutput->SetDictionary(mDict); - // leave mDict set for hash accumulation - } - autoDoom.mListener.swap(mDoomAfterOpenListener); if (mMemoryOnly) { @@ -911,10 +898,6 @@ nsresult 
CacheFile::OpenOutputStream(CacheOutputCloseListener* aCloseListener, "[this=%p]", mOutput, this)); - if (mDict) { - mOutput->SetDictionary(mDict); - } - mDataAccessed = true; *_retval = do_AddRef(mOutput).take(); return NS_OK; @@ -2122,13 +2105,7 @@ void CacheFile::RemoveOutput(CacheFileOutputStream* aOutput, nsresult aStatus) { return; } - // This is to finalize the Hash calculation - if (mDict) { - mDict->FinishHash(); - mDict = nullptr; - } - - mOutput = nullptr; // XXX should this be after NotifyCloseListener? + mOutput = nullptr; // Cancel all queued chunk and update listeners that cannot be satisfied NotifyListenersAboutOutputRemoval(); diff --git a/netwerk/cache2/CacheFile.h b/netwerk/cache2/CacheFile.h @@ -8,7 +8,6 @@ #include "CacheFileChunk.h" #include "CacheFileIOManager.h" #include "CacheFileMetadata.h" -#include "Dictionary.h" #include "nsRefPtrHashtable.h" #include "nsClassHashtable.h" #include "mozilla/Mutex.h" @@ -126,8 +125,6 @@ class MOZ_CAPABILITY("mutex") CacheFile final bool IsWriteInProgress(); bool EntryWouldExceedLimit(int64_t aOffset, int64_t aSize, bool aIsAltData); - void SetDictionary(DictionaryCacheEntry* aDict); - // Memory reporting size_t SizeOfExcludingThis(mozilla::MallocSizeOf mallocSizeOf) const; size_t SizeOfIncludingThis(mozilla::MallocSizeOf mallocSizeOf) const; @@ -228,8 +225,6 @@ class MOZ_CAPABILITY("mutex") CacheFile final nsCString mAltDataType MOZ_GUARDED_BY(this); // The type of the saved alt-data. May be empty. - RefPtr<DictionaryCacheEntry> mDict MOZ_GUARDED_BY(this); - RefPtr<CacheFileHandle> mHandle MOZ_GUARDED_BY(this); RefPtr<CacheFileMetadata> mMetadata MOZ_GUARDED_BY(this); nsCOMPtr<CacheFileListener> mListener MOZ_GUARDED_BY(this); diff --git a/netwerk/cache2/CacheFileContextEvictor.cpp b/netwerk/cache2/CacheFileContextEvictor.cpp @@ -660,8 +660,8 @@ void CacheFileContextEvictor::EvictEntries() { // this must be a new one. Skip it. 
LOG( ("CacheFileContextEvictor::EvictEntries() - Skipping entry since we " - "found an active handle. [handle=%p key=%s]", - handle.get(), handle->Key().get())); + "found an active handle. [handle=%p]", + handle.get())); continue; } @@ -672,7 +672,7 @@ void CacheFileContextEvictor::EvictEntries() { }; rv = CacheIndex::HasEntry(hash, &status, callback); // This must never fail, since eviction (this code) happens only when the - // index is up-to-date and thus the information is known. + // index is up-to-date and thus the informatin is known. MOZ_ASSERT(NS_SUCCEEDED(rv)); if (pinned != mEntries[0]->mPinned) { @@ -684,22 +684,20 @@ void CacheFileContextEvictor::EvictEntries() { continue; } - // Read metadata from the file synchronously - RefPtr<CacheFileMetadata> metadata = new CacheFileMetadata(); - { + // Check whether we must filter by either base domain or by origin. + if (!mEntries[0]->mBaseDomain.IsEmpty() || + !mEntries[0]->mOrigin.IsEmpty()) { // Get and read metadata for the entry nsCOMPtr<nsIFile> file; CacheFileIOManager::gInstance->GetFile(&hash, getter_AddRefs(file)); + // Read metadata from the file synchronously + RefPtr<CacheFileMetadata> metadata = new CacheFileMetadata(); rv = metadata->SyncReadMetadata(file); if (NS_WARN_IF(NS_FAILED(rv))) { continue; } - } - // Check whether we must filter by either base domain or by origin. - if (!mEntries[0]->mBaseDomain.IsEmpty() || - !mEntries[0]->mOrigin.IsEmpty()) { // Now get the context + enhance id + URL from the key. 
nsAutoCString uriSpec; RefPtr<nsILoadContextInfo> info = @@ -822,7 +820,7 @@ void CacheFileContextEvictor::EvictEntries() { LOG(("CacheFileContextEvictor::EvictEntries - Removing entry.")); file->Remove(false); - CacheIndex::RemoveEntry(&hash, metadata->GetKey(), false); + CacheIndex::RemoveEntry(&hash); } MOZ_ASSERT_UNREACHABLE("We should never get here"); diff --git a/netwerk/cache2/CacheFileIOManager.cpp b/netwerk/cache2/CacheFileIOManager.cpp @@ -1314,7 +1314,6 @@ nsresult CacheFileIOManager::Shutdown() { } CacheIndex::Shutdown(); - DictionaryCache::Shutdown(); if (CacheObserver::ClearCacheOnShutdown()) { auto totalTimer = @@ -1361,7 +1360,7 @@ void CacheFileIOManager::ShutdownInternal() { // (hashes won't match). if (!h->IsSpecialFile() && !h->mIsDoomed && !h->mFileExists) { - CacheIndex::RemoveEntry(h->Hash(), h->Key()); + CacheIndex::RemoveEntry(h->Hash()); } // Remove the handle from mHandles/mSpecialHandles @@ -1821,7 +1820,7 @@ nsresult CacheFileIOManager::OpenFileInternal(const SHA1Sum::Hash* aHash, NS_ENSURE_SUCCESS(rv, rv); if (exists) { - CacheIndex::RemoveEntry(aHash, handle->Key()); + CacheIndex::RemoveEntry(aHash); LOG( ("CacheFileIOManager::OpenFileInternal() - Removing old file from " @@ -1873,7 +1872,7 @@ nsresult CacheFileIOManager::OpenFileInternal(const SHA1Sum::Hash* aHash, if (exists) { // If this file has been found evicted through the context file evictor // above for any of pinned or non-pinned state, these calls ensure we doom - // the handle ASAP we know the real pinning state after metadata has been + // the handle ASAP we know the real pinning state after metadta has been // parsed. DoomFileInternal on the |handle| doesn't doom right now, since // the pinning state is unknown and we pass down a pinning restriction. 
if (evictedAsPinned) { @@ -2034,7 +2033,7 @@ void CacheFileIOManager::CloseHandleInternal(CacheFileHandle* aHandle) { if (!aHandle->IsSpecialFile() && !aHandle->mIsDoomed && (aHandle->mInvalid || !aHandle->mFileExists)) { - CacheIndex::RemoveEntry(aHandle->Hash(), aHandle->Key()); + CacheIndex::RemoveEntry(aHandle->Hash()); } // Don't remove handles after shutdown @@ -2437,8 +2436,7 @@ nsresult CacheFileIOManager::DoomFile(CacheFileHandle* aHandle, } nsresult CacheFileIOManager::DoomFileInternal( - CacheFileHandle* aHandle, PinningDoomRestriction aPinningDoomRestriction, - bool aClearDictionary) { + CacheFileHandle* aHandle, PinningDoomRestriction aPinningDoomRestriction) { LOG(("CacheFileIOManager::DoomFileInternal() [handle=%p]", aHandle)); aHandle->Log(); @@ -2514,9 +2512,7 @@ nsresult CacheFileIOManager::DoomFileInternal( } if (!aHandle->IsSpecialFile()) { - // Ensure the string doesn't disappear with the handle - RefPtr<CacheFileHandle> handle(aHandle); - CacheIndex::RemoveEntry(aHandle->Hash(), aHandle->Key(), aClearDictionary); + CacheIndex::RemoveEntry(aHandle->Hash()); } aHandle->mIsDoomed = true; @@ -2529,7 +2525,7 @@ nsresult CacheFileIOManager::DoomFileInternal( CacheFileUtils::ParseKey(aHandle->Key(), &idExtension, &url); MOZ_ASSERT(info); if (info) { - storageService->CacheFileDoomed(aHandle->mKey, info, idExtension, url); + storageService->CacheFileDoomed(info, idExtension, url); } } } @@ -2611,15 +2607,7 @@ nsresult CacheFileIOManager::DoomFileByKeyInternal(const SHA1Sum::Hash* aHash) { static_cast<uint32_t>(rv))); } - // Find the key for the hash - // Read metadata from the file synchronously - RefPtr<CacheFileMetadata> metadata = new CacheFileMetadata(); - rv = metadata->SyncReadMetadata(file); - if (NS_WARN_IF(NS_FAILED(rv))) { - CacheIndex::RemoveEntry(aHash, ""_ns); - } else { - CacheIndex::RemoveEntry(aHash, metadata->GetKey()); - } + CacheIndex::RemoveEntry(aHash); return NS_OK; } @@ -3246,8 +3234,7 @@ nsresult 
CacheFileIOManager::OverLimitEvictionInternal() { // TODO index is outdated, start update // Make sure index won't return the same entry again - // XXX find the key for the hash - CacheIndex::RemoveEntry(&hash, ""_ns); + CacheIndex::RemoveEntry(&hash); consecutiveFailures = 0; } else { // This shouldn't normally happen, but the eviction must not fail @@ -3397,7 +3384,6 @@ nsresult CacheFileIOManager::EvictByContext( LOG(("CacheFileIOManager::EvictByContext() [loadContextInfo=%p]", aLoadContextInfo)); - // XXX evict dictionary data from memory cache nsresult rv; RefPtr<CacheFileIOManager> ioMan = gInstance; @@ -3416,14 +3402,6 @@ nsresult CacheFileIOManager::EvictByContext( if (NS_WARN_IF(NS_FAILED(rv))) { return rv; } - // Clear the entries from the Index immediately, to comply with - // https://www.w3.org/TR/clear-site-data/#fetch-integration - // aBaseDomain isn't needed for Clear-Site-Data, but is for - // ClearBaseDomain. This can also make CacheStorageService::Clear() and - // ClearBaseDomain() be synchronous. - // Note that we will effectively hide the entries until the actual evict - // happens. - CacheIndex::EvictByContext(aOrigin, aBaseDomain); return NS_OK; } @@ -3457,9 +3435,6 @@ nsresult CacheFileIOManager::EvictByContextInternal( // This happens in xpcshell tests that use cache without profile. We need // to notify observers in this case since the tests are waiting for it. // Also notify for aPinned == true, those are interested as well. - - // XXX This doesn't actually clear anything in this case (is there anything - // to clear?) 
if (!aLoadContextInfo) { RefPtr<EvictionNotifierRunnable> r = new EvictionNotifierRunnable(); NS_DispatchToMainThread(r); @@ -3482,9 +3457,6 @@ nsresult CacheFileIOManager::EvictByContextInternal( NS_ConvertUTF16toUTF8 baseDomain(aBaseDomain); // Doom all active handles that matches the load context - // NOTE: Dictionaries have already been cleared synchronously, - // so there's no need to re-clear them (which might cause - // problems if they were re-created in to interim). nsTArray<RefPtr<CacheFileHandle>> handles; mHandles.GetActiveHandles(&handles); @@ -3542,8 +3514,7 @@ nsresult CacheFileIOManager::EvictByContextInternal( } // Filter by origin. - if (!origin.IsEmpty()) { // XXX also look for dict:<origin>, or let that - // be handled by Doom? Probably Doom + if (!origin.IsEmpty()) { RefPtr<MozURL> url; rv = MozURL::Init(getter_AddRefs(url), uriSpec); if (NS_FAILED(rv)) { @@ -3568,8 +3539,7 @@ nsresult CacheFileIOManager::EvictByContextInternal( // doom decision will be deferred until pinning status is determined. rv = DoomFileInternal(handle, aPinned ? CacheFileIOManager::DOOM_WHEN_PINNED - : CacheFileIOManager::DOOM_WHEN_NON_PINNED, - false); + : CacheFileIOManager::DOOM_WHEN_NON_PINNED); if (NS_WARN_IF(NS_FAILED(rv))) { LOG( ("CacheFileIOManager::EvictByContextInternal() - Cannot doom handle" @@ -3603,7 +3573,7 @@ nsresult CacheFileIOManager::CacheIndexStateChanged() { // non-null here. 
MOZ_ASSERT(gInstance); - // We have to re-dispatch even if we are on IO thread to prevent reentering + // We have to re-distatch even if we are on IO thread to prevent reentering // the lock in CacheIndex nsCOMPtr<nsIRunnable> ev = NewRunnableMethod( "net::CacheFileIOManager::CacheIndexStateChangedInternal", diff --git a/netwerk/cache2/CacheFileIOManager.h b/netwerk/cache2/CacheFileIOManager.h @@ -20,7 +20,6 @@ #include "nsString.h" #include "nsTHashtable.h" #include "prio.h" -#include "Dictionary.h" // #define DEBUG_HANDLES 1 #if !defined(MOZ_WIDGET_ANDROID) @@ -414,8 +413,7 @@ class CacheFileIOManager final : public nsITimerCallback, public nsINamed { bool aTruncate); nsresult DoomFileInternal( CacheFileHandle* aHandle, - PinningDoomRestriction aPinningDoomRestriction = NO_RESTRICTION, - bool aClearDirectory = true); + PinningDoomRestriction aPinningDoomRestriction = NO_RESTRICTION); nsresult DoomFileByKeyInternal(const SHA1Sum::Hash* aHash); nsresult MaybeReleaseNSPRHandleInternal(CacheFileHandle* aHandle, bool aIgnoreShutdownLag = false); @@ -483,9 +481,6 @@ class CacheFileIOManager final : public nsITimerCallback, public nsINamed { static StaticRefPtr<CacheFileIOManager> gInstance; - // Pointer to DictionaryCache singleton - RefPtr<DictionaryCache> mDictionaryCache; - TimeStamp mStartTime; // Set true on the IO thread, CLOSE level as part of the internal shutdown // procedure. 
diff --git a/netwerk/cache2/CacheFileMetadata.cpp b/netwerk/cache2/CacheFileMetadata.cpp @@ -245,8 +245,7 @@ nsresult CacheFileMetadata::WriteMetadata( memcpy(p, mBuf, mElementsSize); p += mElementsSize; } - LOG(("CacheFileMetadata::WriteMetadata() [this=%p, key=%s, mElementsSize=%d]", - this, mKey.get(), mElementsSize)); + CacheHash::Hash32_t hash; hash = CacheHash::Hash(mWriteBuf + sizeof(uint32_t), p - mWriteBuf - sizeof(uint32_t)); diff --git a/netwerk/cache2/CacheFileOutputStream.cpp b/netwerk/cache2/CacheFileOutputStream.cpp @@ -127,12 +127,6 @@ CacheFileOutputStream::Write(const char* aBuf, uint32_t aCount, *_retval = aCount; - if (mDict) { - // We need to calculate the hash for the entry as we save it - // We don't necessarily need to save the data in memory, however - mDict->AccumulateHash(aBuf, aCount); - } - while (aCount) { EnsureCorrectChunk(false); if (NS_FAILED(mStatus)) { diff --git a/netwerk/cache2/CacheFileOutputStream.h b/netwerk/cache2/CacheFileOutputStream.h @@ -9,7 +9,6 @@ #include "nsISeekableStream.h" #include "nsCOMPtr.h" #include "CacheFileChunk.h" -#include "Dictionary.h" namespace mozilla { namespace net { @@ -40,8 +39,6 @@ class CacheFileOutputStream : public nsIAsyncOutputStream, void NotifyCloseListener(); bool IsAlternativeData() const { return mAlternativeData; }; - void SetDictionary(DictionaryCacheEntry* aDict) { mDict = aDict; } - // Memory reporting size_t SizeOfIncludingThis(mozilla::MallocSizeOf mallocSizeOf) const; @@ -65,7 +62,6 @@ class CacheFileOutputStream : public nsIAsyncOutputStream, nsCOMPtr<nsIOutputStreamCallback> mCallback; uint32_t mCallbackFlags; nsCOMPtr<nsIEventTarget> mCallbackTarget; - RefPtr<DictionaryCacheEntry> mDict; }; } // namespace net diff --git a/netwerk/cache2/CacheIndex.cpp b/netwerk/cache2/CacheIndex.cpp @@ -840,30 +840,12 @@ nsresult CacheIndex::InitEntry(const SHA1Sum::Hash* aHash, } // static -nsresult CacheIndex::RemoveEntry(const SHA1Sum::Hash* aHash, - const nsACString& aKey, - bool 
aClearDictionary) { - LOG( - ("CacheIndex::RemoveEntry() [hash=%08x%08x%08x%08x%08x] key=%s " - "clear_dictionary=%d", - LOGSHA1(aHash), PromiseFlatCString(aKey).get(), aClearDictionary)); +nsresult CacheIndex::RemoveEntry(const SHA1Sum::Hash* aHash) { + LOG(("CacheIndex::RemoveEntry() [hash=%08x%08x%08x%08x%08x]", + LOGSHA1(aHash))); MOZ_ASSERT(CacheFileIOManager::IsOnIOThread()); - // Remove any dictionary associated with this entry even if we later - // error out - async since removal happens on MainThread. - - // TODO XXX There may be a hole here where a dictionary entry can get - // referenced for a request before RemoveDictionaryFor can run, but after - // the entry is removed here. - - // Note: we don't want to (re)clear dictionaries when the - // CacheFileContextEvictor purges entries; they've already been cleared - // via CacheIndex::EvictByContext synchronously - if (aClearDictionary) { - DictionaryCache::RemoveDictionaryFor(aKey); - } - StaticMutexAutoLock lock(sLock); RefPtr<CacheIndex> index = gInstance; @@ -1097,32 +1079,6 @@ nsresult CacheIndex::UpdateEntry(const SHA1Sum::Hash* aHash, return NS_OK; } -// Clear the entries from the Index immediately, to comply with -// https://www.w3.org/TR/clear-site-data/#fetch-integration -// Note that we will effectively hide the entries until the actual evict -// happens. - -// aOrigin == "" means clear all unless aBaseDomain is set to something -// static -void CacheIndex::EvictByContext(const nsAString& aOrigin, - const nsAString& aBaseDomain) { - StaticMutexAutoLock lock(sLock); - - RefPtr<CacheIndex> index = gInstance; - - // Store in hashset that this origin has been evicted; we'll remove it - // when CacheFileIOManager::EvictByContextInternal() finishes. 
- // Not valid to set both aOrigin and aBaseDomain - if (!aOrigin.IsEmpty() && aBaseDomain.IsEmpty()) { - // likely CacheStorageService::ClearByPrincipal - nsCOMPtr<nsIURI> uri; - if (NS_SUCCEEDED(NS_NewURI(getter_AddRefs(uri), aOrigin))) { - // Remove the dictionary entries for this origin immediately - DictionaryCache::RemoveDictionariesForOrigin(uri); - } - } -} - // static nsresult CacheIndex::RemoveAll() { LOG(("CacheIndex::RemoveAll()")); diff --git a/netwerk/cache2/CacheIndex.h b/netwerk/cache2/CacheIndex.h @@ -90,9 +90,8 @@ struct CacheIndexRecord { * 0000 1000 0000 0000 0000 0000 0000 0000 : fresh * 0000 0100 0000 0000 0000 0000 0000 0000 : pinned * 0000 0010 0000 0000 0000 0000 0000 0000 : has cached alt data - * 0000 0001 0000 0000 0000 0000 0000 0000 : is a dictionary + * 0000 0001 0000 0000 0000 0000 0000 0000 : reserved * 0000 0000 1111 1111 1111 1111 1111 1111 : file size (in kB) - * Max file size is 16GiB */ uint32_t mFlags{0}; @@ -100,16 +99,6 @@ struct CacheIndexRecord { }; #pragma pack(pop) -// For Compression Dictionaries, we make special entries in the cache for -// each origin with a dictionary. In the data or metadata for each of -// these entries, we store the hashes of the dictionary, the match value -// (required), the match-dest value (optional), the id (optional) and the -// type (optional). - -// We mark an entry if it's a dictionary (use-as-dictionary); if it is, -// when the entry is removed, we remove it from the origin's dictionary -// entry. If the origin's dictionary list is empty, we remove the origin. - static_assert(sizeof(CacheIndexRecord::mHash) + sizeof(CacheIndexRecord::mFrecency) + sizeof(CacheIndexRecord::mOriginAttrsHash) + @@ -389,11 +378,9 @@ class CacheIndexEntry : public PLDHashEntryHdr { // Indicates there is cached alternative data in the entry. 
static const uint32_t kHasAltDataMask = 0x02000000; + static const uint32_t kReservedMask = 0x01000000; - // Indicates that this entry is a dictionary - static const uint32_t kDictionaryMask = 0x01000000; - - // FileSize in kilobytes (max 16GB) + // FileSize in kilobytes static const uint32_t kFileSizeMask = 0x00FFFFFF; RefPtr<CacheIndexRecordWrapper> mRec; @@ -747,9 +734,7 @@ class CacheIndex final : public CacheFileIOListener, public nsIRunnable { bool aPinned); // Remove entry from index. The entry should be present in index. - static nsresult RemoveEntry(const SHA1Sum::Hash* aHash, - const nsACString& aKey, - bool aClearDictionary = true); + static nsresult RemoveEntry(const SHA1Sum::Hash* aHash); // Update some information in entry. The entry MUST be present in index and // MUST be initialized. Call to AddEntry() or EnsureEntryExists() and to @@ -763,11 +748,6 @@ class CacheIndex final : public CacheFileIOListener, public nsIRunnable { const uint8_t* aContentType, const uint32_t* aSize); - // Mark entries so we won't find them. Used to implement synchronous - // clearing for Clear-Site-Data: cache for Compression Dictionaries - static void EvictByContext(const nsAString& aOrigin, - const nsAString& aBaseDomain); - // Remove all entries from the index. Called when clearing the whole cache. 
static nsresult RemoveAll(); diff --git a/netwerk/cache2/CacheStorage.cpp b/netwerk/cache2/CacheStorage.cpp @@ -28,24 +28,6 @@ NS_IMETHODIMP CacheStorage::AsyncOpenURI(nsIURI* aURI, const nsACString& aIdExtension, uint32_t aFlags, nsICacheEntryOpenCallback* aCallback) { - NS_ENSURE_ARG(aURI); - - nsresult rv; - - nsCOMPtr<nsIURI> noRefURI; - rv = NS_GetURIWithoutRef(aURI, getter_AddRefs(noRefURI)); - NS_ENSURE_SUCCESS(rv, rv); - - nsAutoCString asciiSpec; - rv = noRefURI->GetAsciiSpec(asciiSpec); - NS_ENSURE_SUCCESS(rv, rv); - - return AsyncOpenURIString(asciiSpec, aIdExtension, aFlags, aCallback); -} - -NS_IMETHODIMP CacheStorage::AsyncOpenURIString( - const nsACString& aURI, const nsACString& aIdExtension, uint32_t aFlags, - nsICacheEntryOpenCallback* aCallback) { if (!CacheStorageService::Self()) return NS_ERROR_NOT_INITIALIZED; if (MOZ_UNLIKELY(!CacheObserver::UseDiskCache()) && mWriteToDisk && @@ -60,11 +42,22 @@ NS_IMETHODIMP CacheStorage::AsyncOpenURIString( return NS_OK; } + NS_ENSURE_ARG(aURI); NS_ENSURE_ARG(aCallback); + nsresult rv; + + nsCOMPtr<nsIURI> noRefURI; + rv = NS_GetURIWithoutRef(aURI, getter_AddRefs(noRefURI)); + NS_ENSURE_SUCCESS(rv, rv); + + nsAutoCString asciiSpec; + rv = noRefURI->GetAsciiSpec(asciiSpec); + NS_ENSURE_SUCCESS(rv, rv); + RefPtr<CacheEntryHandle> entry; - nsresult rv = CacheStorageService::Self()->AddStorageEntry( - this, aURI, aIdExtension, aFlags, getter_AddRefs(entry)); + rv = CacheStorageService::Self()->AddStorageEntry( + this, asciiSpec, aIdExtension, aFlags, getter_AddRefs(entry)); if (NS_FAILED(rv)) { aCallback->OnCacheEntryAvailable(nullptr, false, rv); return NS_OK; diff --git a/netwerk/cache2/CacheStorageService.cpp b/netwerk/cache2/CacheStorageService.cpp @@ -39,10 +39,6 @@ namespace mozilla::net { -// static -GlobalEntryTables* CacheStorageService::sGlobalEntryTables = nullptr; -StaticMutex CacheStorageService::sLock; - namespace { void AppendMemoryStorageTag(nsAutoCString& key) { @@ -54,6 +50,21 @@ void 
AppendMemoryStorageTag(nsAutoCString& key) { } // namespace +// Not defining as static or class member of CacheStorageService since +// it would otherwise need to include CacheEntry.h and that then would +// need to be exported to make nsNetModule.cpp compilable. +using GlobalEntryTables = nsClassHashtable<nsCStringHashKey, CacheEntryTable>; + +/** + * Keeps tables of entries. There is one entries table for each distinct load + * context type. The distinction is based on following load context info + * states: <isPrivate|isAnon|inIsolatedMozBrowser> which builds a mapping + * key. + * + * Thread-safe to access, protected by the service mutex. + */ +static GlobalEntryTables* sGlobalEntryTables; + CacheMemoryConsumer::CacheMemoryConsumer(uint32_t aFlags) { StoreFlags(aFlags); } @@ -122,7 +133,7 @@ CacheStorageService::~CacheStorageService() { } void CacheStorageService::Shutdown() { - StaticMutexAutoLock lock(sLock); + mozilla::MutexAutoLock lock(mLock); if (mShutdown) return; @@ -150,7 +161,7 @@ void CacheStorageService::ShutdownBackground() { MOZ_ASSERT(IsOnManagementThread()); { - StaticMutexAutoLock lock(sLock); + mozilla::MutexAutoLock lock(mLock); // Cancel purge timer to avoid leaking. if (mPurgeTimer) { @@ -169,7 +180,7 @@ void CacheStorageService::ShutdownBackground() { // Internal management methods -namespace CacheStorageServiceInternal { +namespace { // WalkCacheRunnable // Base class for particular storage entries visiting @@ -226,14 +237,13 @@ class WalkMemoryCacheRunnable : public WalkCacheRunnable { LOG(("WalkMemoryCacheRunnable::Run - collecting [this=%p]", this)); // First, walk, count and grab all entries from the storage - StaticMutexAutoLock lock(CacheStorageService::sLock); + mozilla::MutexAutoLock lock(CacheStorageService::Self()->Lock()); if (!CacheStorageService::IsRunning()) return NS_ERROR_NOT_INITIALIZED; // Count the entries to allocate the array memory all at once. 
size_t numEntries = 0; - for (const auto& entries : - CacheStorageService::sGlobalEntryTables->Values()) { + for (const auto& entries : sGlobalEntryTables->Values()) { if (entries->Type() != CacheEntryTable::MEMORY_ONLY) { continue; } @@ -242,8 +252,7 @@ class WalkMemoryCacheRunnable : public WalkCacheRunnable { mEntryArray.SetCapacity(numEntries); // Collect the entries. - for (const auto& entries : - CacheStorageService::sGlobalEntryTables->Values()) { + for (const auto& entries : sGlobalEntryTables->Values()) { if (entries->Type() != CacheEntryTable::MEMORY_ONLY) { continue; } @@ -519,10 +528,10 @@ class WalkDiskCacheRunnable : public WalkCacheRunnable { uint32_t mCount; }; -} // namespace CacheStorageServiceInternal +} // namespace void CacheStorageService::DropPrivateBrowsingEntries() { - StaticMutexAutoLock lock(sLock); + mozilla::MutexAutoLock lock(mLock); if (mShutdown) return; @@ -664,7 +673,7 @@ NS_IMETHODIMP CacheStorageService::Clear() { // when all the context have been removed from disk. 
CacheIndex::OnAsyncEviction(true); - StaticMutexAutoLock lock(sLock); + mozilla::MutexAutoLock lock(mLock); { mozilla::MutexAutoLock forcedValidEntriesLock(mForcedValidEntriesLock); @@ -697,8 +706,6 @@ NS_IMETHODIMP CacheStorageService::ClearOriginsByPrincipal( nsAutoString origin; rv = nsContentUtils::GetWebExposedOriginSerialization(aPrincipal, origin); NS_ENSURE_SUCCESS(rv, rv); - LOG(("CacheStorageService::ClearOriginsByPrincipal %s", - NS_ConvertUTF16toUTF8(origin).get())); rv = ClearOriginInternal(origin, aPrincipal->OriginAttributesRef(), true); NS_ENSURE_SUCCESS(rv, rv); @@ -712,8 +719,6 @@ NS_IMETHODIMP CacheStorageService::ClearOriginsByPrincipal( NS_IMETHODIMP CacheStorageService::ClearOriginsByOriginAttributes( const nsAString& aOriginAttributes) { nsresult rv; - LOG(("CacheStorageService::ClearOriginsByOriginAttributes %s", - NS_ConvertUTF16toUTF8(aOriginAttributes).get())); if (NS_WARN_IF(aOriginAttributes.IsEmpty())) { return NS_ERROR_FAILURE; @@ -744,10 +749,6 @@ static bool RemoveExactEntry(CacheEntryTable* aEntries, nsACString const& aKey, return false; // Already replaced... } - // Remove from DictionaryCache immediately, to ensure the removal is - // synchronous - DictionaryCache::RemoveDictionaryFor(aEntry->GetURI()); - LOG(("RemoveExactEntry [entry=%p removed]", aEntry)); aEntries->Remove(aKey); return true; @@ -755,10 +756,9 @@ static bool RemoveExactEntry(CacheEntryTable* aEntries, nsACString const& aKey, NS_IMETHODIMP CacheStorageService::ClearBaseDomain( const nsAString& aBaseDomain) { - LOG(("CacheStorageService::ClearBaseDomain %s", - NS_ConvertUTF16toUTF8(aBaseDomain).get())); - StaticMutexAutoLock lock(sLock); if (sGlobalEntryTables) { + mozilla::MutexAutoLock lock(mLock); + if (mShutdown) return NS_ERROR_NOT_AVAILABLE; nsCString cBaseDomain = NS_ConvertUTF16toUTF8(aBaseDomain); @@ -826,7 +826,6 @@ NS_IMETHODIMP CacheStorageService::ClearBaseDomain( // Clear matched keys. 
for (uint32_t i = 0; i < keys.Length(); ++i) { - LOG(("CacheStorageService::ClearBaseDomain Dooming %s", keys[i].get())); DoomStorageEntries(keys[i], nullptr, true, false, nullptr); } } @@ -846,7 +845,7 @@ nsresult CacheStorageService::ClearOriginInternal( return NS_ERROR_FAILURE; } - StaticMutexAutoLock lock(sLock); + mozilla::MutexAutoLock lock(mLock); if (sGlobalEntryTables) { for (const auto& globalEntry : *sGlobalEntryTables) { @@ -900,19 +899,6 @@ nsresult CacheStorageService::ClearOriginInternal( return NS_OK; } -NS_IMETHODIMP CacheStorageService::ClearOriginDictionary(nsIURI* aURI) { - LOG(("CacheStorageService::ClearOriginDictionary")); - // Note: due to cookie samesite rules, we need to clean for all ports - DictionaryCache::RemoveDictionariesForOrigin(aURI); - return NS_OK; -} - -NS_IMETHODIMP CacheStorageService::ClearAllOriginDictionaries() { - LOG(("CacheStorageService::ClearAllOriginDictionaries")); - DictionaryCache::RemoveAllDictionaries(); - return NS_OK; -} - NS_IMETHODIMP CacheStorageService::PurgeFromMemory(uint32_t aWhat) { uint32_t what; @@ -992,9 +978,8 @@ NS_IMETHODIMP CacheStorageService::AsyncVisitAllStorages( NS_ENSURE_FALSE(mShutdown, NS_ERROR_NOT_INITIALIZED); // Walking the disk cache also walks the memory cache. - RefPtr<CacheStorageServiceInternal::WalkDiskCacheRunnable> event = - new CacheStorageServiceInternal::WalkDiskCacheRunnable( - nullptr, aVisitEntries, aVisitor); + RefPtr<WalkDiskCacheRunnable> event = + new WalkDiskCacheRunnable(nullptr, aVisitEntries, aVisitor); return event->Walk(); } @@ -1056,7 +1041,7 @@ bool CacheStorageService::RemoveEntry(CacheEntry* aEntry, return false; } - StaticMutexAutoLock lock(sLock); + mozilla::MutexAutoLock lock(mLock); if (mShutdown) { LOG((" after shutdown")); @@ -1104,7 +1089,7 @@ void CacheStorageService::RecordMemoryOnlyEntry(CacheEntry* aEntry, // not is always recorded in the storage master hash table, the one identified // by CacheEntry.StorageID(). 
- sLock.AssertCurrentThreadOwns(); + mLock.AssertCurrentThreadOwns(); if (mShutdown) { LOG((" after shutdown")); @@ -1306,7 +1291,7 @@ bool CacheStorageService::MemoryPool::OnMemoryConsumptionChange( void CacheStorageService::SchedulePurgeOverMemoryLimit() { LOG(("CacheStorageService::SchedulePurgeOverMemoryLimit")); - StaticMutexAutoLock lock(sLock); + mozilla::MutexAutoLock lock(mLock); if (mShutdown) { LOG((" past shutdown")); @@ -1333,7 +1318,7 @@ NS_IMETHODIMP CacheStorageService::Notify(nsITimer* aTimer) { LOG(("CacheStorageService::Notify")); - StaticMutexAutoLock lock(sLock); + mozilla::MutexAutoLock lock(mLock); if (aTimer == mPurgeTimer) { #ifdef MOZ_TSAN @@ -1504,7 +1489,7 @@ Result<size_t, nsresult> CacheStorageService::MemoryPool::PurgeByFrecency( return Err(NS_ERROR_OUT_OF_MEMORY); } { - StaticMutexAutoLock lock(CacheStorageService::Self()->Lock()); + mozilla::MutexAutoLock lock(CacheStorageService::Self()->Lock()); for (const auto& entry : mManagedEntries) { // Referenced items cannot be purged and we deliberately want to not look @@ -1607,7 +1592,7 @@ nsresult CacheStorageService::AddStorageEntry( RefPtr<CacheEntryHandle> handle; { - StaticMutexAutoLock lock(sLock); + mozilla::MutexAutoLock lock(mLock); NS_ENSURE_FALSE(mShutdown, NS_ERROR_NOT_INITIALIZED); @@ -1630,15 +1615,6 @@ nsresult CacheStorageService::AddStorageEntry( StaticPrefs::network_cache_bug1708673()) { return NS_ERROR_CACHE_KEY_NOT_FOUND; } - if (entryExists && (aFlags & nsICacheStorage::OPEN_COMPLETE_ONLY)) { - bool ready = false; - // We're looking for complete files, even if they're being revalidated - // (for dictionaries) - entry->GetReadyOrRevalidating(&ready); - if (!ready) { - return NS_ERROR_CACHE_KEY_NOT_FOUND; - } - } bool replace = aFlags & nsICacheStorage::OPEN_TRUNCATE; @@ -1712,7 +1688,7 @@ nsresult CacheStorageService::CheckStorageEntry(CacheStorage const* aStorage, aURI.BeginReading(), aIdExtension.BeginReading(), contextKey.get())); { - StaticMutexAutoLock 
lock(sLock); + mozilla::MutexAutoLock lock(mLock); NS_ENSURE_FALSE(mShutdown, NS_ERROR_NOT_INITIALIZED); @@ -1860,8 +1836,7 @@ NS_IMPL_ISUPPORTS(CacheEntryDoomByKeyCallback, CacheFileIOListener, nsresult CacheStorageService::DoomStorageEntry( CacheStorage const* aStorage, const nsACString& aURI, const nsACString& aIdExtension, nsICacheEntryDoomCallback* aCallback) { - LOG(("CacheStorageService::DoomStorageEntry %s", - PromiseFlatCString(aURI).get())); + LOG(("CacheStorageService::DoomStorageEntry")); NS_ENSURE_ARG(aStorage); @@ -1874,7 +1849,7 @@ nsresult CacheStorageService::DoomStorageEntry( RefPtr<CacheEntry> entry; { - StaticMutexAutoLock lock(sLock); + mozilla::MutexAutoLock lock(mLock); NS_ENSURE_FALSE(mShutdown, NS_ERROR_NOT_INITIALIZED); @@ -1961,7 +1936,7 @@ nsresult CacheStorageService::DoomStorageEntries( nsAutoCString contextKey; CacheFileUtils::AppendKeyPrefix(aStorage->LoadInfo(), contextKey); - StaticMutexAutoLock lock(sLock); + mozilla::MutexAutoLock lock(mLock); return DoomStorageEntries(contextKey, aStorage->LoadInfo(), aStorage->WriteToDisk(), aStorage->Pinning(), @@ -1974,7 +1949,7 @@ nsresult CacheStorageService::DoomStorageEntries( LOG(("CacheStorageService::DoomStorageEntries [context=%s]", aContextKey.BeginReading())); - sLock.AssertCurrentThreadOwns(); + mLock.AssertCurrentThreadOwns(); NS_ENSURE_TRUE(!mShutdown, NS_ERROR_NOT_INITIALIZED); @@ -2079,20 +2054,17 @@ nsresult CacheStorageService::WalkStorageEntries( NS_ENSURE_ARG(aStorage); if (aStorage->WriteToDisk()) { - RefPtr<CacheStorageServiceInternal::WalkDiskCacheRunnable> event = - new CacheStorageServiceInternal::WalkDiskCacheRunnable( - aStorage->LoadInfo(), aVisitEntries, aVisitor); + RefPtr<WalkDiskCacheRunnable> event = new WalkDiskCacheRunnable( + aStorage->LoadInfo(), aVisitEntries, aVisitor); return event->Walk(); } - RefPtr<CacheStorageServiceInternal::WalkMemoryCacheRunnable> event = - new CacheStorageServiceInternal::WalkMemoryCacheRunnable( - aStorage->LoadInfo(), 
aVisitEntries, aVisitor); + RefPtr<WalkMemoryCacheRunnable> event = new WalkMemoryCacheRunnable( + aStorage->LoadInfo(), aVisitEntries, aVisitor); return event->Walk(); } -void CacheStorageService::CacheFileDoomed(const nsACString& aKey, - nsILoadContextInfo* aLoadContextInfo, +void CacheStorageService::CacheFileDoomed(nsILoadContextInfo* aLoadContextInfo, const nsACString& aIdExtension, const nsACString& aURISpec) { nsAutoCString contextKey; @@ -2101,7 +2073,7 @@ void CacheStorageService::CacheFileDoomed(const nsACString& aKey, nsAutoCString entryKey; CacheEntry::HashingKey(""_ns, aIdExtension, aURISpec, entryKey); - StaticMutexAutoLock lock(sLock); + mozilla::MutexAutoLock lock(mLock); if (mShutdown) { return; @@ -2138,7 +2110,7 @@ bool CacheStorageService::GetCacheEntryInfo( RefPtr<CacheEntry> entry; { - StaticMutexAutoLock lock(sLock); + mozilla::MutexAutoLock lock(mLock); if (mShutdown) { return false; @@ -2297,7 +2269,7 @@ void CacheStorageService::TelemetryRecordEntryRemoval(CacheEntry* entry) { size_t CacheStorageService::SizeOfExcludingThis( mozilla::MallocSizeOf mallocSizeOf) const { - sLock.AssertCurrentThreadOwns(); + CacheStorageService::Self()->Lock().AssertCurrentThreadOwns(); size_t n = 0; // The elemets are referenced by sGlobalEntryTables and are reported from @@ -2320,7 +2292,7 @@ size_t CacheStorageService::SizeOfIncludingThis( NS_IMETHODIMP CacheStorageService::CollectReports(nsIHandleReportCallback* aHandleReport, nsISupports* aData, bool aAnonymize) { - StaticMutexAutoLock lock(sLock); + MutexAutoLock lock(mLock); MOZ_COLLECT_REPORT("explicit/network/cache2/io", KIND_HEAP, UNITS_BYTES, CacheFileIOManager::SizeOfIncludingThis(MallocSizeOf), "Memory used by the cache IO manager."); diff --git a/netwerk/cache2/CacheStorageService.h b/netwerk/cache2/CacheStorageService.h @@ -19,7 +19,6 @@ #include "nsProxyRelease.h" #include "mozilla/Monitor.h" #include "mozilla/Mutex.h" -#include "mozilla/StaticMutex.h" #include "mozilla/AtomicBitfields.h" 
#include "mozilla/Atomics.h" #include "mozilla/TimeStamp.h" @@ -42,7 +41,6 @@ class CacheStorageService; class CacheStorage; class CacheEntry; class CacheEntryHandle; -class CacheEntryTable; class CacheMemoryConsumer { private: @@ -75,22 +73,11 @@ class CacheMemoryConsumer { void DoMemoryReport(uint32_t aCurrentSize); }; -using GlobalEntryTables = nsClassHashtable<nsCStringHashKey, CacheEntryTable>; -class WalkMemoryCacheRunnable; - -namespace CacheStorageServiceInternal { -class WalkMemoryCacheRunnable; -class WalkDiskCacheRunnable; -} // namespace CacheStorageServiceInternal - class CacheStorageService final : public nsICacheStorageService, public nsIMemoryReporter, public nsITimerCallback, public nsICacheTesting, public nsINamed { - friend class CacheStorageServiceInternal::WalkMemoryCacheRunnable; - friend class CacheStorageServiceInternal::WalkDiskCacheRunnable; - public: NS_DECL_THREADSAFE_ISUPPORTS NS_DECL_NSICACHESTORAGESERVICE @@ -112,7 +99,7 @@ class CacheStorageService final : public nsICacheStorageService, static bool IsRunning() { return sSelf && !sSelf->mShutdown; } static bool IsOnManagementThread(); already_AddRefed<nsIEventTarget> Thread() const; - StaticMutex& Lock() { return sLock; } + mozilla::Mutex& Lock() { return mLock; } // Tracks entries that may be forced valid in a pruned hashtable. struct ForcedValidData { @@ -158,16 +145,6 @@ class CacheStorageService final : public nsICacheStorageService, virtual ~CacheStorageService(); void ShutdownBackground(); - /** - * Keeps tables of entries. There is one entries table for each distinct load - * context type. The distinction is based on following load context info - * states: <isPrivate|isAnon|inIsolatedMozBrowser> which builds a mapping - * key. - * - * Thread-safe to access, protected by the service mutex. - */ - static GlobalEntryTables* sGlobalEntryTables MOZ_GUARDED_BY(sLock); - private: // The following methods may only be called on the management // thread. 
@@ -286,11 +263,9 @@ class CacheStorageService final : public nsICacheStorageService, /** * CacheFileIOManager uses this method to notify CacheStorageService that * an active entry was removed. This method is called even if the entry - * removal was originated by CacheStorageService. This also removes the entry - * from the DictionaryCache. + * removal was originated by CacheStorageService. */ - void CacheFileDoomed(const nsACString& aKey, - nsILoadContextInfo* aLoadContextInfo, + void CacheFileDoomed(nsILoadContextInfo* aLoadContextInfo, const nsACString& aIdExtension, const nsACString& aURISpec); @@ -347,7 +322,7 @@ class CacheStorageService final : public nsICacheStorageService, static CacheStorageService* sSelf; - static StaticMutex sLock; + mozilla::Mutex mLock MOZ_UNANNOTATED{"CacheStorageService.mLock"}; mozilla::Mutex mForcedValidEntriesLock{ "CacheStorageService.mForcedValidEntriesLock"}; diff --git a/netwerk/cache2/Dictionary.cpp b/netwerk/cache2/Dictionary.cpp @@ -1,1375 +0,0 @@ -/* vim: set ts=2 sts=2 et sw=2: */ -/* This Source Code Form is subject to the terms of the Mozilla Public - * License, v. 2.0. If a copy of the MPL was not distributed with this - * file, You can obtain one at http://mozilla.org/MPL/2.0/. 
*/ - -#include <algorithm> -#include <stdlib.h> - -#include "Dictionary.h" - -#include "CacheFileUtils.h" -#include "nsAttrValue.h" -#include "nsContentPolicyUtils.h" -#include "nsString.h" -#include "nsAppDirectoryServiceDefs.h" -#include "nsIAsyncInputStream.h" -#include "nsICacheStorageService.h" -#include "nsICacheStorage.h" -#include "nsICacheEntry.h" -#include "nsICachingChannel.h" -#include "nsICancelable.h" -#include "nsIChannel.h" -#include "nsContentUtils.h" -#include "nsIFile.h" -#include "nsIInputStream.h" -#include "nsILoadContext.h" -#include "nsILoadContextInfo.h" -#include "nsILoadGroup.h" -#include "nsIObserverService.h" -#include "nsIURI.h" -#include "nsIURIMutator.h" -#include "nsInputStreamPump.h" -#include "nsNetUtil.h" -#include "nsServiceManagerUtils.h" -#include "nsSimpleURI.h" -#include "nsStandardURL.h" -#include "nsStreamUtils.h" -#include "nsString.h" -#include "nsThreadUtils.h" -#include "mozilla/Logging.h" - -#include "mozilla/Components.h" -#include "mozilla/dom/Document.h" -#include "mozilla/FlowMarkers.h" -#include "mozilla/OriginAttributes.h" -#include "mozilla/Preferences.h" -#include "mozilla/SchedulerGroup.h" -#include "mozilla/StaticPrefs_network.h" -#include "mozilla/glean/NetwerkMetrics.h" - -#include "mozilla/net/NeckoCommon.h" -#include "mozilla/net/NeckoParent.h" -#include "mozilla/net/NeckoChild.h" -#include "mozilla/net/URLPatternGlue.h" -#include "mozilla/net/urlpattern_glue.h" - -#include "LoadContextInfo.h" -#include "mozilla/ipc/URIUtils.h" -#include "SerializedLoadContext.h" - -#include "mozilla/dom/ContentParent.h" -#include "mozilla/dom/InternalRequest.h" -#include "mozilla/ClearOnShutdown.h" - -#include "ReferrerInfo.h" - -using namespace mozilla; - -namespace mozilla { -namespace net { - -// Access to all these classes is from MainThread unless otherwise specified - -LazyLogModule gDictionaryLog("CompressionDictionaries"); - -#define DICTIONARY_LOG(args) \ - MOZ_LOG(gDictionaryLog, mozilla::LogLevel::Debug, 
args) - -/** - * Reference to the DictionaryCache singleton. May be null. - */ -StaticRefPtr<DictionaryCache> gDictionaryCache; -StaticRefPtr<nsICacheStorage> DictionaryCache::sCacheStorage; - -// about:cache gets upset about entries that don't fit URL specs, so we need -// to add the trailing '/' to GetPrePath() -static nsresult GetDictPath(nsIURI* aURI, nsACString& aPrePath) { - if (NS_FAILED(aURI->GetPrePath(aPrePath))) { - return NS_ERROR_FAILURE; - } - aPrePath += '/'; - return NS_OK; -} - -DictionaryCacheEntry::DictionaryCacheEntry(const char* aKey) { - mURI = aKey; - DICTIONARY_LOG(("Created DictionaryCacheEntry %p, uri=%s", this, aKey)); -} - -DictionaryCacheEntry::~DictionaryCacheEntry() { - MOZ_ASSERT(mUsers == 0); - DICTIONARY_LOG( - ("Destroyed DictionaryCacheEntry %p, uri=%s, pattern=%s, id=%s", this, - mURI.get(), mPattern.get(), mId.get())); -} - -DictionaryCacheEntry::DictionaryCacheEntry(const nsACString& aURI, - const nsACString& aPattern, - nsTArray<nsCString>& aMatchDest, - const nsACString& aId, - uint32_t aExpiration, - const Maybe<nsCString>& aHash) - : mURI(aURI), mExpiration(aExpiration), mPattern(aPattern), mId(aId) { - ConvertMatchDestToEnumArray(aMatchDest, mMatchDest); - DICTIONARY_LOG( - ("Created DictionaryCacheEntry %p, uri=%s, pattern=%s, id=%s, " - "expiration=%u", - this, PromiseFlatCString(aURI).get(), PromiseFlatCString(aPattern).get(), - PromiseFlatCString(aId).get(), aExpiration)); - if (aHash) { - mHash = aHash.value(); - } -} - -NS_IMPL_ISUPPORTS(DictionaryCacheEntry, nsICacheEntryOpenCallback, - nsIStreamListener) - -// Convert string MatchDest array to enum array -// static -void DictionaryCacheEntry::ConvertMatchDestToEnumArray( - const nsTArray<nsCString>& aMatchDest, - nsTArray<dom::RequestDestination>& aMatchEnums) { - AutoTArray<dom::RequestDestination, 3> temp; - for (auto& string : aMatchDest) { - dom::RequestDestination dest = - dom::StringToEnum<dom::RequestDestination>(string).valueOr( - 
dom::RequestDestination::_empty); - if (dest != dom::RequestDestination::_empty) { - temp.AppendElement(dest); - } - } - aMatchEnums.SwapElements(temp); -} - -// Returns true if the pattern for the dictionary matches the path given. -// Note: we need to verify that this entry has not expired due to 2.2.1 of -// https://datatracker.ietf.org/doc/draft-ietf-httpbis-compression-dictionary/ -bool DictionaryCacheEntry::Match(const nsACString& aFilePath, - ExtContentPolicyType aType, uint32_t aNow, - uint32_t& aLongest) { - if (mHash.IsEmpty()) { - // We don't have the file yet - return false; - } - if (mNotCached) { - // Not actually in the cache - // May not actually be necessary, but good safety valve. - return false; - } - // Not worth checking if we wouldn't use it - DICTIONARY_LOG(("Match: %p %s to %s, %s (now=%u, expiration=%u)", this, - PromiseFlatCString(aFilePath).get(), mPattern.get(), - NS_CP_ContentTypeName(aType), aNow, mExpiration)); - if ((mExpiration == 0 || aNow < mExpiration) && - mPattern.Length() > aLongest) { - // Need to match using match-dest, if it exists - if (mMatchDest.IsEmpty() || - mMatchDest.IndexOf( - dom::InternalRequest::MapContentPolicyTypeToRequestDestination( - aType)) != mMatchDest.NoIndex) { - UrlpPattern pattern; - UrlpOptions options; - const nsCString base("https://foo.com/"_ns); - if (!urlp_parse_pattern_from_string(&mPattern, &base, options, - &pattern)) { - DICTIONARY_LOG( - ("Failed to parse dictionary pattern %s", mPattern.get())); - return false; - } - - UrlpInput input = net::CreateUrlpInput(aFilePath); - bool result = net::UrlpPatternTest(pattern, input, Some(base)); - DICTIONARY_LOG(("URLPattern result was %d", result)); - if (result) { - aLongest = mPattern.Length(); - DICTIONARY_LOG(("Match: %s (longest %u)", mURI.get(), aLongest)); - } - return result; - } else { - DICTIONARY_LOG((" Failed on matchDest")); - } - } else { - DICTIONARY_LOG( - (" Failed due to expiration: %u vs %u", aNow, mExpiration)); - } - return 
false; -} - -void DictionaryCacheEntry::InUse() { - mUsers++; - DICTIONARY_LOG(("Dictionary users for %s -- %u Users", mURI.get(), mUsers)); -} - -void DictionaryCacheEntry::UseCompleted() { - MOZ_ASSERT(mUsers > 0); - mUsers--; - // Purge mDictionaryData - if (mUsers == 0) { // XXX perhaps we should hold it for a bit longer? - DICTIONARY_LOG(("Clearing Dictionary data for %s", mURI.get())); - mDictionaryData.clear(); - mDictionaryDataComplete = false; - } else { - DICTIONARY_LOG(("Not clearing Dictionary data for %s -- %u Users", - mURI.get(), mUsers)); - } -} - -// returns aShouldSuspend=true if we should suspend to wait for the prefetch -nsresult DictionaryCacheEntry::Prefetch(nsILoadContextInfo* aLoadContextInfo, - bool& aShouldSuspend, - const std::function<void()>& aFunc) { - DICTIONARY_LOG(("Prefetch for %s", mURI.get())); - // Start reading the cache entry into memory and call completion - // function when done - if (mWaitingPrefetch.IsEmpty()) { - // Note that if the cache entry has been cleared, and we still have active - // users of it, we'll hold onto that data since we have outstanding requests - // for it. Probably we shouldn't allow new requests to use this data (and - // the WPTs assume we shouldn't). - if (mDictionaryDataComplete) { - DICTIONARY_LOG( - ("Prefetch for %s - already have data in memory (%u users)", - mURI.get(), mUsers)); - aShouldSuspend = false; - return NS_OK; - } - - // We haven't requested it yet from the Cache and don't have it in memory - // already. 
- // We can't use sCacheStorage because we need the correct LoadContextInfo - nsCOMPtr<nsICacheStorageService> cacheStorageService( - components::CacheStorage::Service()); - if (!cacheStorageService) { - aShouldSuspend = false; - return NS_ERROR_FAILURE; - } - nsCOMPtr<nsICacheStorage> cacheStorage; - nsresult rv = cacheStorageService->DiskCacheStorage( - aLoadContextInfo, getter_AddRefs(cacheStorage)); - if (NS_FAILED(rv)) { - aShouldSuspend = false; - return NS_ERROR_FAILURE; - } - // If the file isn't available in the cache, AsyncOpenURIString() - // will synchronously make a callback to OnCacheEntryAvailable() with - // nullptr. We can key off that to fail Prefetch(), and also to - // remove ourselves from the origin. - if (NS_FAILED(cacheStorage->AsyncOpenURIString( - mURI, ""_ns, - nsICacheStorage::OPEN_READONLY | - nsICacheStorage::OPEN_COMPLETE_ONLY | - nsICacheStorage::CHECK_MULTITHREADED, - this)) || - mNotCached) { - DICTIONARY_LOG(("AsyncOpenURIString failed for %s", mURI.get())); - // For some reason the cache no longer has this entry; fail Prefetch - // and also remove this from our origin - aShouldSuspend = false; - // Remove from origin - if (mOrigin) { - mOrigin->RemoveEntry(this); - mOrigin = nullptr; - } - return NS_ERROR_FAILURE; - } - mWaitingPrefetch.AppendElement(aFunc); - DICTIONARY_LOG(("Started Prefetch for %s, anonymous=%d", mURI.get(), - aLoadContextInfo->IsAnonymous())); - aShouldSuspend = true; - return NS_OK; - } - DICTIONARY_LOG(("Prefetch for %s - already waiting", mURI.get())); - aShouldSuspend = true; - return NS_OK; -} - -void DictionaryCacheEntry::AccumulateHash(const char* aBuf, int32_t aCount) { - MOZ_ASSERT(NS_IsMainThread()); - if (!mHash.IsEmpty()) { - if (!mDictionaryData.empty()) { - // We have data from the cache.... but if we change the hash there will - // be problems - // XXX dragons here - return; - } - // accumulating a new hash when we have an existing? 
- // XXX probably kill the hash when we get an overwrite; tricky, need to - // handle loading the old one into ram to decompress the new one. Also, - // what if the old one is being used for multiple requests, one of which - // is an overwrite? This is an edge case not discussed in the spec - we - // could separate out a structure for in-flight requests where the data - // would be used from, so the Entry could be overwritten as needed - return; // XXX - } - if (!mCrypto) { - DICTIONARY_LOG(("Calculating new hash for %s", mURI.get())); - // If mCrypto is null, and mDictionaryData is set, we've already got the - // data for this dictionary. - nsresult rv; - mCrypto = do_CreateInstance(NS_CRYPTO_HASH_CONTRACTID, &rv); - if (NS_WARN_IF(NS_FAILED(rv))) { - return; - } - rv = mCrypto->Init(nsICryptoHash::SHA256); - NS_WARNING_ASSERTION(NS_SUCCEEDED(rv), "Cache InitCrypto failed"); - } - mCrypto->Update(reinterpret_cast<const uint8_t*>(aBuf), aCount); - DICTIONARY_LOG(("Accumulate Hash %p: %d bytes, total %zu", this, aCount, - mDictionaryData.length())); -} - -void DictionaryCacheEntry::FinishHash() { - MOZ_ASSERT(NS_IsMainThread()); - if (mCrypto) { - mCrypto->Finish(true, mHash); - mCrypto = nullptr; - DICTIONARY_LOG(("Hash for %p (%s) is %s", this, mURI.get(), mHash.get())); - if (mOrigin) { - DICTIONARY_LOG(("Write on hash")); - // This will also move us from mPendingEntries to mEntries - if (NS_FAILED(mOrigin->Write(this))) { - mOrigin->RemoveEntry(this); - return; - } - if (!mBlocked) { - mOrigin->FinishAddEntry(this); - } - } - } -} - -// Version of metadata entries we expect -static const uint32_t METADATA_VERSION = 1; - -// Metadata format: -// |version|hash|pattern|[matchdest|]*||id|expiration|type -// -// * Entries: -// ** CString: URI -- the key, not in the entry -// ** CString: Version (1) -// ** CString: Hash -// ** CString: Pattern -// ** match-dest CString list, terminated by empty string -// *** CString: Match-dest -// ** CString: Id -// ** uint32 as a 
CString: expiration. If missing, 0 (none) -// ** CString: type -- defaults to 'raw' if missing -// We store strings with a delimiter, and use escapes for delimiters or escape -// characters in the source strings. -// - -// Escape the string and append to aOutput -static void EscapeMetadataString(const nsACString& aInput, nsCString& aOutput) { - // First calculate how much we'll need to append. Means we'll walk the source - // twice, but avoids any potential multiple reallocations - const char* src = aInput.BeginReading(); - size_t len = 1; // for initial | - while (*src) { - if (*src == '|' || *src == '\\') { - len += 2; - } else { - len++; - } - src++; - } - aOutput.SetCapacity(aOutput.Length() + len); - src = aInput.BeginReading(); - - aOutput.AppendLiteral("|"); - while (*src) { - if (*src == '|' || *src == '\\') { - aOutput.AppendLiteral("\\"); - } - aOutput.Append(*src++); - } -} - -void DictionaryCacheEntry::MakeMetadataEntry(nsCString& aNewValue) { - aNewValue.AppendLiteral("|"), aNewValue.AppendInt(METADATA_VERSION), - EscapeMetadataString(mHash, aNewValue); - EscapeMetadataString(mPattern, aNewValue); - EscapeMetadataString(mId, aNewValue); - for (auto& dest : mMatchDest) { - EscapeMetadataString(dom::GetEnumString(dest), aNewValue); - } - // List of match-dest values is terminated by an empty string - EscapeMetadataString(""_ns, aNewValue); - // Expiration time, as a CString - nsAutoCStringN<12> expiration; - expiration = nsPrintfCString("%u", mExpiration); - EscapeMetadataString(expiration, aNewValue); - // We don't store type, since we only support type 'raw' We can support - // type in the future by considering missing type as raw without changing the - // format -} - -nsresult DictionaryCacheEntry::Write(nsICacheEntry* aCacheEntry) { - nsAutoCStringN<2048> metadata; - MakeMetadataEntry(metadata); - DICTIONARY_LOG( - ("DictionaryCacheEntry::Write %s %s", mURI.get(), metadata.get())); - return aCacheEntry->SetMetaDataElement(mURI.get(), metadata.get()); 
-} - -nsresult DictionaryCacheEntry::RemoveEntry(nsICacheEntry* aCacheEntry) { - DICTIONARY_LOG(("RemoveEntry from metadata for %s", mURI.get())); - return aCacheEntry->SetMetaDataElement(mURI.BeginReading(), nullptr); -} - -// Parse - | for field seperator; \ for escape of | or \ . -static const char* GetEncodedString(const char* aSrc, nsACString& aOutput) { - // scan the input string and build the output, handling escapes - aOutput.Truncate(); - MOZ_ASSERT(*aSrc == '|' || *aSrc == 0); - if (!aSrc || *aSrc != '|') { - return aSrc; - } - aSrc++; - while (*aSrc) { - if (*aSrc == '|') { - break; - } - if (*aSrc == '\\') { - aSrc++; - } - aOutput.Append(*aSrc++); - } - return aSrc; -} - -// Parse metadata from DictionaryOrigin -bool DictionaryCacheEntry::ParseMetadata(const char* aSrc) { - // Using mHash as a temp for version - aSrc = GetEncodedString(aSrc, mHash); - const char* tmp = mHash.get(); - uint32_t version = atoi(tmp); - if (version != METADATA_VERSION) { - return false; - } - aSrc = GetEncodedString(aSrc, mHash); - aSrc = GetEncodedString(aSrc, mHash); - aSrc = GetEncodedString(aSrc, mPattern); - aSrc = GetEncodedString(aSrc, mId); - nsAutoCString temp; - // get match-dest values (list ended with empty string) - do { - aSrc = GetEncodedString(aSrc, temp); - if (!temp.IsEmpty()) { - dom::RequestDestination dest = - dom::StringToEnum<dom::RequestDestination>(temp).valueOr( - dom::RequestDestination::_empty); - if (dest != dom::RequestDestination::_empty) { - mMatchDest.AppendElement(dest); - } - } - } while (!temp.IsEmpty()); - if (*aSrc == '|') { - char* newSrc; - mExpiration = strtoul(++aSrc, &newSrc, 10); - aSrc = newSrc; - } // else leave default of 0 - // XXX type - we assume and only support 'raw', may be missing - aSrc = GetEncodedString(aSrc, temp); - - DICTIONARY_LOG( - ("Parse entry %s: |%s| %s match-dest[0]=%s id=%s", mURI.get(), - mHash.get(), mPattern.get(), - mMatchDest.Length() > 0 ? 
dom::GetEnumString(mMatchDest[0]).get() : "", - mId.get())); - return true; -} - -//----------------------------------------------------------------------------- -// nsIStreamListener implementation -//----------------------------------------------------------------------------- - -NS_IMETHODIMP -DictionaryCacheEntry::OnStartRequest(nsIRequest* request) { - DICTIONARY_LOG(("DictionaryCacheEntry %s OnStartRequest", mURI.get())); - return NS_OK; -} - -NS_IMETHODIMP -DictionaryCacheEntry::OnDataAvailable(nsIRequest* request, - nsIInputStream* aInputStream, - uint64_t aOffset, uint32_t aCount) { - uint32_t n; - DICTIONARY_LOG( - ("DictionaryCacheEntry %s OnDataAvailable %u", mURI.get(), aCount)); - return aInputStream->ReadSegments(&DictionaryCacheEntry::ReadCacheData, this, - aCount, &n); -} - -/* static */ -nsresult DictionaryCacheEntry::ReadCacheData( - nsIInputStream* aInStream, void* aClosure, const char* aFromSegment, - uint32_t aToOffset, uint32_t aCount, uint32_t* aWriteCount) { - DictionaryCacheEntry* self = static_cast<DictionaryCacheEntry*>(aClosure); - - Unused << self->mDictionaryData.append(aFromSegment, aCount); - DICTIONARY_LOG(("Accumulate %p (%s): %d bytes, total %zu", self, - self->mURI.get(), aCount, self->mDictionaryData.length())); - *aWriteCount = aCount; - return NS_OK; -} - -NS_IMETHODIMP -DictionaryCacheEntry::OnStopRequest(nsIRequest* request, nsresult result) { - DICTIONARY_LOG(("DictionaryCacheEntry %s OnStopRequest", mURI.get())); - if (NS_SUCCEEDED(result)) { - mDictionaryDataComplete = true; - DICTIONARY_LOG(("Unsuspending %zu channels, Dictionary len %zu", - mWaitingPrefetch.Length(), mDictionaryData.length())); - // if we suspended, un-suspend the channel(s) - for (auto& lambda : mWaitingPrefetch) { - (lambda)(); - } - mWaitingPrefetch.Clear(); - } else { - // XXX - // This is problematic - we requested with dcb/dcz, but can't actually - // decode them. 
Probably we should re-request without dcb/dcz, and also nuke - // the entry - // XXX - } - - // If we're being replaced by a new entry, swap now - RefPtr<DictionaryCacheEntry> self; - if (mReplacement) { - DICTIONARY_LOG(("Replacing entry %p with %p for %s", this, - mReplacement.get(), mURI.get())); - // Make sure we don't destroy ourselves - self = this; - mReplacement->mShouldSuspend = false; - mOrigin->RemoveEntry(this); - // When mReplacement gets all it's data, it will be added to mEntries - mReplacement->UnblockAddEntry(mOrigin); - mOrigin = nullptr; - } - - mStopReceived = true; - return NS_OK; -} - -void DictionaryCacheEntry::UnblockAddEntry(DictionaryOrigin* aOrigin) { - MOZ_ASSERT(NS_IsMainThread()); - if (!mHash.IsEmpty()) { - // Already done, we can move to mEntries now - aOrigin->FinishAddEntry(this); - } - mBlocked = false; -} - -void DictionaryCacheEntry::WriteOnHash() { - bool hasHash = false; - { - MOZ_ASSERT(NS_IsMainThread()); - if (!mHash.IsEmpty()) { - hasHash = true; - } - } - if (hasHash && mOrigin) { - DICTIONARY_LOG(("Write already hashed")); - mOrigin->Write(this); - } -} - -//----------------------------------------------------------------------------- -// nsICacheEntryOpenCallback implementation -//----------------------------------------------------------------------------- -// Note: we don't care if the entry is stale since we're not loading it; we're -// just saying with have this specific set of bits with this hash available -// to use as a dictionary. 
- -// This may be called on a random thread due to -// nsICacheStorage::CHECK_MULTITHREADED -NS_IMETHODIMP -DictionaryCacheEntry::OnCacheEntryCheck(nsICacheEntry* aEntry, - uint32_t* result) { - DICTIONARY_LOG(("OnCacheEntryCheck %s", mURI.get())); - *result = nsICacheEntryOpenCallback::ENTRY_WANTED; - return NS_OK; -} - -NS_IMETHODIMP -DictionaryCacheEntry::OnCacheEntryAvailable(nsICacheEntry* entry, bool isNew, - nsresult status) { - DICTIONARY_LOG(("OnCacheEntryAvailable %s, result %u, entry %p", mURI.get(), - (uint32_t)status, entry)); - if (entry) { - nsCOMPtr<nsIInputStream> stream; - entry->OpenInputStream(0, getter_AddRefs(stream)); - if (!stream) { - return NS_OK; - } - - RefPtr<nsInputStreamPump> pump; - nsresult rv = nsInputStreamPump::Create(getter_AddRefs(pump), stream); - if (NS_FAILED(rv)) { - return NS_OK; // just ignore - } - - rv = pump->AsyncRead(this); - if (NS_FAILED(rv)) { - return NS_OK; // just ignore - } - DICTIONARY_LOG(("Waiting for data")); - } else { - // XXX Error out any channels waiting on this cache entry. Also, - // remove the dictionary entry from the origin. - mNotCached = true; // For Prefetch() - DICTIONARY_LOG(("Prefetched cache entry not available!")); - } - - return NS_OK; -} - -//---------------------------------------------------------------------------------- - -// Read the metadata for an Origin and parse it, creating DictionaryCacheEntrys -// as needed. 
If aType is TYPE_OTHER, there is no Match() to do -void DictionaryOriginReader::Start( - DictionaryOrigin* aOrigin, nsACString& aKey, nsIURI* aURI, - ExtContentPolicyType aType, DictionaryCache* aCache, - const std::function<nsresult(bool, DictionaryCacheEntry*)>& aCallback) { - mOrigin = aOrigin; - mURI = aURI; - mType = aType; - mCallback = aCallback; - mCache = aCache; - - AUTO_PROFILER_FLOW_MARKER("DictionaryOriginReader::Start", NETWORK, - Flow::FromPointer(this)); - - // The cache entry is for originattribute extension of - // META_DICTIONARY_PREFIX, plus key of prepath - - // This also keeps this alive until we get the callback. We must do this - // BEFORE we call AsyncOpenURIString, or we may get a callback to - // OnCacheEntryAvailable before we can do this - mOrigin->mWaitingCacheRead.AppendElement(this); - if (mOrigin->mWaitingCacheRead.Length() == 1) { // was empty - DICTIONARY_LOG(("DictionaryOriginReader::Start(%s): %p", - PromiseFlatCString(aKey).get(), this)); - DictionaryCache::sCacheStorage->AsyncOpenURIString( - aKey, META_DICTIONARY_PREFIX, - aOrigin - ? 
nsICacheStorage::OPEN_NORMALLY | - nsICacheStorage::CHECK_MULTITHREADED - : nsICacheStorage::OPEN_READONLY | nsICacheStorage::OPEN_SECRETLY | - nsICacheStorage::CHECK_MULTITHREADED, - this); - // This one will get the direct callback to do Match() - } - // Else we already have a read for this cache entry pending, just wait - // for that -} - -void DictionaryOriginReader::FinishMatch() { - RefPtr<DictionaryCacheEntry> result; - // Don't Match if this was a call from AddEntry() - if (mType != ExtContentPolicy::TYPE_OTHER) { - nsCString path; - mURI->GetPathQueryRef(path); - result = mOrigin->Match(path, mType); - } - DICTIONARY_LOG(("Done with reading origin for %p", mOrigin.get())); - (mCallback)(true, result); -} - -NS_IMPL_ISUPPORTS(DictionaryOriginReader, nsICacheEntryOpenCallback, - nsIStreamListener) - -//----------------------------------------------------------------------------- -// nsICacheEntryOpenCallback implementation -//----------------------------------------------------------------------------- - -// This may be called on a random thread due to -// nsICacheStorage::CHECK_MULTITHREADED -NS_IMETHODIMP DictionaryOriginReader::OnCacheEntryCheck(nsICacheEntry* entry, - uint32_t* result) { - *result = nsICacheEntryOpenCallback::ENTRY_WANTED; - DICTIONARY_LOG( - ("DictionaryOriginReader::OnCacheEntryCheck this=%p for entry %p", this, - entry)); - return NS_OK; -} - -NS_IMETHODIMP DictionaryOriginReader::OnCacheEntryAvailable( - nsICacheEntry* aCacheEntry, bool isNew, nsresult result) { - MOZ_ASSERT(NS_IsMainThread(), "Got cache entry off main thread!"); - DICTIONARY_LOG( - ("DictionaryOriginReader::OnCacheEntryAvailable this=%p for entry %p", - this, aCacheEntry)); - - if (!aCacheEntry) { - // Didn't have any dictionaries for this origin, and must have been readonly - for (auto& reader : mOrigin->mWaitingCacheRead) { - (reader->mCallback)(true, nullptr); - } - mOrigin->mWaitingCacheRead.Clear(); - AUTO_PROFILER_TERMINATING_FLOW_MARKER( - 
"DictionaryOriginReader::OnCacheEntryAvailable", NETWORK, - Flow::FromPointer(this)); - return NS_OK; - } - - AUTO_PROFILER_FLOW_MARKER("DictionaryOriginReader::VisitMetaData", NETWORK, - Flow::FromPointer(this)); - mOrigin->SetCacheEntry(aCacheEntry); - // There's no data in the cache entry, just metadata - nsCOMPtr<nsICacheEntryMetaDataVisitor> metadata(mOrigin); - aCacheEntry->VisitMetaData(metadata); - - // This list is the only thing keeping us alive - RefPtr<DictionaryOriginReader> safety(this); - for (auto& reader : mOrigin->mWaitingCacheRead) { - reader->FinishMatch(); - } - mOrigin->mWaitingCacheRead.Clear(); - AUTO_PROFILER_TERMINATING_FLOW_MARKER( - "DictionaryOriginReader::OnCacheEntryAvailable", NETWORK, - Flow::FromPointer(this)); - return NS_OK; -} - -//----------------------------------------------------------------------------- -// nsIStreamListener implementation -//----------------------------------------------------------------------------- - -NS_IMETHODIMP -DictionaryOriginReader::OnStartRequest(nsIRequest* request) { - DICTIONARY_LOG(("DictionaryOriginReader %p OnStartRequest", this)); - return NS_OK; -} - -NS_IMETHODIMP -DictionaryOriginReader::OnDataAvailable(nsIRequest* request, - nsIInputStream* aInputStream, - uint64_t aOffset, uint32_t aCount) { - DICTIONARY_LOG( - ("DictionaryOriginReader %p OnDataAvailable %u", this, aCount)); - return NS_OK; -} - -NS_IMETHODIMP -DictionaryOriginReader::OnStopRequest(nsIRequest* request, nsresult result) { - DICTIONARY_LOG(("DictionaryOriginReader %p OnStopRequest", this)); - return NS_OK; -} - -// static -already_AddRefed<DictionaryCache> DictionaryCache::GetInstance() { - // XXX lock? 
In practice probably not needed, in theory yes - if (!gDictionaryCache) { - gDictionaryCache = new DictionaryCache(); - MOZ_ASSERT(NS_SUCCEEDED(gDictionaryCache->Init())); - } - return do_AddRef(gDictionaryCache); -} - -nsresult DictionaryCache::Init() { - if (XRE_IsParentProcess()) { - nsCOMPtr<nsICacheStorageService> cacheStorageService( - components::CacheStorage::Service()); - if (!cacheStorageService) { - return NS_ERROR_FAILURE; - } - nsCOMPtr<nsICacheStorage> temp; - nsresult rv = cacheStorageService->DiskCacheStorage( - nullptr, getter_AddRefs(temp)); // Don't need a load context - if (NS_FAILED(rv)) { - return rv; - } - sCacheStorage = temp; - } - DICTIONARY_LOG(("Inited DictionaryCache %p", sCacheStorage.get())); - return NS_OK; -} - -// static -void DictionaryCache::Shutdown() { - gDictionaryCache = nullptr; - sCacheStorage = nullptr; -} - -nsresult DictionaryCache::AddEntry(nsIURI* aURI, const nsACString& aKey, - const nsACString& aPattern, - nsTArray<nsCString>& aMatchDest, - const nsACString& aId, - const Maybe<nsCString>& aHash, - bool aNewEntry, uint32_t aExpiration, - DictionaryCacheEntry** aDictEntry) { - // Note that normally we're getting an entry in and until all the data - // has been received, we can't use it. The Hash being null is a flag - // that it's not yet valid. 
- DICTIONARY_LOG(("AddEntry for %s, pattern %s, id %s, expiration %u", - PromiseFlatCString(aKey).get(), - PromiseFlatCString(aPattern).get(), - PromiseFlatCString(aId).get(), aExpiration)); - // Note that we don't know if there's an entry for this key in the origin - RefPtr<DictionaryCacheEntry> dict = new DictionaryCacheEntry( - aKey, aPattern, aMatchDest, aId, aExpiration, aHash); - dict = AddEntry(aURI, aNewEntry, dict); - if (dict) { - *aDictEntry = do_AddRef(dict).take(); - return NS_OK; - } - DICTIONARY_LOG( - ("Failed adding entry for %s", PromiseFlatCString(aKey).get())); - *aDictEntry = nullptr; - return NS_ERROR_FAILURE; -} - -already_AddRefed<DictionaryCacheEntry> DictionaryCache::AddEntry( - nsIURI* aURI, bool aNewEntry, DictionaryCacheEntry* aDictEntry) { - // Note that normally we're getting an entry in and until all the data - // has been received, we can't use it. The Hash being null is a flag - // that it's not yet valid. - nsCString prepath; - if (NS_FAILED(GetDictPath(aURI, prepath))) { - return nullptr; - } - DICTIONARY_LOG( - ("AddEntry: %s, %d, %p", prepath.get(), aNewEntry, aDictEntry)); - // create for the origin if it doesn't exist - RefPtr<DictionaryCacheEntry> newEntry; - Unused << mDictionaryCache.WithEntryHandle(prepath, [&](auto&& entry) { - auto& origin = entry.OrInsertWith([&] { - RefPtr<DictionaryOrigin> origin = new DictionaryOrigin(prepath, nullptr); - // Create a cache entry for this if it doesn't exist. 
Note - // that the entry we're adding will need to be saved later once - // we have the cache entry - - // This creates a cycle until the dictionary is removed from the cache - aDictEntry->SetOrigin(origin); - - // Open (and parse metadata) or create - RefPtr<DictionaryOriginReader> reader = new DictionaryOriginReader(); - // the type is irrelevant; we won't be calling Match() - reader->Start( - origin, prepath, aURI, ExtContentPolicy::TYPE_OTHER, this, - [entry = RefPtr(aDictEntry)]( - bool, DictionaryCacheEntry* aDict) { // XXX avoid so many lambdas - // which cause allocations - // Write the dirty entry we couldn't write before once - // we get the hash - entry->WriteOnHash(); - return NS_OK; - }); - // Since this is read asynchronously, we need to either add the entry - // async once the read is done and it's populated, or we have to handle - // collisions on the read - return origin; - }); - - newEntry = origin->AddEntry(aDictEntry, aNewEntry); - DICTIONARY_LOG(("AddEntry: added %s", prepath.get())); - return NS_OK; - }); - return newEntry.forget(); -} - -nsresult DictionaryCache::RemoveEntry(nsIURI* aURI, const nsACString& aKey) { - nsCString prepath; - if (NS_FAILED(GetDictPath(aURI, prepath))) { - return NS_ERROR_FAILURE; - } - DICTIONARY_LOG(("DictionaryCache::RemoveEntry for %s : %s", prepath.get(), - PromiseFlatCString(aKey).get())); - if (auto origin = mDictionaryCache.Lookup(prepath)) { - return origin.Data()->RemoveEntry(aKey); - } - return NS_ERROR_FAILURE; -} - -void DictionaryCache::Clear() { - // There may be active Prefetch()es running, note, and active - // fetches using dictionaries. They will stay alive until the - // channels using them go away. 
- mDictionaryCache.Clear(); -} - -// Remove a dictionary if it exists for the key given -// static -void DictionaryCache::RemoveDictionaryFor(const nsACString& aKey) { - RefPtr<DictionaryCache> cache = GetInstance(); - NS_DispatchToMainThread(NewRunnableMethod<const nsCString>( - "DictionaryCache::RemoveDictionaryFor", cache, - &DictionaryCache::RemoveDictionary, aKey)); -} - -// Remove a dictionary if it exists for the key given -void DictionaryCache::RemoveDictionary(const nsACString& aKey) { - DICTIONARY_LOG( - ("Removing dictionary for %80s", PromiseFlatCString(aKey).get())); - - nsCOMPtr<nsIURI> uri; - if (NS_FAILED(NS_NewURI(getter_AddRefs(uri), aKey))) { - return; - } - nsAutoCString prepath; - if (NS_SUCCEEDED(GetDictPath(uri, prepath))) { - if (auto origin = mDictionaryCache.Lookup(prepath)) { - origin.Data()->RemoveEntry(aKey); - } - } -} - -// Remove a dictionary if it exists for the key given. Mainthread only. -// Note: due to cookie samesite rules, we need to clean for all ports -// static -void DictionaryCache::RemoveDictionariesForOrigin(nsIURI* aURI) { - // There's no PrePathNoPort() - nsAutoCString temp; - aURI->GetScheme(temp); - nsCString origin(temp); - aURI->GetUserPass(temp); - origin += "://"_ns + temp; - aURI->GetHost(temp); - origin += temp; - - DICTIONARY_LOG(("Removing all dictionaries for origin of %s (%zu)", - PromiseFlatCString(origin).get(), origin.Length())); - RefPtr<DictionaryCache> cache = GetInstance(); - // We can't just use Remove here; the ClearSiteData service strips the - // port. In that case, We need to clear all that match the host with any - // port or none. - cache->mDictionaryCache.RemoveIf([&origin](auto& entry) { - // We need to drop any port from entry (and origin). Assuming they're - // the same up to the / or : in mOrigin, we want to limit the host - // there. We also know that entry is https://. 
- // Verify that: - // a) they're equal to that point - // b) that the next character of mOrigin is '/' or ':', which avoids - // issues like matching https://foo.bar to (mOrigin) - // https://foo.barsoom.com:666/ - DICTIONARY_LOG( - ("Possibly removing dictionary origin for %s (vs %s), %zu vs %zu", - entry.Data()->mOrigin.get(), PromiseFlatCString(origin).get(), - entry.Data()->mOrigin.Length(), origin.Length())); - if (entry.Data()->mOrigin.Length() > origin.Length() && - (entry.Data()->mOrigin[origin.Length()] == '/' || // no port - entry.Data()->mOrigin[origin.Length()] == ':')) { // port - // no strncmp() for nsCStrings... - nsDependentCSubstring host = - Substring(entry.Data()->mOrigin, 0, - origin.Length()); // not including '/' or ':' - DICTIONARY_LOG(("Compare %s vs %s", entry.Data()->mOrigin.get(), - PromiseFlatCString(host).get())); - if (origin.Equals(host)) { - DICTIONARY_LOG( - ("RemoveDictionaries: Removing dictionary origin %p for %s", - entry.Data().get(), entry.Data()->mOrigin.get())); - entry.Data()->Clear(); - return true; - } - } - return false; - }); -} - -// Remove a dictionary if it exists for the key given. Mainthread only -// static -void DictionaryCache::RemoveAllDictionaries() { - RefPtr<DictionaryCache> cache = GetInstance(); - - DICTIONARY_LOG(("Removing all dictionaries")); - for (auto& origin : cache->mDictionaryCache) { - origin.GetData()->Clear(); - } - cache->mDictionaryCache.Clear(); -} - -// Return an entry via a callback (async). -// If we don't have the origin in-memory, ask the cache for the origin, and -// when we get it, parse the metadata to build a DictionaryOrigin. -// Once we have a DictionaryOrigin (in-memory or parsed), scan it for matches. -// If it's not in the cache, return nullptr via callback. 
-void DictionaryCache::GetDictionaryFor( - nsIURI* aURI, ExtContentPolicyType aType, bool& aAsync, - nsHttpChannel* aChan, void (*aSuspend)(nsHttpChannel*), - const std::function<nsresult(bool, DictionaryCacheEntry*)>& aCallback) { - aAsync = false; - // Note: IETF 2.2.3 Multiple Matching Directories - // We need to return match-dest matches first - // If no match-dest, then the longest match - nsCString prepath; - if (NS_FAILED(GetDictPath(aURI, prepath))) { - (aCallback)(false, nullptr); - return; - } - // Match immediately if we've already created the origin and read any - // metadata - if (auto existing = mDictionaryCache.Lookup(prepath)) { - if (existing.Data()->mWaitingCacheRead.IsEmpty()) { - // Find the longest match - nsCString path; - RefPtr<DictionaryCacheEntry> result; - - aURI->GetPathQueryRef(path); - DICTIONARY_LOG(("GetDictionaryFor(%s %s)", prepath.get(), path.get())); - - result = existing.Data()->Match(path, aType); - (aCallback)(false, result); - } else { - DICTIONARY_LOG( - ("GetDictionaryFor(%s): Waiting for metadata read to match", - prepath.get())); - // Wait for the metadata read to complete - RefPtr<DictionaryOriginReader> reader = new DictionaryOriginReader(); - // Must do this before calling start, which can run the callbacks and call - // Resume - aAsync = true; - aSuspend(aChan); - reader->Start(existing.Data(), prepath, aURI, aType, this, aCallback); - } - return; - } - // We don't have an entry at all. We need to check if there's an entry - // on disk for <origin>, unless we know we have all entries in the memory - // cache. 
- - // Handle unknown origins by checking the disk cache - if (!sCacheStorage) { - (aCallback)(false, nullptr); // in case we have no disk storage - return; - } - - // Sync check to see if the entry exists - bool exists; - nsCOMPtr<nsIURI> prepathURI; - - if (NS_SUCCEEDED(NS_MutateURI(new net::nsStandardURL::Mutator()) - .SetSpec(prepath) - .Finalize(prepathURI)) && - NS_SUCCEEDED( - sCacheStorage->Exists(prepathURI, META_DICTIONARY_PREFIX, &exists)) && - exists) { - // To keep track of the callback, we need a new object to get the - // OnCacheEntryAvailable can resolve the callback. - DICTIONARY_LOG(("Reading %s for dictionary entries", prepath.get())); - RefPtr<DictionaryOrigin> origin = new DictionaryOrigin(prepath, nullptr); - // Add the origin to the list; we'll immediately start a reader which - // will set mWaitingCacheRead, so future GetDictionaryFor() calls - // will wait for the metadata to be read before doing Match() - mDictionaryCache.InsertOrUpdate(prepath, origin); - - RefPtr<DictionaryOriginReader> reader = new DictionaryOriginReader(); - // After Start(), if we drop this ref reader will kill itself on - // completion; it holds a self-ref - reader->Start(origin, prepath, aURI, aType, this, aCallback); - aAsync = true; - return; - } - // No dictionaries for origin - (aCallback)(false, nullptr); -} - -//----------------------------------------------------------------------------- -// DictionaryOrigin -//----------------------------------------------------------------------------- -NS_IMPL_ISUPPORTS(DictionaryOrigin, nsICacheEntryMetaDataVisitor) - -nsresult DictionaryOrigin::Write(DictionaryCacheEntry* aDictEntry) { - DICTIONARY_LOG(("DictionaryOrigin::Write %s %p", mOrigin.get(), aDictEntry)); - if (mEntry) { - return aDictEntry->Write(mEntry); - } - // Write it once DictionaryOriginReader creates the entry - mDeferredWrites = true; - return NS_OK; -} - -void DictionaryOrigin::SetCacheEntry(nsICacheEntry* aEntry) { - mEntry = aEntry; - if 
(mDeferredWrites) { - for (auto& entry : mEntries) { - if (NS_FAILED(Write(entry))) { - RemoveEntry(entry); - } - } - } - mDeferredWrites = false; - // Handle removes that were pending - for (auto& remove : mPendingRemove) { - DICTIONARY_LOG(("Pending RemoveEntry for %s", remove->mURI.get())); - remove->RemoveEntry(mEntry); - } - mPendingRemove.Clear(); -} - -already_AddRefed<DictionaryCacheEntry> DictionaryOrigin::AddEntry( - DictionaryCacheEntry* aDictEntry, bool aNewEntry) { - // Remove any entry for the same item - for (size_t i = 0; i < mEntries.Length(); i++) { - if (mEntries[i]->GetURI().Equals(aDictEntry->GetURI())) { - DictionaryCacheEntry* oldEntry = mEntries[i]; - if (aNewEntry) { - // We're overwriting an existing entry, perhaps with a new hash. It - // might be different, of course. - // Until we receive and save the new data, we should use the old data. - - // We need to pause this channel, regardless of how it's encoded, - // until the entry we're replacing has either no users, or has data - // read in from the cache. Then we can un-Suspend and start - // replacing the data in the cache itself. If there are no current - // users, and we start replacing the data, we need to remove the - // old entry so no one tries to use the old data/hash for a new - // request. - - // Note that when we start replacing data in the cache we need to - // also remove it from the origin's entry in the cache, in case we - // exit or crash before we finish replacing the entry and updating - // the origin's entry with the new hash. - - // Once we've replaced the entry (which will be after we have - // hash), new requests will use the new data/hash. I.e. we'll - // still allow new requests to use the old cache data/hash until - // the swap occurs. Once the swap happens, the channels using the - // old data/hash will still have an mDictDecoding reference to the - // DictionaryCacheEntry for the old data/hash. 
- - // XXX possible edge case: if a second request to replace the - // entry appears. Is this possible, or would the second request - // for the same URI get subsumed into the older one still in - // process? I'm guessing it doesn't, so we may need to deal with this - - DICTIONARY_LOG(( - "Replacing dictionary %p for %s: new will be %p", mEntries[i].get(), - PromiseFlatCString(oldEntry->GetURI()).get(), oldEntry)); - // May be overkill to check HasHash here - if (mEntries[i]->IsReading() && !aDictEntry->HasHash()) { - DICTIONARY_LOG(("Old entry is reading data")); - // If the old entry doesn't already have the data from the - // dictionary, we'll need to Suspend this channel, and do a - // replace later. Remember this new entry so when the current - // entry has it's data in memory we can un-Suspend the new - // channel/entry. When the new entry finishes saving, it will - // use the mReplacement link to come back and insert itself - // into mEntries and drop the old entry. Use an origin link - // for that since the old entry could in theory get purged and - // removed from the origin before we finish. - mEntries[i]->SetReplacement(aDictEntry, this); - // SetReplacement will also set aDictEntry->mShouldSuspend - return do_AddRef(aDictEntry); - } else { - DICTIONARY_LOG(("Removing old entry, no users or already read data")); - // We can just replace, there are no users active for the old data. - // This stops new requests from trying to use the old data we're in - // the process of replacing Remove the entry from the Origin and - // Write(). - mEntries[i]->RemoveEntry(mEntry); - mEntries.RemoveElementAt(i); - } - } else { - // We're updating an existing entry (on a 304 Not Modified). Assume - // the values may have changed (though likely they haven't). 
Check Spec - // XXX - DICTIONARY_LOG( - ("Updating dictionary for %s (%p)", mOrigin.get(), oldEntry)); - oldEntry->CopyFrom(aDictEntry); - // write the entry back if something changed - // XXX Check if something changed - oldEntry->Write(mEntry); - - // We don't need to reference the entry - return nullptr; - } - break; - } - } - - DICTIONARY_LOG(("New dictionary %sfor %s: %p", - aDictEntry->HasHash() ? "" : "(pending) ", mOrigin.get(), - aDictEntry)); - if (aDictEntry->HasHash()) { - mEntries.AppendElement(aDictEntry); - } else { - // Still need to receive the data. When we have the hash, move to - // mEntries (and Write) using entry->mOrigin - mPendingEntries.AppendElement(aDictEntry); - aDictEntry->SetReplacement(nullptr, this); - } - - // DictionaryCache/caller is responsible for ensure this gets written if - // needed - return do_AddRef(aDictEntry); -} - -nsresult DictionaryOrigin::RemoveEntry(const nsACString& aKey) { - DICTIONARY_LOG( - ("DictionaryOrigin::RemoveEntry for %s", PromiseFlatCString(aKey).get())); - for (const auto& dict : mEntries) { - DICTIONARY_LOG( - (" Comparing to %s", PromiseFlatCString(dict->GetURI()).get())); - if (dict->GetURI().Equals(aKey)) { - // Ensure it doesn't disappear on us - RefPtr<DictionaryCacheEntry> hold(dict); - DICTIONARY_LOG(("Removing %p", dict.get())); - mEntries.RemoveElement(dict); - if (mEntry) { - hold->RemoveEntry(mEntry); - } else { - // We don't have the cache entry yet. 
Defer the removal from - // the entry until we do - mPendingRemove.AppendElement(hold); - } - return NS_OK; - } - } - DICTIONARY_LOG(("DictionaryOrigin::RemoveEntry (pending) for %s", - PromiseFlatCString(aKey).get())); - for (const auto& dict : mPendingEntries) { - DICTIONARY_LOG( - (" Comparing to %s", PromiseFlatCString(dict->GetURI()).get())); - if (dict->GetURI().Equals(aKey)) { - // Ensure it doesn't disappear on us - RefPtr<DictionaryCacheEntry> hold(dict); - DICTIONARY_LOG(("Removing %p", dict.get())); - mPendingEntries.RemoveElement(dict); - hold->RemoveEntry(mEntry); - return NS_OK; - } - } - return NS_ERROR_FAILURE; -} - -void DictionaryOrigin::FinishAddEntry(DictionaryCacheEntry* aEntry) { - // if aDictEntry is in mPendingEntries, move to mEntries - if (mPendingEntries.RemoveElement(aEntry)) { - // We need to give priority to elements fetched most recently if they - // have an equivalent match length (and dest) - mEntries.InsertElementAt(0, aEntry); - } - DICTIONARY_LOG(("FinishAddEntry(%s)", aEntry->mURI.get())); - if (MOZ_UNLIKELY(MOZ_LOG_TEST(gDictionaryLog, mozilla::LogLevel::Debug))) { - DumpEntries(); - } -} - -void DictionaryOrigin::RemoveEntry(DictionaryCacheEntry* aEntry) { - DICTIONARY_LOG(("RemoveEntry(%s)", aEntry->mURI.get())); - if (!mEntries.RemoveElement(aEntry)) { - mPendingEntries.RemoveElement(aEntry); - } - if (MOZ_UNLIKELY(MOZ_LOG_TEST(gDictionaryLog, mozilla::LogLevel::Debug))) { - DumpEntries(); - } -} - -void DictionaryOrigin::DumpEntries() { - DICTIONARY_LOG(("*** Origin %s ***", mOrigin.get())); - for (const auto& dict : mEntries) { - DICTIONARY_LOG( - ("* %s: pattern %s, id %s, match-dest[0]: %s, hash: %s, expiration: " - "%u", - dict->mURI.get(), dict->mPattern.get(), dict->mId.get(), - dict->mMatchDest.IsEmpty() - ? 
"" - : dom::GetEnumString(dict->mMatchDest[0]).get(), - dict->mHash.get(), dict->mExpiration)); - } - DICTIONARY_LOG(("*** Pending ***")); - for (const auto& dict : mPendingEntries) { - DICTIONARY_LOG( - ("* %s: pattern %s, id %s, match-dest[0]: %s, hash: %s, expiration: " - "%u", - dict->mURI.get(), dict->mPattern.get(), dict->mId.get(), - dict->mMatchDest.IsEmpty() - ? "" - : dom::GetEnumString(dict->mMatchDest[0]).get(), - dict->mHash.get(), dict->mExpiration)); - } -} - -void DictionaryOrigin::Clear() { - mEntries.Clear(); - mPendingEntries.Clear(); - // We may be under a lock; doom this asynchronously - NS_DispatchBackgroundTask(NS_NewRunnableFunction( - "DictionaryOrigin::Clear", - [entry = mEntry]() { entry->AsyncDoom(nullptr); })); -} - -// caller will throw this into a RefPtr -DictionaryCacheEntry* DictionaryOrigin::Match(const nsACString& aPath, - ExtContentPolicyType aType) { - uint32_t longest = 0; - DictionaryCacheEntry* result = nullptr; - uint32_t now = mozilla::net::NowInSeconds(); - - for (const auto& dict : mEntries) { - if (dict->Match(aPath, aType, now, longest)) { - result = dict; - } - } - // XXX if we want to LRU the origins so we can push them out of memory based - // on LRU, do something like this: - /* - if (result) { - removeFrom(dictionarycase->mDictionaryCacheLRU); - dictionarycase->mDictionaryCacheLRU.insertFront(this); - } - */ - return result; -} - -//----------------------------------------------------------------------------- -// DictionaryOrigin::nsICacheEntryMetaDataVisitor -//----------------------------------------------------------------------------- -nsresult DictionaryOrigin::OnMetaDataElement(const char* asciiKey, - const char* asciiValue) { - DICTIONARY_LOG(("DictionaryOrigin::OnMetaDataElement %s %s", - asciiKey ? 
asciiKey : "", asciiValue)); - - // If we already have an entry for this key (pending or in the list), - // don't override it - for (auto& entry : mEntries) { - if (entry->GetURI().Equals(asciiKey)) { - return NS_OK; - } - } - for (auto& entry : mPendingEntries) { - if (entry->GetURI().Equals(asciiKey)) { - return NS_OK; - } - } - RefPtr<DictionaryCacheEntry> entry = new DictionaryCacheEntry(asciiKey); - if (entry->ParseMetadata(asciiValue)) { - mEntries.AppendElement(entry); - } - return NS_OK; -} - -// Overall structure: -// Dictionary: -// DictionaryCache: -// OriginHashmap: -// LinkedList: DictionaryCacheEntry -// Data from cache (sometimes) -// -// Each origin is in the cache as a dictionary-origin entry. In that -// entry's metadata, we have an LRU-sorted list of dictionary entries to be able -// to build a DictionaryCacheEntry. -// When we offer a dictionary on a load, we'll start prefetching the data into -// the DictionaryCacheEntry for the item in the cache. When the response comes -// in, we'll either use it to decompress, or indicate we no longer care about -// the data. If no one cares about the data, we'll purge it from memory. -// Hold refs to the data in requests. When the only ref is in the -// DictionaryCacheEntry, purge the data. This could also be done via the -// InUse counter -// -// XXX be careful about thrashing the cache loading and purging, esp with RCWN. -// Note that this makes RCWN somewhat superfluous for loads that have a -// dictionary. -// XXX Perhaps allow a little dwell time in ram if not too large? - -// When a load comes in, we need to block decompressing it on having the data -// from the cache if it's dcb or dcz. -// XXX If the cache fails for some reason, we probably should consider -// re-fetching the data without Dictionary-Available. 
- -} // namespace net -} // namespace mozilla diff --git a/netwerk/cache2/Dictionary.h b/netwerk/cache2/Dictionary.h @@ -1,368 +0,0 @@ -/* vim: set ts=2 sts=2 et sw=2: */ -/* This Source Code Form is subject to the terms of the Mozilla Public - * License, v. 2.0. If a copy of the MPL was not distributed with this - * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ - -#ifndef mozilla_net_Dictionary_h -#define mozilla_net_Dictionary_h - -#include "nsCOMPtr.h" -#include "nsICacheEntry.h" -#include "nsICacheEntryOpenCallback.h" -#include "nsICacheStorageService.h" -#include "nsICacheStorageVisitor.h" -#include "nsICryptoHash.h" -#include "nsIInterfaceRequestor.h" -#include "nsIObserver.h" -#include "nsIStreamListener.h" -#include "mozilla/RefPtr.h" -#include "mozilla/Vector.h" -#include "nsString.h" -#include "nsTArray.h" -#include "mozilla/dom/RequestBinding.h" -#include "mozilla/TimeStamp.h" -#include "nsTHashMap.h" -#include "nsHashKeys.h" - -class nsICacheStorage; -class nsIIOService; -class nsILoadContextInfo; - -// Version of metadata entries we expect -static const uint32_t METADATA_DICTIONARY_VERSION = 1; -#define META_DICTIONARY_PREFIX "dict:"_ns - -namespace mozilla { -namespace net { - -class nsHttpChannel; -class DictionaryOrigin; - -// Outstanding requests that offer this dictionary will hold a reference to it. -// If it's replaced (or removed) during the request, we would a) read the data -// into memory* b) unlink this from the origin in the memory cache. -// -// * or we wait for read-into-memory to finish, if we start reading entries -// when we send the request. -// -// When creating an entry from incoming data, we'll create it with no hash -// initially until the full data has arrived, then update the Hash. 
-class DictionaryCacheEntry final : public nsICacheEntryOpenCallback, - public nsIStreamListener { - friend class DictionaryOrigin; - - private: - ~DictionaryCacheEntry(); - - public: - NS_DECL_THREADSAFE_ISUPPORTS - NS_DECL_NSICACHEENTRYOPENCALLBACK - NS_DECL_NSIREQUESTOBSERVER - NS_DECL_NSISTREAMLISTENER - - explicit DictionaryCacheEntry(const char* aKey); - DictionaryCacheEntry(const nsACString& aKey, const nsACString& aPattern, - nsTArray<nsCString>& aMatchDest, const nsACString& aId, - uint32_t aExpiration = 0, - const Maybe<nsCString>& aHash = Nothing()); - - static void ConvertMatchDestToEnumArray( - const nsTArray<nsCString>& aMatchDest, - nsTArray<dom::RequestDestination>& aMatchEnums); - - // returns true if the pattern for the dictionary matches the path given - bool Match(const nsACString& aFilePath, ExtContentPolicyType aType, - uint32_t aNow, uint32_t& aLongest); - - // This will fail if the cache entry is no longer available. - // Start reading the cache entry into memory and call completion - // function when done - nsresult Prefetch(nsILoadContextInfo* aLoadContextInfo, bool& aShouldSuspend, - const std::function<void()>& aFunc); - - const nsACString& GetHash() const { return mHash; } - - bool HasHash() { - // Hard to statically check since we're called from lambdas in - // GetDictionaryFor - return !mHash.IsEmpty(); - } - - void SetHash(const nsACString& aHash) { - MOZ_ASSERT(NS_IsMainThread()); - mHash = aHash; - } - - void WriteOnHash(); - - void SetOrigin(DictionaryOrigin* aOrigin) { mOrigin = aOrigin; } - - const nsCString& GetId() const { return mId; } - - // keep track of requests that may need the data - void InUse(); - void UseCompleted(); - bool IsReading() const { return mUsers > 0 && !mWaitingPrefetch.IsEmpty(); } - - void SetReplacement(DictionaryCacheEntry* aEntry, DictionaryOrigin* aOrigin) { - mReplacement = aEntry; - mOrigin = aOrigin; - if (mReplacement) { - mReplacement->mShouldSuspend = true; - mReplacement->mBlocked = true; - } 
- } - - bool ShouldSuspendUntilCacheRead() const { return mShouldSuspend; } - - // aFunc is called when we have finished reading a dictionary from the - // cache, or we have no users waiting for cache data (cancelled, etc) - void CallbackOnCacheRead(const std::function<void()>& aFunc) { - // the reasons to call back are identical to Prefetch() - mWaitingPrefetch.AppendElement(aFunc); - } - - const nsACString& GetURI() const { return mURI; } - - const Vector<uint8_t>& GetDictionary() const { return mDictionaryData; } - - // Accumulate a hash while saving a file being received to the cache - void AccumulateHash(const char* aBuf, int32_t aCount); - void FinishHash(); - - // return a pointer to the data and length - uint8_t* DictionaryData(size_t* aLength) const { - *aLength = mDictionaryData.length(); - return (uint8_t*)mDictionaryData.begin(); - } - - bool DictionaryReady() const { return mDictionaryDataComplete; } - - size_t SizeOfIncludingThis(mozilla::MallocSizeOf mallocSizeOf) const { - // XXX - return mallocSizeOf(this); - } - - static nsresult ReadCacheData(nsIInputStream* aInStream, void* aClosure, - const char* aFromSegment, uint32_t aToOffset, - uint32_t aCount, uint32_t* aWriteCount); - - void MakeMetadataEntry(nsCString& aNewValue); - - nsresult Write(nsICacheEntry* aEntry); - - nsresult RemoveEntry(nsICacheEntry* aCacheEntry); - - // Parse metadata from DictionaryOrigin - bool ParseMetadata(const char* aSrc); - - void CopyFrom(DictionaryCacheEntry* aOther) { - mURI = aOther->mURI; - mPattern = aOther->mPattern; - mId = aOther->mId; - // XXX match-dest - // XXX type - } - - void UnblockAddEntry(DictionaryOrigin* aOrigin); - - private: - // URI (without ref) for the dictionary - nsCString mURI; - // Expiration time, or 0 for none (default) - uint32_t mExpiration{0}; - - nsCString mPattern; - nsCString mId; // max length 1024 - nsTArray<dom::RequestDestination> mMatchDest; - // dcb and dcz use type 'raw'. 
We're allowed to ignore types we don't - // understand, so we can fail to record a dictionary with type != 'raw' - // nsCString mType; - - // SHA-256 hash value ready to put into a header - nsCString mHash; - uint32_t mUsers{0}; // active requests using this entry - // in-memory copy of the entry to use to decompress incoming data - Vector<uint8_t> mDictionaryData; - bool mDictionaryDataComplete{false}; - - // for accumulating SHA-256 hash values for dictionaries - nsCOMPtr<nsICryptoHash> mCrypto; - - // call these when prefetch is complete - nsTArray<std::function<void()>> mWaitingPrefetch; - - // If we need to Write() an entry before we know the hash, remember the origin - // here (creates a temporary cycle). Clear on StopRequest - RefPtr<DictionaryOrigin> mOrigin; - // Don't store origin for write if we've already received OnStopRequest - bool mStopReceived{false}; - - // If set, a new entry wants to replace us, and we have active decoding users. - // When we finish reading data into this entry for decoding, do 2 things: - // Remove our entry from origin->mEntries (so no future requests find this, - // and un-Suspend the new channel so it can start saving data into the cache. - RefPtr<DictionaryCacheEntry> mReplacement; - - // We should suspend until the ond entry has been read - bool mShouldSuspend{false}; - - // The cache entry has been removed - bool mNotCached{false}; - - // We're blocked from taking over for the old entry for now - bool mBlocked{false}; -}; - -// XXX Do we want to pre-read dictionaries into RAM at startup (lazily)? -// If we have all dictionaries stored in the cache, we don't need to do -// lookups to find if an origin has dictionaries or not, and we don't need to -// store empty entries (and LRU them). Downside would be if there are a LOT of -// origins with dictionaries, which may eventually happen, it would use more -// memory for rarely used origins. 
We could have a limit for dictionaries, and -// above that switch to partial caching and empty entries for origins without. - -class DictionaryCache; - -class DictionaryOriginReader final : public nsICacheEntryOpenCallback, - public nsIStreamListener { - NS_DECL_THREADSAFE_ISUPPORTS - NS_DECL_NSICACHEENTRYOPENCALLBACK - NS_DECL_NSIREQUESTOBSERVER - NS_DECL_NSISTREAMLISTENER - - DictionaryOriginReader() {} - - void Start( - DictionaryOrigin* aOrigin, nsACString& aKey, nsIURI* aURI, - ExtContentPolicyType aType, DictionaryCache* aCache, - const std::function<nsresult(bool, DictionaryCacheEntry*)>& aCallback); - void FinishMatch(); - - private: - ~DictionaryOriginReader() {} - - RefPtr<DictionaryOrigin> mOrigin; - nsCOMPtr<nsIURI> mURI; - ExtContentPolicyType mType; - std::function<nsresult(bool, DictionaryCacheEntry*)> mCallback; - RefPtr<DictionaryCache> mCache; -}; - -// using DictCacheList = AutoCleanLinkedList<RefPtr<DictionaryCacheEntry>>; -using DictCacheList = nsTArray<RefPtr<DictionaryCacheEntry>>; - -// XXX if we want to have a parallel LRU list for pushing origins out of memory, -// add this: public LinkedListElement<RefPtr<DictionaryOrigin>>, -class DictionaryOrigin : public nsICacheEntryMetaDataVisitor { - friend class DictionaryCache; - friend class DictionaryOriginReader; - - public: - NS_DECL_THREADSAFE_ISUPPORTS - NS_DECL_NSICACHEENTRYMETADATAVISITOR - - DictionaryOrigin(const nsACString& aOrigin, nsICacheEntry* aEntry) - : mOrigin(aOrigin), mEntry(aEntry) {} - - void SetCacheEntry(nsICacheEntry* aEntry); - nsresult Write(DictionaryCacheEntry* aDictEntry); - already_AddRefed<DictionaryCacheEntry> AddEntry( - DictionaryCacheEntry* aDictEntry, bool aNewEntry); - nsresult RemoveEntry(const nsACString& aKey); - void RemoveEntry(DictionaryCacheEntry* aEntry); - DictionaryCacheEntry* Match(const nsACString& path, - ExtContentPolicyType aType); - void FinishAddEntry(DictionaryCacheEntry* aEntry); - void DumpEntries(); - void Clear(); - - private: - virtual 
~DictionaryOrigin() {} - - nsCString mOrigin; - nsCOMPtr<nsICacheEntry> mEntry; - DictCacheList mEntries; - // Dictionaries currently being received. Once these get a Hash, move to - // mEntries - DictCacheList mPendingEntries; - // Dictionaries removed from mEntries but waiting to be removed from the - // Cache metadata - DictCacheList mPendingRemove; - // Write out all entries once we have a cacheentry - bool mDeferredWrites{false}; - - // readers that are waiting for this origin's metadata to be read - nsTArray<RefPtr<DictionaryOriginReader>> mWaitingCacheRead; -}; - -// singleton class -class DictionaryCache final { - private: - DictionaryCache() { - nsresult rv = Init(); - Unused << rv; - MOZ_DIAGNOSTIC_ASSERT(NS_SUCCEEDED(rv)); - } - ~DictionaryCache() {} - - friend class DictionaryOriginReader; - friend class DictionaryCacheEntry; - - public: - NS_INLINE_DECL_THREADSAFE_REFCOUNTING(DictionaryCache) - - static already_AddRefed<DictionaryCache> GetInstance(); - - nsresult Init(); - static void Shutdown(); - - nsresult AddEntry(nsIURI* aURI, const nsACString& aKey, - const nsACString& aPattern, nsTArray<nsCString>& aMatchDest, - const nsACString& aId, const Maybe<nsCString>& aHash, - bool aNewEntry, uint32_t aExpiration, - DictionaryCacheEntry** aDictEntry); - - already_AddRefed<DictionaryCacheEntry> AddEntry( - nsIURI* aURI, bool aNewEntry, DictionaryCacheEntry* aDictEntry); - - static void RemoveDictionaryFor(const nsACString& aKey); - - // Remove a dictionary if it exists for the key given - void RemoveDictionary(const nsACString& aKey); - - nsresult RemoveEntry(nsIURI* aURI, const nsACString& aKey); - - static void RemoveDictionariesForOrigin(nsIURI* aURI); - static void RemoveAllDictionaries(); - - // Clears all ports at host - void Clear(); - - // return an entry - void GetDictionaryFor( - nsIURI* aURI, ExtContentPolicyType aType, bool& aAsync, - nsHttpChannel* aChan, void (*aSuspend)(nsHttpChannel*), - const std::function<nsresult(bool, 
DictionaryCacheEntry*)>& aCallback); - - size_t SizeOfIncludingThis(mozilla::MallocSizeOf mallocSizeOf) const { - // XXX - return mallocSizeOf(this); - } - - private: - static StaticRefPtr<nsICacheStorage> sCacheStorage; - - // In-memory cache of dictionary entries. HashMap, keyed by origin, of - // Linked list (LRU order) of valid dictionaries for the origin. - // We keep empty entries in there to avoid hitting the disk cache to find out - // if there are dictionaries for an origin. - // Static assertions fire if we try to have a LinkedList directly in an - // nsTHashMap - nsTHashMap<nsCStringHashKey, RefPtr<DictionaryOrigin>> mDictionaryCache; -}; - -} // namespace net -} // namespace mozilla - -#endif // mozilla_net_Dictionary_h diff --git a/netwerk/cache2/moz.build b/netwerk/cache2/moz.build @@ -25,10 +25,7 @@ EXPORTS += [ "CacheStorageService.h", ] -EXPORTS.mozilla.net += [ - "CachePurgeLock.h", - "Dictionary.h", -] +EXPORTS.mozilla.net += ["CachePurgeLock.h"] SOURCES += [ "CacheStorage.cpp", @@ -53,7 +50,6 @@ UNIFIED_SOURCES += [ "CacheLog.cpp", "CacheObserver.cpp", "CacheStorageService.cpp", - "Dictionary.cpp", ] if CONFIG["MOZ_WIDGET_TOOLKIT"] != "android": diff --git a/netwerk/cache2/nsICacheEntry.idl b/netwerk/cache2/nsICacheEntry.idl @@ -12,13 +12,6 @@ interface nsILoadContextInfo; interface nsIOutputStream; interface nsITransportSecurityInfo; -%{C++ -namespace mozilla::net { -class DictionaryCacheEntry; -} -%} -[ptr] native DictionaryCacheEntry(mozilla::net::DictionaryCacheEntry); - [scriptable, uuid(607c2a2c-0a48-40b9-a956-8cf2bb9857cf)] interface nsICacheEntry : nsISupports { @@ -60,11 +53,6 @@ interface nsICacheEntry : nsISupports readonly attribute boolean persistent; /** - * Get if the cache file is READY or REVALIDATING - */ - readonly attribute boolean readyOrRevalidating; - - /** * Get the number of times the cache entry has been opened. 
*/ readonly attribute uint32_t fetchCount; @@ -321,13 +309,6 @@ interface nsICacheEntry : nsISupports * Get the nsILoadContextInfo of the cache entry */ readonly attribute nsILoadContextInfo loadContextInfo; - - /** - * This method gets called to indicate that this entry will be used - * as a Dictionary in the future, so we know to calculate a hash for it. - */ - [noscript] void SetDictionary(in DictionaryCacheEntry dict); - }; /** diff --git a/netwerk/cache2/nsICacheStorage.idl b/netwerk/cache2/nsICacheStorage.idl @@ -63,16 +63,11 @@ interface nsICacheStorage : nsISupports const uint32_t OPEN_INTERCEPTED = 1 << 6; /** - * Only open an existing entry which is complete (i.e. not being written) - */ - const uint32_t OPEN_COMPLETE_ONLY = 1 << 7; - - /** * Asynchronously opens a cache entry for the specified URI. * Result is fetched asynchronously via the callback. * * @param aURI - * The URI to search in cache or to open for writing. + * The URI to search in cache or to open for writting. * @param aIdExtension * Any string that will extend (distinguish) the entry. Two entries * with the same aURI but different aIdExtension will be comletely @@ -95,33 +90,6 @@ interface nsICacheStorage : nsISupports in nsICacheEntryOpenCallback aCallback); /** - * Asynchronously opens a cache entry for the specified URI. - * Result is fetched asynchronously via the callback. - * - * @param aURI - * The URI to search in cache or to open for writing. - * @param aIdExtension - * Any string that will extend (distinguish) the entry. Two entries - * with the same aURI but different aIdExtension will be comletely - * different entries. If you don't know what aIdExtension should be - * leave it empty. 
- * @param aFlags - * OPEN_NORMALLY - open cache entry normally for read and write - * OPEN_TRUNCATE - delete any existing entry before opening it - * OPEN_READONLY - don't create an entry if there is none - * OPEN_PRIORITY - give this request a priority over others - * OPEN_BYPASS_IF_BUSY - backward compatibility only, LOAD_BYPASS_LOCAL_CACHE_IF_BUSY - * CHECK_MULTITHREADED - onCacheEntryCheck may be called on any thread, consumer - * implementation is thread-safe - * @param aCallback - * The consumer that receives the result. - * IMPORTANT: The callback may be called sooner the method returns. - */ - void asyncOpenURIString(in ACString aURI, in ACString aIdExtension, - in uint32_t aFlags, - in nsICacheEntryOpenCallback aCallback); - - /** * Immediately opens a new and empty cache entry in the storage, any existing * entries are immediately doomed. This is similar to the recreate() method * on nsICacheEntry. diff --git a/netwerk/cache2/nsICacheStorageService.idl b/netwerk/cache2/nsICacheStorageService.idl @@ -10,7 +10,6 @@ interface nsIEventTarget; interface nsICacheStorageConsumptionObserver; interface nsICacheStorageVisitor; interface nsIPrincipal; -interface nsIURI; /** * Provides access to particual cache storages of the network URI cache. @@ -77,19 +76,6 @@ interface nsICacheStorageService : nsISupports void clear(); /** - * Evict any Dictionary cache entry by site - * - * @param aURI - * The URI to compare the dictionary entries with. - */ - void clearOriginDictionary(in nsIURI aURI); - - /** - * Evict all Dictionary cache entries - */ - void clearAllOriginDictionaries(); - - /** * Purge only data of disk backed entries. Metadata are left for * performance purposes. */ diff --git a/netwerk/docs/cache2/doc.rst b/netwerk/docs/cache2/doc.rst @@ -16,7 +16,7 @@ be clear directly from the `IDL files <https://searchfox.org/mozilla-central/sea - The cache API is **completely thread-safe** and **non-blocking**. - There is **no IPC support**. 
It's only accessible on the default chrome process. -- When there is no profile the HTTP cache works, but everything is +- When there is no profile the new HTTP cache works, but everything is stored only in memory not obeying any particular limits. .. _nsICacheStorageService: @@ -567,67 +567,3 @@ checking - the memory cache pool is controlled by ``browser.cache.memory.capacity``, the disk entries pool is already described above. The pool can be accessed and modified only on the cache background thread. - -Compression Dictionaries ---------------------------- - -Compression Dictionaries are specced by the IETF: -https://datatracker.ietf.org/doc/draft-ietf-httpbis-compression-dictionary/ - -See also: https://developer.chrome.com/blog/shared-dictionary-compression -and https://github.com/WICG/compression-dictionary-transport - -Gecko's design for compression dictionary support: - -We have special dict:<origin> entries with a listing of all dictionaries -for that origin, stored in metadata. - -When a fetch is made, we check if there's a dict:<origin> cache entry. If -not, we know there are no dictionaries. If there is an entry, and we -haven't previously loaded it into memory, we read and parse the metadata -and create in-memory structures for all dictionaries for <origin>. This -includes the data needed to match and decide if we want to send a -"Available-Dictionary:" header with the request. - -If a response to any request is received and it has a "Use-As-Dictionary" -header, we create a new dictionary entry in-memory and flag it for saving -to the dict:<origin> metadata. We set the stream up to decompress before -storing into the cache (see later options for alternatives in the future), -so that we can be ensured to be able to decompress later. We start -accumulating a hash value for the metadata entry. Once the resource is -fully received, we finalize the hash value and the metadata can be written. 
- -When a response is received with dcb or dcz compress (dictionaries), we use -the cache entry for the dictionary that we sent in Available-Dictionary to -decompress the resource. This means reading it into memory and then -allowing the decompression to occur. - -Several of these actions require a level of asynchronous action (waiting -for a cache entry to be loaded for use as a dictionary, or waiting for a -dict:<origin> entry to be loaded. This is generally handled via lambdas. - -The metadata and in-memory entries are kept in sync with the cache by -clearing entries out when cache entries are Doomed. This also interacts -with Clear Site Data and cookie clear headers (see IETF spec). - -Dictionary loading can also be triggered via <link rel="Compression -Dictionary" ...> and link headers. These will cause prefetches of the -dictionaries. - -Things to watch on landing: -- Cache hitrate -- dictionary utilization --- Add probes -- pageload metrics --- Would require OHTTP-based collection - -Future optimizations: -- Compressing dictionaries with zstd in the cache --- Trades CPU use and some latency decoding dictionary-encoded files for hitrate --- Perhaps only above some size -- Compressing dictionary-encoded files with zstd in the cache --- Trades CPU use for hitrate --- Perhaps only above some size -- Preemptively reading dict:<origin> entries into memory in the background at startup --- Up to some limit -- LRU-ing dict:<origin> entries and dropping old ones from memory diff --git a/netwerk/protocol/http/EarlyHintsService.cpp b/netwerk/protocol/http/EarlyHintsService.cpp @@ -105,13 +105,6 @@ void EarlyHintsService::EarlyHint( mOngoingEarlyHints, linkHeader, aBaseURI, principal, cookieJarSettings, aReferrerPolicy, aCSPHeader, loadInfo->GetBrowsingContextID(), aLoadingBrowsingContext, true); - } else if (linkHeader.mRel.LowerCaseEqualsLiteral( - "compression-dictionary")) { - mLinkType |= dom::LinkStyle::eCOMPRESSION_DICTIONARY; - 
EarlyHintPreloader::MaybeCreateAndInsertPreload( - mOngoingEarlyHints, linkHeader, aBaseURI, principal, - cookieJarSettings, aReferrerPolicy, aCSPHeader, - loadInfo->GetBrowsingContextID(), aLoadingBrowsingContext, true); } } } diff --git a/netwerk/protocol/http/HttpBaseChannel.cpp b/netwerk/protocol/http/HttpBaseChannel.cpp @@ -395,7 +395,7 @@ nsresult HttpBaseChannel::Init(nsIURI* aURI, uint32_t aCaps, } rv = gHttpHandler->AddStandardRequestHeaders( - &mRequestHead, aURI, isHTTPS, contentPolicyType, + &mRequestHead, isHTTPS, contentPolicyType, nsContentUtils::ShouldResistFingerprinting(this, RFPTarget::HttpUserAgent)); if (NS_FAILED(rv)) return rv; @@ -1472,23 +1472,6 @@ HttpBaseChannel::DoApplyContentConversions(nsIStreamListener* aNextListener, LOG(("HttpBaseChannel::DoApplyContentConversions [this=%p]\n", this)); -#ifdef DEBUG - { - nsAutoCString contentEncoding; - nsresult rv = - mResponseHead->GetHeader(nsHttp::Content_Encoding, contentEncoding); - if (NS_SUCCEEDED(rv) && !contentEncoding.IsEmpty()) { - nsAutoCString newEncoding; - char* cePtr = contentEncoding.BeginWriting(); - while (char* val = nsCRT::strtok(cePtr, HTTP_LWS ",", &cePtr)) { - if (strcmp(val, "dcb") == 0 || strcmp(val, "dcz") == 0) { - MOZ_ASSERT(LoadApplyConversion() && !LoadHasAppliedConversion()); - } - } - } - } -#endif - if (!LoadApplyConversion()) { LOG(("not applying conversion per ApplyConversion\n")); return NS_OK; @@ -1521,10 +1504,10 @@ HttpBaseChannel::DoApplyContentConversions(nsIStreamListener* aNextListener, char* cePtr = contentEncoding.BeginWriting(); uint32_t count = 0; - bool removeEncodings = false; while (char* val = nsCRT::strtok(cePtr, HTTP_LWS ",", &cePtr)) { if (++count > 16) { - // For compatibility with old code, we will just carry on without + // That's ridiculous. We only understand 2 different ones :) + // but for compatibility with old code, we will just carry on without // removing the encodings LOG(("Too many Content-Encodings. 
Ignoring remainder.\n")); break; @@ -1542,7 +1525,7 @@ HttpBaseChannel::DoApplyContentConversions(nsIStreamListener* aNextListener, return rv; } - LOG(("Adding converter for content-encoding '%s'", val)); + LOG(("converter removed '%s' content-encoding\n", val)); if (Telemetry::CanRecordPrereleaseData()) { int mode = 0; if (from.EqualsLiteral("gzip") || from.EqualsLiteral("x-gzip")) { @@ -1554,43 +1537,14 @@ HttpBaseChannel::DoApplyContentConversions(nsIStreamListener* aNextListener, mode = 3; } else if (from.EqualsLiteral("zstd")) { mode = 4; - } else if (from.EqualsLiteral("dcb")) { - mode = 5; - } else if (from.EqualsLiteral("dcz")) { - mode = 6; } glean::http::content_encoding.AccumulateSingleSample(mode); } - if (from.EqualsLiteral("dcb") || from.EqualsLiteral("dcz")) { - MOZ_ASSERT(XRE_IsParentProcess()); - removeEncodings = true; - } nextListener = converter; } else { - if (val) { - LOG(("Unknown content encoding '%s'\n", val)); - } - // we *should* return NS_ERROR_UNEXPECTED here, but that will break sites - // that use things like content-encoding: x-gzip, x-gzip (or any other - // weird encoding) + if (val) LOG(("Unknown content encoding '%s', ignoring\n", val)); } } - - // dcb and dcz encodings are removed when it's decompressed (always in - // the parent process). However, in theory you could have - // Content-Encoding: dcb,gzip - // in which case we could pass it down to the content process as - // Content-Encoding: gzip. We won't do that; we'll remove all compressors - // if we need to remove any. - // This double compression of course is silly, but supported by the spec. 
- if (removeEncodings) { - // if we have dcb or dcz, all content-encodings in the header should - // be removed as we're decompressing before the tee in the parent - // process - LOG(("Changing Content-Encoding from '%s' to ''", contentEncoding.get())); - // Can't use SetHeader; we need to overwrite the current value - rv = mResponseHead->SetHeaderOverride(nsHttp::Content_Encoding, ""_ns); - } *aNewNextListener = do_AddRef(nextListener).take(); return NS_OK; } diff --git a/netwerk/protocol/http/HttpChannelChild.h b/netwerk/protocol/http/HttpChannelChild.h @@ -84,15 +84,6 @@ class HttpChannelChild final : public PHttpChannelChild, // nsIChannel NS_IMETHOD GetSecurityInfo(nsITransportSecurityInfo** aSecurityInfo) override; NS_IMETHOD AsyncOpen(nsIStreamListener* aListener) override; - NS_IMETHOD GetDecompressDictionary( - DictionaryCacheEntry** aDictionary) override { - *aDictionary = nullptr; - return NS_OK; - } - NS_IMETHOD SetDecompressDictionary( - DictionaryCacheEntry* aDictionary) override { - return NS_OK; - } // HttpBaseChannel::nsIHttpChannel NS_IMETHOD SetRequestHeader(const nsACString& aHeader, diff --git a/netwerk/protocol/http/HttpChannelParent.cpp b/netwerk/protocol/http/HttpChannelParent.cpp @@ -651,7 +651,7 @@ bool HttpChannelParent::DoAsyncOpen( MOZ_ASSERT(!mBgParent); MOZ_ASSERT(mPromise.IsEmpty()); - // Wait for HttpBackgroundChannel to continue the async open procedure. + // Wait for HttpBackgrounChannel to continue the async open procedure. 
++mAsyncOpenBarrier; RefPtr<HttpChannelParent> self = this; WaitForBgParent(mChannel->ChannelId()) diff --git a/netwerk/protocol/http/HttpLog.h b/netwerk/protocol/http/HttpLog.h @@ -41,7 +41,6 @@ void LogCallingScriptLocation(void* instance, const Maybe<nsCString>& aLogLocation); extern LazyLogModule gHttpLog; extern LazyLogModule gHttpIOLog; -extern LazyLogModule gDictionaryLog; } // namespace net } // namespace mozilla @@ -72,7 +71,4 @@ extern LazyLogModule gDictionaryLog; MOZ_LOG_TEST(mozilla::net::gHttpLog, mozilla::LogLevel::Verbose) #define LOG_ENABLED() LOG4_ENABLED() -#define LOG_DICTIONARIES(args) \ - MOZ_LOG(mozilla::net::gDictionaryLog, mozilla::LogLevel::Debug, args) - #endif // HttpLog_h__ diff --git a/netwerk/protocol/http/InterceptedHttpChannel.h b/netwerk/protocol/http/InterceptedHttpChannel.h @@ -319,16 +319,6 @@ class InterceptedHttpChannel final void DoNotifyListenerCleanup() override; void DoAsyncAbort(nsresult aStatus) override; - - NS_IMETHOD GetDecompressDictionary( - DictionaryCacheEntry** aDictionary) override { - *aDictionary = nullptr; - return NS_OK; - } - NS_IMETHOD SetDecompressDictionary( - DictionaryCacheEntry* aDictionary) override { - return NS_OK; - } }; } // namespace mozilla::net diff --git a/netwerk/protocol/http/NullHttpChannel.cpp b/netwerk/protocol/http/NullHttpChannel.cpp @@ -846,19 +846,6 @@ NullHttpChannel::GetRenderBlocking(bool* aRenderBlocking) { return NS_ERROR_NOT_IMPLEMENTED; } -NS_IMETHODIMP -NullHttpChannel::GetDecompressDictionary( - mozilla::net::DictionaryCacheEntry** aDictionary) { - *aDictionary = nullptr; - return NS_OK; -} - -NS_IMETHODIMP -NullHttpChannel::SetDecompressDictionary( - mozilla::net::DictionaryCacheEntry* aDictionary) { - return NS_OK; -} - #define IMPL_TIMING_ATTR(name) \ NS_IMETHODIMP \ NullHttpChannel::Get##name##Time(PRTime* _retval) { \ diff --git a/netwerk/protocol/http/ObliviousHttpChannel.cpp b/netwerk/protocol/http/ObliviousHttpChannel.cpp @@ -880,15 +880,4 @@ NS_IMETHODIMP 
ObliviousHttpChannel::GetDocumentCharacterSet( return NS_ERROR_NOT_IMPLEMENTED; } -NS_IMETHODIMP ObliviousHttpChannel::GetDecompressDictionary( - DictionaryCacheEntry** aDictionary) { - *aDictionary = nullptr; - return NS_OK; -} - -NS_IMETHODIMP ObliviousHttpChannel::SetDecompressDictionary( - DictionaryCacheEntry* aDictionary) { - return NS_OK; -} - } // namespace mozilla::net diff --git a/netwerk/protocol/http/PHttpChannelParams.h b/netwerk/protocol/http/PHttpChannelParams.h @@ -115,10 +115,6 @@ struct ParamTraits<mozilla::net::nsHttpHeaderArray::nsEntry> { break; case mozilla::net::nsHttpHeaderArray::eVarietyResponse: WriteParam(aWriter, (uint8_t)6); - break; - case mozilla::net::nsHttpHeaderArray::eVarietyResponseOverride: - WriteParam(aWriter, (uint8_t)7); - break; } } @@ -162,10 +158,6 @@ struct ParamTraits<mozilla::net::nsHttpHeaderArray::nsEntry> { case 6: aResult->variety = mozilla::net::nsHttpHeaderArray::eVarietyResponse; break; - case 7: - aResult->variety = - mozilla::net::nsHttpHeaderArray::eVarietyResponseOverride; - break; default: return false; } diff --git a/netwerk/protocol/http/TRRServiceChannel.h b/netwerk/protocol/http/TRRServiceChannel.h @@ -76,15 +76,6 @@ class TRRServiceChannel : public HttpBaseChannel, NS_IMETHOD SetNotificationCallbacks( nsIInterfaceRequestor* aCallbacks) override; - NS_IMETHOD GetDecompressDictionary( - DictionaryCacheEntry** aDictionary) override { - *aDictionary = nullptr; - return NS_OK; - } - NS_IMETHOD SetDecompressDictionary( - DictionaryCacheEntry* aDictionary) override { - return NS_OK; - } // nsISupportsPriority NS_IMETHOD SetPriority(int32_t value) override; // nsIClassOfService diff --git a/netwerk/protocol/http/nsHttpAtomList.h b/netwerk/protocol/http/nsHttpAtomList.h @@ -29,7 +29,6 @@ HTTP_ATOM(Alternate_Service_Used, "Alt-Used") HTTP_ATOM(Assoc_Req, "Assoc-Req") HTTP_ATOM(Authentication, "Authentication") HTTP_ATOM(Authorization, "Authorization") -HTTP_ATOM(Available_Dictionary, "Available-Dictionary") 
HTTP_ATOM(Cache_Control, "Cache-Control") HTTP_ATOM(Connection, "Connection") HTTP_ATOM(Content_Disposition, "Content-Disposition") @@ -49,7 +48,6 @@ HTTP_ATOM(Date, "Date") HTTP_ATOM(DAV, "DAV") HTTP_ATOM(Depth, "Depth") HTTP_ATOM(Destination, "Destination") -HTTP_ATOM(Dictionary_Id, "Dictionary-Id") HTTP_ATOM(DoNotTrack, "DNT") HTTP_ATOM(ETag, "Etag") HTTP_ATOM(Expect, "Expect") @@ -104,7 +102,6 @@ HTTP_ATOM(Transfer_Encoding, "Transfer-Encoding") HTTP_ATOM(URI, "URI") HTTP_ATOM(Upgrade, "Upgrade") HTTP_ATOM(User_Agent, "User-Agent") -HTTP_ATOM(Use_As_Dictionary, "Use-As-Dictionary") HTTP_ATOM(Vary, "Vary") HTTP_ATOM(Version, "Version") HTTP_ATOM(WWW_Authenticate, "WWW-Authenticate") diff --git a/netwerk/protocol/http/nsHttpChannel.cpp b/netwerk/protocol/http/nsHttpChannel.cpp @@ -113,7 +113,6 @@ #include "nsISocketProvider.h" #include "mozilla/extensions/StreamFilterParent.h" #include "mozilla/net/Predictor.h" -#include "mozilla/net/SFVService.h" #include "mozilla/MathAlgorithms.h" #include "mozilla/NullPrincipal.h" #include "CacheControlParser.h" @@ -504,10 +503,6 @@ nsHttpChannel::~nsHttpChannel() { if (gHttpHandler) { gHttpHandler->RemoveHttpChannel(mChannelId); } - - if (mDictDecompress && mUsingDictionary) { - mDictDecompress->UseCompleted(); - } } void nsHttpChannel::ReleaseMainThreadOnlyReferences() { @@ -543,9 +538,11 @@ void nsHttpChannel::ReleaseMainThreadOnlyReferences() { nsresult nsHttpChannel::Init(nsIURI* uri, uint32_t caps, nsProxyInfo* proxyInfo, uint32_t proxyResolveFlags, nsIURI* proxyURI, uint64_t channelId, nsILoadInfo* aLoadInfo) { - LOG1(("nsHttpChannel::Init [this=%p]\n", this)); nsresult rv = HttpBaseChannel::Init(uri, caps, proxyInfo, proxyResolveFlags, proxyURI, channelId, aLoadInfo); + if (NS_FAILED(rv)) return rv; + + LOG1(("nsHttpChannel::Init [this=%p]\n", this)); return rv; } @@ -630,71 +627,6 @@ bool nsHttpChannel::StorageAccessReloadedChannel() { nsresult nsHttpChannel::PrepareToConnect() { LOG(("nsHttpChannel::PrepareToConnect 
[this=%p]\n", this)); - // This may be async; the dictionary headers may need to fetch an origin - // dictionary cache entry from disk before adding the headers. We can - // continue with channel creation, and just block on this being done later - bool async = false; - AUTO_PROFILER_FLOW_MARKER("nsHttpHandler::AddAcceptAndDictionaryHeaders", - NETWORK, Flow::FromPointer(this)); - // AddAcceptAndDictionaryHeaders must call this->Suspend before kicking - // off the async operation that can result in calling the lambda (which - // will Resume), to avoid a race condition. - bool aAsync; - nsresult rv = gHttpHandler->AddAcceptAndDictionaryHeaders( - mURI, mLoadInfo->GetExternalContentPolicyType(), &mRequestHead, IsHTTPS(), - aAsync, this, nsHttpChannel::StaticSuspend, - [self = RefPtr(this)](bool aNeedsResume, DictionaryCacheEntry* aDict) { - self->mDictDecompress = aDict; - if (aNeedsResume) { - LOG_DICTIONARIES(("Resuming after getting Dictionary headers")); - self->Resume(); - } - if (self->mDictDecompress) { - LOG_DICTIONARIES( - ("Added dictionary header for %p, DirectoryCacheEntry %p", - self.get(), aDict)); - AUTO_PROFILER_FLOW_MARKER( - "nsHttpHandler::AddAcceptAndDictionaryHeaders Add " - "Available-Dictionary", - NETWORK, Flow::FromPointer(self)); - // mDictDecompress is set if we added Available-Dictionary - self->mDictDecompress->InUse(); - self->mUsingDictionary = true; - PROFILER_MARKER("Dictionary Prefetch", NETWORK, - MarkerTiming::IntervalStart(), FlowMarker, - Flow::FromPointer(self)); - // XXX if this fails, retry the connection (we assume that the - // DictionaryCacheEntry has been removed). Failure should be only in - // weird cases like no storage service. 
- return NS_SUCCEEDED(self->mDictDecompress->Prefetch( - GetLoadContextInfo(self), self->mShouldSuspendForDictionary, - [self]() { - // this is called when the prefetch is complete to - // un-Suspend the channel - MOZ_ASSERT(self->mDictDecompress->DictionaryReady()); - if (self->mSuspendedForDictionary) { - LOG( - ("nsHttpChannel::SetupChannelForTransaction [this=%p] " - "Resuming channel " - "suspended for Dictionary", - self.get())); - self->mSuspendedForDictionary = false; - self->Resume(); - } - PROFILER_MARKER("Dictionary Prefetch", NETWORK, - MarkerTiming::IntervalEnd(), FlowMarker, - Flow::FromPointer(self)); - })); - } - return true; - }); - if (NS_FAILED(rv)) return rv; - if (async) { - // we'll continue later if GetDictionaryFor is still reading - LOG_DICTIONARIES(("Suspending to get Dictionary headers")); - Suspend(); - } - // notify "http-on-modify-request-before-cookies" observers gHttpHandler->OnModifyRequestBeforeCookies(this); @@ -1398,7 +1330,7 @@ nsresult nsHttpChannel::Connect() { // Step 8.18 of HTTP-network-or-cache fetch // https://fetch.spec.whatwg.org/#http-network-or-cache-fetch nsAutoCString rangeVal; - if (NS_SUCCEEDED(mRequestHead.GetHeader(nsHttp::Range, rangeVal))) { + if (NS_SUCCEEDED(GetRequestHeader("Range"_ns, rangeVal))) { SetRequestHeader("Accept-Encoding"_ns, "identity"_ns, true); } @@ -1630,10 +1562,7 @@ nsresult nsHttpChannel::DoConnectActual( return rv; } - return CallOrWaitForResume( - [trans = RefPtr(aTransWithStickyConn)](auto* self) { - return self->DispatchTransaction(trans); - }); + return DispatchTransaction(aTransWithStickyConn); } nsresult nsHttpChannel::DispatchTransaction( @@ -3480,7 +3409,6 @@ void nsHttpChannel::UpdateCacheDisposition(bool aSuccessfulReval, ReportHttpResponseVersion(mResponseHead->Version()); } -// Only used for redirects (3XX responses) nsresult nsHttpChannel::ContinueProcessResponse4(nsresult rv) { bool doNotRender = DoNotRender3xxBody(rv); @@ -3566,71 +3494,6 @@ nsresult 
nsHttpChannel::ContinueProcessNormal(nsresult rv) { if (NS_FAILED(rv)) CloseCacheEntry(true); } - // We may need to install the cache listener before CallonStartRequest, - // since InstallCacheListener can modify the Content-Encoding to remove - // dcb/dcz (and perhaps others), and CallOnStartRequest() sends the - // Content-Encoding to the content process. If this doesn't install a - // listener (because this isn't a dictionary or dictionary-compressed), - // call it after CallOnStartRequest so that we save the compressed data - // in the cache, and run the decompressor in the content process. - bool isDictionaryCompressed = false; - nsAutoCString contentEncoding; - Unused << mResponseHead->GetHeader(nsHttp::Content_Encoding, contentEncoding); - // Note: doesn't handle dcb, gzip or gzip, dcb (etc) - if (contentEncoding.Equals("dcb") || contentEncoding.Equals("dcz")) { - isDictionaryCompressed = true; - } - - if (mCacheEntry && !LoadCacheEntryIsReadOnly()) { - // XXX We may want to consider recompressing any dcb/dcz files to save space - // and improve hitrate. Downside is CPU use, complexity and perhaps delay, - // maybe. 
- nsAutoCString dictionary; - if (StaticPrefs::network_http_dictionaries_enable() && IsHTTPS()) { - Unused << mResponseHead->GetHeader(nsHttp::Use_As_Dictionary, dictionary); - if (!dictionary.IsEmpty()) { - if (!ParseDictionary(mCacheEntry, mResponseHead.get(), true)) { - LOG_DICTIONARIES(("Failed to parse use-as-dictionary")); - } else { - MOZ_ASSERT(mDictSaving); - - // We need to record the hash as we save it - mCacheEntry->SetDictionary(mDictSaving); - } - } - } - - if (isDictionaryCompressed || mDictSaving) { - LOG(("Decompressing before saving into cache [channel=%p]", this)); - rv = DoInstallCacheListener(isDictionaryCompressed, &dictionary, 0); - } - } else { - if (isDictionaryCompressed) { - // We still need to decompress in the parent if it's dcb or dcz even if - // not saving to the cache - LOG_DICTIONARIES( - ("Removing Content-Encoding %s for %p", contentEncoding.get(), this)); - nsCOMPtr<nsIStreamListener> listener; - // otherwise we won't convert in the parent process - // XXX may be redundant, but safe - SetApplyConversion(true); - rv = DoApplyContentConversions(mListener, getter_AddRefs(listener), - nullptr); - if (NS_FAILED(rv)) { - return rv; - } - if (listener) { - LOG_DICTIONARIES(("Installed nsHTTPCompressConv %p without cache tee", - listener.get())); - mListener = listener; - mCompressListener = listener; - StoreHasAppliedConversion(true); - } else { - LOG_DICTIONARIES(("Didn't install decompressor without cache tee")); - } - } - } // else we'll call InstallCacheListener after CallOnStartRequest - // Check that the server sent us what we were asking for if (LoadResuming()) { // Create an entity id from the response @@ -3657,30 +3520,15 @@ nsresult nsHttpChannel::ContinueProcessNormal(nsresult rv) { } } - // If we don't have the entire dictionary yet, Suspend() the channel - // until the dictionary is in-memory. 
- if (mDictDecompress && mUsingDictionary && mShouldSuspendForDictionary && - !mDictDecompress->DictionaryReady()) { - LOG( - ("nsHttpChannel::ContinueProcessNormal [this=%p] Suspending the " - "transaction, waiting for dictionary", - this)); - Suspend(); - mSuspendedForDictionary = true; - } - rv = CallOnStartRequest(); if (NS_FAILED(rv)) return rv; - // If we didn't install cache listeners to decompress above - // install the cache listener now (so they'll get compressed data) - if (!isDictionaryCompressed && !mDictSaving) { - // install cache listener if we still have a cache entry open - if (mCacheEntry && !LoadCacheEntryIsReadOnly()) { - rv = InstallCacheListener(); - if (NS_FAILED(rv)) return rv; - } + // install cache listener if we still have a cache entry open + if (mCacheEntry && !LoadCacheEntryIsReadOnly()) { + rv = InstallCacheListener(); + if (NS_FAILED(rv)) return rv; } + return NS_OK; } @@ -4222,39 +4070,6 @@ bool nsHttpChannel::ResponseWouldVary(nsICacheEntry* entry) { return false; } -// Remove an entry from Vary header, if it exists -void RemoveFromVary(nsHttpResponseHead* aResponseHead, - const nsACString& aRemove) { - nsAutoCString buf; - Unused << aResponseHead->GetHeader(nsHttp::Vary, buf); - - bool remove = false; - for (const nsACString& token : - nsCCharSeparatedTokenizer(buf, NS_HTTP_HEADER_SEP).ToRange()) { - if (token.Equals(aRemove)) { - // Need to build a new string without aRemove - remove = true; - break; - } - } - if (!remove) { - return; - } - nsAutoCString newValue; - for (const nsACString& token : - nsCCharSeparatedTokenizer(buf, NS_HTTP_HEADER_SEP).ToRange()) { - if (!token.Equals(aRemove)) { - if (!newValue.IsEmpty()) { - newValue += ","_ns; - } - newValue += token; - } - } - LOG(("RemoveFromVary %s removed, new value -> %s", - PromiseFlatCString(aRemove).get(), newValue.get())); - Unused << aResponseHead->SetHeaderOverride(nsHttp::Vary, newValue); -} - // We need to have an implementation of this function just so that we can 
keep // all references to mCallOnResume of type nsHttpChannel: it's not OK in C++ // to set a member function ptr to a base class function. @@ -4579,7 +4394,7 @@ nsresult nsHttpChannel::ProcessNotModified( rv = UpdateExpirationTime(); if (NS_FAILED(rv)) return rv; - rv = AddCacheEntryHeaders(mCacheEntry, false); + rv = AddCacheEntryHeaders(mCacheEntry); if (NS_FAILED(rv)) return rv; // notify observers interested in looking at a reponse that has been @@ -5947,7 +5762,7 @@ nsresult nsHttpChannel::InitCacheEntry() { // mark this weakly framed until a response body is seen mCacheEntry->SetMetaDataElement("strongly-framed", "0"); - rv = AddCacheEntryHeaders(mCacheEntry, false); + rv = AddCacheEntryHeaders(mCacheEntry); if (NS_FAILED(rv)) return rv; StoreInitedCacheEntry(true); @@ -5983,8 +5798,7 @@ void nsHttpChannel::UpdateInhibitPersistentCachingFlag() { nsresult DoAddCacheEntryHeaders(nsHttpChannel* self, nsICacheEntry* entry, nsHttpRequestHead* requestHead, nsHttpResponseHead* responseHead, - nsITransportSecurityInfo* securityInfo, - bool aModified) { + nsITransportSecurityInfo* securityInfo) { nsresult rv; LOG(("nsHttpChannel::AddCacheEntryHeaders [this=%p] begin", self)); @@ -5993,11 +5807,6 @@ nsresult DoAddCacheEntryHeaders(nsHttpChannel* self, nsICacheEntry* entry, entry->SetSecurityInfo(securityInfo); } - // Note: if aModified == false, then we're processing a 304 Not Modified, - // and we *shouldn't* have any change to the Dictionary (and won't be - // replacing the DictionaryEntry, though if the Match/Match-dest/Id/Type - // changed, we may need to rewrite it. XXX? - // Store the HTTP request method with the cache entry so we can distinguish // for example GET and HEAD responses. 
nsAutoCString method; @@ -6009,20 +5818,6 @@ nsresult DoAddCacheEntryHeaders(nsHttpChannel* self, nsICacheEntry* entry, rv = StoreAuthorizationMetaData(entry, requestHead); if (NS_FAILED(rv)) return rv; - rv = self->UpdateCacheEntryHeaders(entry, nullptr); - return rv; -} - -nsresult nsHttpChannel::AddCacheEntryHeaders(nsICacheEntry* entry, - bool aModified) { - return DoAddCacheEntryHeaders(this, entry, &mRequestHead, mResponseHead.get(), - mSecurityInfo, aModified); -} - -nsresult nsHttpChannel::UpdateCacheEntryHeaders(nsICacheEntry* entry, - const nsHttpAtom* aAtom) { - nsresult rv = NS_OK; - // Iterate over the headers listed in the Vary response header, and // store the value of the corresponding request header so we can verify // that it has not varied when we try to re-use the cached response at @@ -6036,127 +5831,74 @@ nsresult nsHttpChannel::UpdateCacheEntryHeaders(nsICacheEntry* entry, // the check. { nsAutoCString buf, metaKey; - Unused << mResponseHead->GetHeader(nsHttp::Vary, buf); + Unused << responseHead->GetHeader(nsHttp::Vary, buf); constexpr auto prefix = "request-"_ns; for (const nsACString& token : nsCCharSeparatedTokenizer(buf, NS_HTTP_HEADER_SEP).ToRange()) { LOG( - ("nsHttpChannel::ProcessVaryCacheEntryHeaders [this=%p] " + ("nsHttpChannel::AddCacheEntryHeaders [this=%p] " "processing %s", - this, nsPromiseFlatCString(token).get())); + self, nsPromiseFlatCString(token).get())); if (!token.EqualsLiteral("*")) { nsHttpAtom atom = nsHttp::ResolveAtom(token); - if (!aAtom || atom == *aAtom) { - nsAutoCString val; - nsAutoCString hash; - if (NS_SUCCEEDED(mRequestHead.GetHeader(atom, val))) { - // If cookie-header, store a hash of the value - if (atom == nsHttp::Cookie) { - LOG( - ("nsHttpChannel::ProcessVaryCacheEntryHeaders [this=%p] " - "cookie-value %s", - this, val.get())); - rv = Hash(val.get(), hash); - // If hash failed, store a string not very likely - // to be the result of subsequent hashes - if (NS_FAILED(rv)) { - val = "<hash 
failed>"_ns; - } else { - val = hash; - } - - LOG((" hashed to %s\n", val.get())); + nsAutoCString val; + nsAutoCString hash; + if (NS_SUCCEEDED(requestHead->GetHeader(atom, val))) { + // If cookie-header, store a hash of the value + if (atom == nsHttp::Cookie) { + LOG( + ("nsHttpChannel::AddCacheEntryHeaders [this=%p] " + "cookie-value %s", + self, val.get())); + rv = Hash(val.get(), hash); + // If hash failed, store a string not very likely + // to be the result of subsequent hashes + if (NS_FAILED(rv)) { + val = "<hash failed>"_ns; + } else { + val = hash; } - // build cache meta data key and set meta data element... - metaKey = prefix + token; - entry->SetMetaDataElement(metaKey.get(), val.get()); - } else { - LOG( - ("nsHttpChannel::ProcessVaryCacheEntryHeaders [this=%p] " - "clearing metadata for %s", - this, nsPromiseFlatCString(token).get())); - metaKey = prefix + token; - entry->SetMetaDataElement(metaKey.get(), nullptr); + LOG((" hashed to %s\n", val.get())); } + + // build cache meta data key and set meta data element... + metaKey = prefix + token; + entry->SetMetaDataElement(metaKey.get(), val.get()); + } else { + LOG( + ("nsHttpChannel::AddCacheEntryHeaders [this=%p] " + "clearing metadata for %s", + self, nsPromiseFlatCString(token).get())); + metaKey = prefix + token; + entry->SetMetaDataElement(metaKey.get(), nullptr); } } } } - if (NS_FAILED(rv)) { - return rv; - } + // Store the received HTTP head with the cache entry as an element of // the meta data. nsAutoCString head; - mResponseHead->Flatten(head, true); + responseHead->Flatten(head, true); rv = entry->SetMetaDataElement("response-head", head.get()); if (NS_FAILED(rv)) return rv; head.Truncate(); - mResponseHead->FlattenNetworkOriginalHeaders(head); + responseHead->FlattenNetworkOriginalHeaders(head); rv = entry->SetMetaDataElement("original-response-headers", head.get()); if (NS_FAILED(rv)) return rv; - // Indicate we have successfully finished setting metadata on the cache - // entry. 
- return entry->MetaDataReady(); -} - -bool nsHttpChannel::ParseDictionary(nsICacheEntry* aEntry, - nsHttpResponseHead* aResponseHead, - bool aModified) { - nsAutoCString val; - if (NS_SUCCEEDED(aResponseHead->GetHeader(nsHttp::Use_As_Dictionary, val))) { - nsAutoCStringN<128> matchVal; - nsAutoCStringN<64> matchIdVal; - nsTArray<nsCString> matchDestItems; - nsAutoCString typeVal; - - if (!NS_ParseUseAsDictionary(val, matchVal, matchIdVal, matchDestItems, - typeVal)) { - return false; - } + // Indicate we have successfully finished setting metadata on the cache entry. + rv = entry->MetaDataReady(); - nsCString key; - nsresult rv; - if (NS_FAILED(rv = aEntry->GetKey(key))) { - return false; - } + return rv; +} - nsCString hash; - // Available now for use - RefPtr<DictionaryCache> dicts(DictionaryCache::GetInstance()); - LOG_DICTIONARIES( - ("Adding DictionaryCache entry for %s: key %s, matchval %s, id=%s, " - "match-dest[0]=%s, type=%s", - mURI->GetSpecOrDefault().get(), key.get(), matchVal.get(), - matchIdVal.get(), - matchDestItems.Length() > 0 ? matchDestItems[0].get() : "<none>", - typeVal.get())); - - uint32_t expTime = 0; - Unused << GetCacheTokenExpirationTime(&expTime); - - dicts->AddEntry(mURI, key, matchVal, matchDestItems, matchIdVal, Some(hash), - aModified, expTime, getter_AddRefs(mDictSaving)); - // If this was 304 Not Modified, then we don't need the dictionary data - // (though we may update the dictionary entry if the match/id/etc changed). - // If this is 304, mDictSaving will be cleared by AddEntry. 
- if (mDictSaving) { - if (mDictSaving->ShouldSuspendUntilCacheRead()) { - LOG_DICTIONARIES(("Suspending %p to wait for cache read", this)); - mTransactionPump->Suspend(); - mDictSaving->CallbackOnCacheRead([self = RefPtr(this)]() { - LOG_DICTIONARIES(("Resuming %p after cache read", self.get())); - self->Resume(); - }); - } - } - return true; - } - return true; // succeeded, no use-as-dictionary +nsresult nsHttpChannel::AddCacheEntryHeaders(nsICacheEntry* entry) { + return DoAddCacheEntryHeaders(this, entry, &mRequestHead, mResponseHead.get(), + mSecurityInfo); } inline void GetAuthType(const char* challenge, nsCString& authType) { @@ -6206,22 +5948,9 @@ nsresult nsHttpChannel::FinalizeCacheEntry() { return NS_OK; } -nsresult nsHttpChannel::InstallCacheListener(int64_t offset) { - return DoInstallCacheListener(false, nullptr, offset); -} - // Open an output stream to the cache entry and insert a listener tee into -// the chain of response listeners, so the data will go the cache and the -// normal listener chain, which often will eventually include a -// decompressor. If the Content-Encoding is dcb or dcz, we'll include a -// decompressor *before* the tee, so the cache will see decompressed data -// (we can't decompress dcb/dcz when reading from the cache). Also, if an -// entry is being used as a dictionary (Use-As-Dictionary), we want the data -// to in the cache to be decompressed, so we should install a decompressor -// before the tee as well. -nsresult nsHttpChannel::DoInstallCacheListener(bool aIsDictionaryCompressed, - nsACString* aDictionary, - int64_t offset) { +// the chain of response listeners. 
+nsresult nsHttpChannel::InstallCacheListener(int64_t offset) { nsresult rv; LOG(("Preparing to write data into the cache [uri=%s]\n", mSpec.get())); @@ -6231,6 +5960,28 @@ nsresult nsHttpChannel::DoInstallCacheListener(bool aIsDictionaryCompressed, mRaceCacheWithNetwork); MOZ_ASSERT(mListener); + nsAutoCString contentEncoding, contentType; + Unused << mResponseHead->GetHeader(nsHttp::Content_Encoding, contentEncoding); + mResponseHead->ContentType(contentType); + // If the content is compressible and the server has not compressed it, + // mark the cache entry for compression. + if (contentEncoding.IsEmpty() && + (contentType.EqualsLiteral(TEXT_HTML) || + contentType.EqualsLiteral(TEXT_PLAIN) || + contentType.EqualsLiteral(TEXT_CSS) || + contentType.EqualsLiteral(TEXT_JAVASCRIPT) || + contentType.EqualsLiteral(TEXT_ECMASCRIPT) || + contentType.EqualsLiteral(TEXT_XML) || + contentType.EqualsLiteral(APPLICATION_JAVASCRIPT) || + contentType.EqualsLiteral(APPLICATION_ECMASCRIPT) || + contentType.EqualsLiteral(APPLICATION_XJAVASCRIPT) || + contentType.EqualsLiteral(APPLICATION_XHTML_XML))) { + rv = mCacheEntry->SetMetaDataElement("uncompressed-len", "0"); + if (NS_FAILED(rv)) { + LOG(("unable to mark cache entry for compression")); + } + } + LOG(("Trading cache input stream for output stream [channel=%p]", this)); // We must close the input stream first because cache entries do not @@ -6282,64 +6033,12 @@ nsresult nsHttpChannel::DoInstallCacheListener(bool aIsDictionaryCompressed, do_CreateInstance(kStreamListenerTeeCID, &rv); if (NS_FAILED(rv)) return rv; - rv = tee->Init(mListener, out, nullptr); LOG(("nsHttpChannel::InstallCacheListener sync tee %p rv=%" PRIx32, tee.get(), static_cast<uint32_t>(rv))); + rv = tee->Init(mListener, out, nullptr); if (NS_FAILED(rv)) return rv; - mListener = tee; - - // If this is Use-As-Dictionary we need to be able to read it quickly for - // dictionary use, OR if it's encoded in dcb or dcz (using a dictionary), - // we must decompress 
it before storing since we won't have the dictionary - // when we go to read it out later. - // In this case, we hook an nsHTTPCompressConv instance in before the tee - // since we don't want to have to decompress it here and again in the content - // process (if it's not dcb/dcz); if it is dcb/dcz we must decompress it - // before the content process gets to see it - // XXX We could recompress this with e.g. gzip to save space and improve - // hitrate, at the cost of some CPU. - - // Note: this doesn't handle cases like "dcb, gzip" or (worse?) "gzip, dcb". - // We could in theory handle them. - if (aDictionary || aIsDictionaryCompressed) { - nsCOMPtr<nsIStreamListener> listener; - // otherwise we won't convert in the parent process - SetApplyConversion(true); - rv = - DoApplyContentConversions(mListener, getter_AddRefs(listener), nullptr); - if (NS_FAILED(rv)) { - return rv; - } - // Remove Available-Dictionary from Vary header if present. This - // avoids us refusing to match on a future load, for example if this - // dictionary was decoded from an earlier version using a dictionary - // (i.e. the update jquery to new version using the old version as a - // dictionary; no future load will use that old version). - - // XXX It would be slightly more efficient to remove all at once - // instead of sequentially by passing an array of strings - RemoveFromVary(mResponseHead.get(), "available-dictionary"_ns); - RemoveFromVary(mResponseHead.get(), "accept-encoding"_ns); - - if (listener) { - LOG_DICTIONARIES( - ("Installed nsHTTPCompressConv %p before tee", listener.get())); - mListener = listener; - mCompressListener = listener; - StoreHasAppliedConversion(true); - - } else { - LOG_DICTIONARIES(("Didn't install decompressor before tee")); - } - // We may have modified Content-Encoding; make sure cache metadata - // reflects that. 
Pass nullptr so we pick up the Vary updates above - rv = UpdateCacheEntryHeaders(mCacheEntry, nullptr); - if (NS_FAILED(rv)) { - mCacheEntry->AsyncDoom(nullptr); - return rv; - } - } + mListener = tee; return NS_OK; } @@ -7147,9 +6846,6 @@ nsHttpChannel::Suspend() { return NS_FAILED(rvTransaction) ? rvTransaction : rvCache; } -// static -void nsHttpChannel::StaticSuspend(nsHttpChannel* aChan) { aChan->Suspend(); } - NS_IMETHODIMP nsHttpChannel::Resume() { NS_ENSURE_TRUE(mSuspendCount > 0, NS_ERROR_UNEXPECTED); @@ -7956,28 +7652,6 @@ nsHttpChannel::GetEncodedBodySize(uint64_t* aEncodedBodySize) { return NS_OK; } -NS_IMETHODIMP -nsHttpChannel::GetDecompressDictionary(DictionaryCacheEntry** aDictionary) { - *aDictionary = do_AddRef(mDictDecompress).take(); - return NS_OK; -} - -NS_IMETHODIMP -nsHttpChannel::SetDecompressDictionary(DictionaryCacheEntry* aDictionary) { - if (!aDictionary) { - if (mDictDecompress && mUsingDictionary) { - mDictDecompress->UseCompleted(); - } - mUsingDictionary = false; - } else { - MOZ_ASSERT(!mDictDecompress); - aDictionary->InUse(); - mUsingDictionary = true; - } - mDictDecompress = aDictionary; - return NS_OK; -} - //----------------------------------------------------------------------------- // nsHttpChannel::nsIHttpChannelInternal //----------------------------------------------------------------------------- diff --git a/netwerk/protocol/http/nsHttpChannel.h b/netwerk/protocol/http/nsHttpChannel.h @@ -139,7 +139,6 @@ class nsHttpChannel final : public HttpBaseChannel, const nsACString& reason) override; NS_IMETHOD Cancel(nsresult status) override; NS_IMETHOD Suspend() override; - static void StaticSuspend(nsHttpChannel* aChan); NS_IMETHOD Resume() override; // nsIChannel NS_IMETHOD @@ -206,11 +205,6 @@ class nsHttpChannel final : public HttpBaseChannel, NS_IMETHOD SetResponseStatus(uint32_t aStatus, const nsACString& aStatusText) override; - NS_IMETHOD GetDecompressDictionary( - DictionaryCacheEntry** aDictionary) override; - 
NS_IMETHOD SetDecompressDictionary( - DictionaryCacheEntry* aDictionary) override; - void SetWarningReporter(HttpChannelSecurityWarningReporter* aReporter); HttpChannelSecurityWarningReporter* GetWarningReporter(); @@ -418,17 +412,9 @@ class nsHttpChannel final : public HttpBaseChannel, void CloseCacheEntry(bool doomOnFailure); [[nodiscard]] nsresult InitCacheEntry(); void UpdateInhibitPersistentCachingFlag(); - bool ParseDictionary(nsICacheEntry* aEntry, nsHttpResponseHead* aResponseHead, - bool aModified); - [[nodiscard]] nsresult AddCacheEntryHeaders(nsICacheEntry* entry, - bool aModified); - [[nodiscard]] nsresult UpdateCacheEntryHeaders(nsICacheEntry* entry, - const nsHttpAtom* aAtom); + [[nodiscard]] nsresult AddCacheEntryHeaders(nsICacheEntry* entry); [[nodiscard]] nsresult FinalizeCacheEntry(); [[nodiscard]] nsresult InstallCacheListener(int64_t offset = 0); - [[nodiscard]] nsresult DoInstallCacheListener(bool aIsDictionaryCompressed, - nsACString* aDictionary, - int64_t offset = 0); void MaybeInvalidateCacheEntryForSubsequentGet(); void AsyncOnExamineCachedResponse(); @@ -569,16 +555,6 @@ class nsHttpChannel final : public HttpBaseChannel, // state of whether tracking protection is enabled or not. RefPtr<nsChannelClassifier> mChannelClassifier; - // Dictionary entry for the entry being used to decompress this stream - // (i.e. we added Dictionary-Available to the request). - RefPtr<DictionaryCacheEntry> mDictDecompress; - // This is for channels we're going to use a dictionaries in the future - // (i.e. ResponseHeaders has Use-As-Dictionary) - RefPtr<DictionaryCacheEntry> mDictSaving; - // Note that in the case of using a file to be a dictionary for future - // versions of itself, these may have the same URI (but likely different - // hashes). - // Proxy release all members above on main thread. 
void ReleaseMainThreadOnlyReferences(); @@ -734,7 +710,7 @@ class nsHttpChannel final : public HttpBaseChannel, MOZ_ATOMIC_BITFIELDS(mAtomicBitfields6, 32, ( // True if network request gets to OnStart before we get a response from the cache (uint32_t, NetworkWonRace, 1), - // Valid values are CachedContentValidity::Unset/Invalid/Valid + // Valid values are CachedContentValid (uint32_t, CachedContentIsValid, 2), // Only set to true when we receive an HTTPSSVC record before the // transaction is created. @@ -904,10 +880,6 @@ class nsHttpChannel final : public HttpBaseChannel, // Used to handle cancellation while suspended waiting for LNA permission bool mWaitingForLNAPermission{false}; - bool mUsingDictionary{false}; // we added Available-Dictionary - bool mShouldSuspendForDictionary{false}; - bool mSuspendedForDictionary{false}; - protected: virtual void DoNotifyListenerCleanup() override; diff --git a/netwerk/protocol/http/nsHttpHandler.cpp b/netwerk/protocol/http/nsHttpHandler.cpp @@ -27,13 +27,11 @@ #include "nsCOMPtr.h" #include "nsNetCID.h" #include "mozilla/AppShutdown.h" -#include "mozilla/Base64.h" #include "mozilla/ClearOnShutdown.h" #include "mozilla/Components.h" #include "mozilla/Printf.h" #include "mozilla/RandomNum.h" #include "mozilla/SHA1.h" -#include "mozilla/ScopeExit.h" #include "mozilla/Sprintf.h" #include "mozilla/StaticPrefs_network.h" #include "mozilla/StaticPrefs_privacy.h" @@ -361,8 +359,6 @@ nsresult nsHttpHandler::Init() { // This preference is only used in parent process. if (!IsNeckoChild()) { if (XRE_IsParentProcess()) { - mDictionaryCache = DictionaryCache::GetInstance(); - std::bitset<3> usageOfHTTPSRRPrefs; usageOfHTTPSRRPrefs[0] = StaticPrefs::network_dns_upgrade_with_https_rr(); usageOfHTTPSRRPrefs[1] = @@ -653,91 +649,8 @@ nsresult nsHttpHandler::InitConnectionMgr() { mBeConservativeForProxy); } -// We're using RequestOverride because this can get called when these are -// set by Fetch from the old request. 
We need to pass a function pointer to -// let GetDictionaryFor suspend the channel before starting the async -// dictionary load. -nsresult nsHttpHandler::AddAcceptAndDictionaryHeaders( - nsIURI* aURI, ExtContentPolicyType aType, nsHttpRequestHead* aRequest, - bool aSecure, bool& aAsync, nsHttpChannel* aChan, - void (*aSuspend)(nsHttpChannel*), - const std::function<bool(bool, DictionaryCacheEntry*)>& aCallback) { - LOG(("Adding Dictionary headers")); - auto guard = MakeScopeExit([&]() { (aCallback)(false, nullptr); }); - - nsresult rv = NS_OK; - // Add the "Accept-Encoding" header and possibly Dictionary headers - if (aSecure) { - // The dictionary info may require us to check the cache. - if (StaticPrefs::network_http_dictionaries_enable()) { - // Note: this is async; the lambda can happen later - // aCallback will now be owned by GetDictionaryFor - guard.release(); - mDictionaryCache->GetDictionaryFor( - aURI, aType, aAsync, aChan, aSuspend, - [self = RefPtr(this), aRequest, aCallback]( - bool aNeedsResume, DictionaryCacheEntry* aDict) { - if (!aDict) { - // Accept-Encoding was already set in AddStandardHeaders - (aCallback)(aNeedsResume, nullptr); - return NS_OK; - } - - nsAutoCStringN<64> encodedHash = ":"_ns + aDict->GetHash() + ":"_ns; - - // Need to retain access to the dictionary until the request - // completes. Note that this includes if the dictionary we offered - // gets replaced by another request while we're waiting for a - // response; in that case we need to read in a copy of the - // dictionary into memory before overwriting it and store in dict - // temporarily. - aRequest->SetDictionary(aDict); - - // We want to make sure that the cache entry doesn't disappear out - // from under us if we set the header, so do the callback to - // Prefetch() the entry before adding the headers (so we don't - // have to remove the headers if Prefetch() fails). 
It might fail - // if something is asynchronously Dooming the entry but the - // DictionaryCache hasn't been updated to remove the entry yet (or - // any other thing that were to desynchronize the DictionaryCache - // with the actual cache. - if ((aCallback)(aNeedsResume, aDict)) { - LOG_DICTIONARIES( - ("Setting Available-Dictionary: %s", encodedHash.get())); - nsresult rv = aRequest->SetHeader( - nsHttp::Available_Dictionary, encodedHash, false, - nsHttpHeaderArray::eVarietyRequestOverride); - if (NS_FAILED(rv)) { - return rv; - } - if (!aDict->GetId().IsEmpty()) { - nsPrintfCString id("\"%s\"", aDict->GetId().get()); - LOG_DICTIONARIES(("Setting Dictionary-Id: %s", id.get())); - rv = aRequest->SetHeader( - nsHttp::Dictionary_Id, id, false, - nsHttpHeaderArray::eVarietyRequestOverride); - if (NS_FAILED(rv)) { - return rv; - } - } - return aRequest->SetHeader( - nsHttp::Accept_Encoding, self->mDictionaryAcceptEncodings, - false, nsHttpHeaderArray::eVarietyRequestOverride); - } - return NS_OK; - }); - } else { - aAsync = false; - } - } else { - aAsync = false; - } - // guard may call aCallback here - return rv; -} - nsresult nsHttpHandler::AddStandardRequestHeaders( - nsHttpRequestHead* request, nsIURI* aURI, bool aIsHTTPS, + nsHttpRequestHead* request, bool isSecure, ExtContentPolicyType aContentPolicyType, bool aShouldResistFingerprinting) { nsresult rv; @@ -784,20 +697,22 @@ nsresult nsHttpHandler::AddStandardRequestHeaders( if (NS_FAILED(rv)) return rv; } - // add the "Send Hint" header - if (mSafeHintEnabled || sParentalControlsEnabled) { - rv = request->SetHeader(nsHttp::Prefer, "safe"_ns, false, - nsHttpHeaderArray::eVarietyRequestDefault); - if (NS_FAILED(rv)) return rv; - } - - if (aIsHTTPS) { + // Add the "Accept-Encoding" header + if (isSecure) { rv = request->SetHeader(nsHttp::Accept_Encoding, mHttpsAcceptEncodings, false, nsHttpHeaderArray::eVarietyRequestDefault); } else { rv = request->SetHeader(nsHttp::Accept_Encoding, mHttpAcceptEncodings, false, 
nsHttpHeaderArray::eVarietyRequestDefault); } + if (NS_FAILED(rv)) return rv; + + // add the "Send Hint" header + if (mSafeHintEnabled || sParentalControlsEnabled) { + rv = request->SetHeader(nsHttp::Prefer, "safe"_ns, false, + nsHttpHeaderArray::eVarietyRequestDefault); + if (NS_FAILED(rv)) return rv; + } return NS_OK; } @@ -826,10 +741,8 @@ bool nsHttpHandler::IsAcceptableEncoding(const char* enc, bool isSecure) { // continuing bad behavior.. so limit it to known x-* patterns bool rv; if (isSecure) { - // Should be a superset of mAcceptEncodings (unless someone messes with - // prefs) - rv = nsHttp::FindToken(mDictionaryAcceptEncodings.get(), enc, - HTTP_LWS ",") != nullptr; + rv = nsHttp::FindToken(mHttpsAcceptEncodings.get(), enc, HTTP_LWS ",") != + nullptr; } else { rv = nsHttp::FindToken(mHttpAcceptEncodings.get(), enc, HTTP_LWS ",") != nullptr; @@ -1503,30 +1416,17 @@ void nsHttpHandler::PrefsChanged(const char* pref) { nsAutoCString acceptEncodings; rv = Preferences::GetCString(HTTP_PREF("accept-encoding"), acceptEncodings); if (NS_SUCCEEDED(rv)) { - rv = SetAcceptEncodings(acceptEncodings.get(), false, false); + rv = SetAcceptEncodings(acceptEncodings.get(), false); MOZ_ASSERT(NS_SUCCEEDED(rv)); } } - if (PREF_CHANGED(HTTP_PREF("accept-encoding.secure")) || - PREF_CHANGED(HTTP_PREF("accept-encoding.dictionary"))) { + if (PREF_CHANGED(HTTP_PREF("accept-encoding.secure"))) { nsAutoCString acceptEncodings; rv = Preferences::GetCString(HTTP_PREF("accept-encoding.secure"), acceptEncodings); if (NS_SUCCEEDED(rv)) { - rv = SetAcceptEncodings(acceptEncodings.get(), true, false); - - // Since dictionary encodings are dependent on both accept-encoding.secure - // and accept-encoding.dictionary, update both if either changes (which is - // quite rare, so there's no real perf hit) - nsAutoCString acceptDictionaryEncodings; - rv = Preferences::GetCString(HTTP_PREF("accept-encoding.dictionary"), - acceptDictionaryEncodings); - if (NS_SUCCEEDED(rv) && 
!acceptDictionaryEncodings.IsEmpty()) { - acceptEncodings.Append(", "_ns); - acceptEncodings.Append(acceptDictionaryEncodings); - rv = SetAcceptEncodings(acceptEncodings.get(), true, true); - } + rv = SetAcceptEncodings(acceptEncodings.get(), true); MOZ_ASSERT(NS_SUCCEEDED(rv)); } } @@ -2102,10 +2002,8 @@ nsresult nsHttpHandler::SetAcceptLanguages() { } nsresult nsHttpHandler::SetAcceptEncodings(const char* aAcceptEncodings, - bool isSecure, bool isDictionary) { - if (isDictionary) { - mDictionaryAcceptEncodings = aAcceptEncodings; - } else if (isSecure) { + bool isSecure) { + if (isSecure) { mHttpsAcceptEncodings = aAcceptEncodings; } else { // use legacy list if a secure override is not specified diff --git a/netwerk/protocol/http/nsHttpHandler.h b/netwerk/protocol/http/nsHttpHandler.h @@ -21,7 +21,6 @@ #include "nsString.h" #include "nsCOMPtr.h" #include "nsWeakReference.h" -#include "mozilla/net/Dictionary.h" #include "nsIHttpProtocolHandler.h" #include "nsIObserver.h" @@ -117,13 +116,8 @@ class nsHttpHandler final : public nsIHttpProtocolHandler, static already_AddRefed<nsHttpHandler> GetInstance(); - [[nodiscard]] nsresult AddAcceptAndDictionaryHeaders( - nsIURI* aURI, ExtContentPolicyType aType, nsHttpRequestHead* aRequest, - bool aSecure, bool& aAsync, nsHttpChannel* aChan, - void (*aSuspend)(nsHttpChannel*), - const std::function<bool(bool, DictionaryCacheEntry*)>& aCallback); [[nodiscard]] nsresult AddStandardRequestHeaders( - nsHttpRequestHead*, nsIURI* aURI, bool aIsHTTPS, + nsHttpRequestHead*, bool isSecure, ExtContentPolicyType aContentPolicyType, bool aShouldResistFingerprinting); [[nodiscard]] nsresult AddConnectionHeader(nsHttpRequestHead*, uint32_t caps); @@ -532,8 +526,7 @@ class nsHttpHandler final : public nsIHttpProtocolHandler, void PrefsChanged(const char* pref); [[nodiscard]] nsresult SetAcceptLanguages(); - [[nodiscard]] nsresult SetAcceptEncodings(const char*, bool aIsSecure, - bool aDictionary); + [[nodiscard]] nsresult 
SetAcceptEncodings(const char*, bool mIsSecure); [[nodiscard]] nsresult InitConnectionMgr(); @@ -570,9 +563,6 @@ class nsHttpHandler final : public nsIHttpProtocolHandler, UniquePtr<AltSvcCache> mAltSvcCache; - // Pointer to DictionaryCache singleton - RefPtr<DictionaryCache> mDictionaryCache; - // // prefs // @@ -633,7 +623,6 @@ class nsHttpHandler final : public nsIHttpProtocolHandler, nsCString mAcceptLanguages; nsCString mHttpAcceptEncodings; nsCString mHttpsAcceptEncodings; - nsCString mDictionaryAcceptEncodings; nsCString mDefaultSocketType; diff --git a/netwerk/protocol/http/nsHttpHeaderArray.cpp b/netwerk/protocol/http/nsHttpHeaderArray.cpp @@ -43,17 +43,14 @@ nsresult nsHttpHeaderArray::SetHeader( MOZ_ASSERT( (variety == eVarietyResponse) || (variety == eVarietyRequestDefault) || (variety == eVarietyRequestOverride) || - (variety == eVarietyResponseOverride) || (variety == eVarietyRequestEnforceDefault), "Net original headers can only be set using SetHeader_internal()."); nsEntry* entry = nullptr; int32_t index = LookupEntry(header, &entry); - // If an empty value is received and we aren't merging headers discard it, - // unless we're overriding - if (value.IsEmpty() && header != nsHttp::X_Frame_Options && - variety != eVarietyResponseOverride) { + // If an empty value is received and we aren't merging headers discard it + if (value.IsEmpty() && header != nsHttp::X_Frame_Options) { if (!merge && entry) { if (entry->variety == eVarietyResponseNetOriginalAndResponse) { MOZ_ASSERT(variety == eVarietyResponse); @@ -82,8 +79,7 @@ nsresult nsHttpHeaderArray::SetHeader( if (!IsIgnoreMultipleHeader(header)) { // Replace the existing string with the new value if (entry->variety == eVarietyResponseNetOriginalAndResponse) { - MOZ_ASSERT(variety == eVarietyResponse || - variety == eVarietyResponseOverride); + MOZ_ASSERT(variety == eVarietyResponse); entry->variety = eVarietyResponseNetOriginal; return SetHeader_internal(header, headerName, value, variety); } @@ 
-284,12 +280,10 @@ nsresult nsHttpHeaderArray::GetOriginalHeader(const nsHttpAtom& aHeader, MOZ_ASSERT((entry.variety == eVarietyResponseNetOriginalAndResponse) || (entry.variety == eVarietyResponseNetOriginal) || - (entry.variety == eVarietyResponse) || - (entry.variety == eVarietyResponseOverride), + (entry.variety == eVarietyResponse), "This must be a response header."); index++; - if (entry.variety == eVarietyResponse || - entry.variety == eVarietyResponseOverride) { + if (entry.variety == eVarietyResponse) { continue; } diff --git a/netwerk/protocol/http/nsHttpHeaderArray.h b/netwerk/protocol/http/nsHttpHeaderArray.h @@ -46,8 +46,7 @@ class nsHttpHeaderArray { // Used only for response header. eVarietyResponseNetOriginalAndResponse, eVarietyResponseNetOriginal, - eVarietyResponse, - eVarietyResponseOverride, + eVarietyResponse }; // Used by internal setters: to set header from network use SetHeaderFromNet diff --git a/netwerk/protocol/http/nsHttpRequestHead.cpp b/netwerk/protocol/http/nsHttpRequestHead.cpp @@ -8,7 +8,6 @@ #include "nsHttpRequestHead.h" #include "nsIHttpHeaderVisitor.h" -#include "mozilla/net/Dictionary.h" //----------------------------------------------------------------------------- // nsHttpRequestHead @@ -99,11 +98,6 @@ void nsHttpRequestHead::SetPath(const nsACString& s) { mPath = s; } -void nsHttpRequestHead::SetDictionary(DictionaryCacheEntry* aDict) { - RecursiveMutexAutoLock mon(mRecursiveMutex); // XXX necessary? - mDict = aDict; -} - uint32_t nsHttpRequestHead::HeaderCount() { RecursiveMutexAutoLock mon(mRecursiveMutex); return mHeaders.Count(); diff --git a/netwerk/protocol/http/nsHttpRequestHead.h b/netwerk/protocol/http/nsHttpRequestHead.h @@ -23,8 +23,6 @@ struct ParamTraits; namespace mozilla { namespace net { -class DictionaryCacheEntry; - //----------------------------------------------------------------------------- // nsHttpRequestHead represents the request line and headers from an HTTP // request. 
@@ -56,8 +54,6 @@ class nsHttpRequestHead { void SetVersion(HttpVersion version); void SetRequestURI(const nsACString& s); void SetPath(const nsACString& s); - // keep a ref to the dictionary we offered, if any - void SetDictionary(DictionaryCacheEntry* aDict); uint32_t HeaderCount(); // Using this function it is possible to itereate through all headers @@ -140,8 +136,6 @@ class nsHttpRequestHead { nsCString mRequestURI MOZ_GUARDED_BY(mRecursiveMutex); nsCString mPath MOZ_GUARDED_BY(mRecursiveMutex); - RefPtr<DictionaryCacheEntry> mDict MOZ_GUARDED_BY(mRecursiveMutex); - nsCString mOrigin MOZ_GUARDED_BY(mRecursiveMutex); ParsedMethodType mParsedMethod MOZ_GUARDED_BY(mRecursiveMutex){kMethod_Get}; bool mHTTPS MOZ_GUARDED_BY(mRecursiveMutex){false}; diff --git a/netwerk/protocol/http/nsHttpResponseHead.cpp b/netwerk/protocol/http/nsHttpResponseHead.cpp @@ -193,19 +193,6 @@ nsresult nsHttpResponseHead::SetHeader(const nsHttpAtom& hdr, return SetHeader_locked(hdr, ""_ns, val, merge); } -// override the current value -nsresult nsHttpResponseHead::SetHeaderOverride(const nsHttpAtom& atom, - const nsACString& val) { - RecursiveMutexAutoLock monitor(mRecursiveMutex); - - if (mInVisitHeaders) { - return NS_ERROR_FAILURE; - } - - return mHeaders.SetHeader(atom, ""_ns, val, false, - nsHttpHeaderArray::eVarietyResponseOverride); -} - nsresult nsHttpResponseHead::SetHeader_locked(const nsHttpAtom& atom, const nsACString& hdr, const nsACString& val, diff --git a/netwerk/protocol/http/nsHttpResponseHead.h b/netwerk/protocol/http/nsHttpResponseHead.h @@ -72,8 +72,6 @@ class nsHttpResponseHead { [[nodiscard]] nsresult SetHeader(const nsACString& h, const nsACString& v, bool m = false); - [[nodiscard]] nsresult SetHeaderOverride(const nsHttpAtom& h, - const nsACString& v); [[nodiscard]] nsresult SetHeader(const nsHttpAtom& h, const nsACString& v, bool m = false); [[nodiscard]] nsresult GetHeader(const nsHttpAtom& h, nsACString& v) const; diff --git 
a/netwerk/protocol/http/nsIHttpChannel.idl b/netwerk/protocol/http/nsIHttpChannel.idl @@ -10,13 +10,8 @@ interface nsIReferrerInfo; %{C++ #include "GeckoProfiler.h" -namespace mozilla::net { -class DictionaryCacheEntry; -} %} -[ptr] native DictionaryCacheEntry(mozilla::net::DictionaryCacheEntry); - native UniqueProfileChunkedBuffer(mozilla::UniquePtr<mozilla::ProfileChunkedBuffer>); /** @@ -513,8 +508,4 @@ interface nsIHttpChannel : nsIIdentChannel * and user agent on the channel is outdated. */ [noscript, must_use] attribute boolean isUserAgentHeaderOutdated; - /** - * Dictionary for decompression, if any - */ - [noscript] attribute DictionaryCacheEntry decompressDictionary; }; diff --git a/netwerk/protocol/viewsource/nsViewSourceChannel.cpp b/netwerk/protocol/viewsource/nsViewSourceChannel.cpp @@ -1224,19 +1224,6 @@ nsViewSourceChannel::AsyncOnChannelRedirect( return NS_OK; } -NS_IMETHODIMP -nsViewSourceChannel::GetDecompressDictionary( - mozilla::net::DictionaryCacheEntry** aDictionary) { - *aDictionary = nullptr; - return NS_OK; -} - -NS_IMETHODIMP -nsViewSourceChannel::SetDecompressDictionary( - mozilla::net::DictionaryCacheEntry* aDictionary) { - return NS_OK; -} - // nsIInterfaceRequestor NS_IMETHODIMP diff --git a/netwerk/streamconv/converters/nsHTTPCompressConv.cpp b/netwerk/streamconv/converters/nsHTTPCompressConv.cpp @@ -9,21 +9,19 @@ #include "nsCOMPtr.h" #include "nsCRT.h" #include "nsError.h" -#include "nsIChannel.h" -#include "nsIForcePendingChannel.h" -#include "nsIHttpChannel.h" -#include "nsIRequest.h" -#include "nsIThreadRetargetableRequest.h" #include "nsIThreadRetargetableStreamListener.h" -#include "nsThreadUtils.h" #include "nsStreamUtils.h" #include "nsStringStream.h" #include "nsComponentManagerUtils.h" -#include "mozilla/net/Dictionary.h" +#include "nsThreadUtils.h" #include "mozilla/Preferences.h" #include "mozilla/StaticPrefs_network.h" #include "mozilla/Logging.h" +#include "nsIForcePendingChannel.h" +#include "nsIRequest.h" #include 
"mozilla/UniquePtrExtensions.h" +#include "nsIThreadRetargetableRequest.h" +#include "nsIChannel.h" // brotli headers #undef assert @@ -37,48 +35,16 @@ namespace mozilla { namespace net { -class DictionaryCacheEntry; - extern LazyLogModule gHttpLog; #define LOG(args) \ MOZ_LOG(mozilla::net::gHttpLog, mozilla::LogLevel::Debug, args) -extern LazyLogModule gDictionaryLog; -#define DICTIONARY_LOG(args) \ - MOZ_LOG(mozilla::net::gDictionaryLog, mozilla::LogLevel::Debug, args) - class BrotliWrapper { public: - BrotliWrapper() {} - ~BrotliWrapper() { BrotliDecoderStateCleanup(&mState); } - - bool Init(nsIRequest* aRequest) { - if (!BrotliDecoderStateInit(&mState, nullptr, nullptr, nullptr)) { - return false; - } - - nsCOMPtr<nsIHttpChannel> httpchannel(do_QueryInterface(aRequest)); - if (!httpchannel) { - return false; - } - // XXX Wait for dictionary to be read into RAM!! - if (NS_SUCCEEDED(httpchannel->GetDecompressDictionary( - getter_AddRefs(mDictionary))) && - mDictionary) { - size_t length = mDictionary->GetDictionary().length(); - DICTIONARY_LOG(("Brotli: dictionary %zu bytes", length)); - if (length > 0) { - BROTLI_BOOL result = BrotliDecoderAttachDictionary( - &mState, BROTLI_SHARED_DICTIONARY_RAW, length, - mDictionary->GetDictionary().begin()); - if (!result) { - DICTIONARY_LOG(("Brotli: AttachDictionary failed")); - return false; - } - } - } - return true; + BrotliWrapper() { + BrotliDecoderStateInit(&mState, nullptr, nullptr, nullptr); } + ~BrotliWrapper() { BrotliDecoderStateCleanup(&mState); } BrotliDecoderState mState{}; Atomic<size_t, Relaxed> mTotalOut{0}; @@ -88,11 +54,6 @@ class BrotliWrapper { nsIRequest* mRequest{nullptr}; nsISupports* mContext{nullptr}; uint64_t mSourceOffset{0}; - - RefPtr<DictionaryCacheEntry> mDictionary; - - uint8_t mEaten{0}; - uint8_t mHeader[36]; // \FF\44\43\42 + 32-byte SHA-256 }; #ifdef ZSTD_INFALLIBLE @@ -106,24 +67,7 @@ ZSTD_customMem const zstd_allocators = {zstd_malloc, zstd_free, nullptr}; class ZstdWrapper { public: 
- ZstdWrapper(nsIRequest* aRequest, nsHTTPCompressConv::CompressMode aMode) { - size_t length = 0; - if (aMode == nsHTTPCompressConv::HTTP_COMPRESS_ZSTD_DICTIONARY) { - nsCOMPtr<nsIHttpChannel> httpchannel(do_QueryInterface(aRequest)); - if (httpchannel) { - // XXX Wait for dictionary to be read into RAM!! - if (NS_FAILED(httpchannel->GetDecompressDictionary( - getter_AddRefs(mDictionary))) || - !mDictionary) { - return; - } - length = mDictionary->GetDictionary().length(); - } else { - // Can't decode without a dictionary - return; - } - } - + ZstdWrapper() { #ifdef ZSTD_INFALLIBLE mDStream = ZSTD_createDStream_advanced(zstd_allocators); // infallible #else @@ -135,15 +79,6 @@ class ZstdWrapper { return; } #endif - if (mDictionary) { - DICTIONARY_LOG(("zstd: dictionary %zu bytes", length)); - ZSTD_DCtx_reset(mDStream, ZSTD_reset_session_only); - if (ZSTD_isError(ZSTD_DCtx_loadDictionary( - mDStream, mDictionary->GetDictionary().begin(), length))) { - return; - } - } - ZSTD_DCtx_setParameter(mDStream, ZSTD_d_windowLogMax, 23 /*8*1024*1024*/); } ~ZstdWrapper() { @@ -158,8 +93,6 @@ class ZstdWrapper { nsISupports* mContext{nullptr}; uint64_t mSourceOffset{0}; ZSTD_DStream* mDStream{nullptr}; - - RefPtr<DictionaryCacheEntry> mDictionary; }; // nsISupports implementation @@ -227,12 +160,6 @@ nsHTTPCompressConv::AsyncConvertData(const char* aFromType, const char* aToType, } else if (!nsCRT::strncasecmp(aFromType, HTTP_ZST_TYPE, sizeof(HTTP_ZST_TYPE) - 1)) { mMode = HTTP_COMPRESS_ZSTD; - } else if (!nsCRT::strncasecmp(aFromType, HTTP_BROTLI_DICTIONARY_TYPE, - sizeof(HTTP_BROTLI_DICTIONARY_TYPE) - 1)) { - mMode = HTTP_COMPRESS_BROTLI_DICTIONARY; - } else if (!nsCRT::strncasecmp(aFromType, HTTP_ZSTD_DICTIONARY_TYPE, - sizeof(HTTP_ZSTD_DICTIONARY_TYPE) - 1)) { - mMode = HTTP_COMPRESS_ZSTD_DICTIONARY; } LOG(("nsHttpCompresssConv %p AsyncConvertData %s %s mode %d\n", this, aFromType, aToType, (CompressMode)mMode)); @@ -344,8 +271,7 @@ 
nsHTTPCompressConv::OnStopRequest(nsIRequest* request, nsresult aStatus) { status = NS_ERROR_NET_PARTIAL_TRANSFER; LOG(("nsHttpCompresssConv %p onstop partial gzip\n", this)); } - if (NS_SUCCEEDED(status) && (mMode == HTTP_COMPRESS_BROTLI || - mMode == HTTP_COMPRESS_BROTLI_DICTIONARY)) { + if (NS_SUCCEEDED(status) && mMode == HTTP_COMPRESS_BROTLI) { nsCOMPtr<nsIForcePendingChannel> fpChannel = do_QueryInterface(request); bool isPending = false; if (request) { @@ -363,17 +289,6 @@ nsHTTPCompressConv::OnStopRequest(nsIRequest* request, nsresult aStatus) { fpChannel->ForcePending(false); } } - // We don't need the dictionary data anymore - if (mBrotli || mZstd) { - RefPtr<DictionaryCacheEntry> dict; - nsCOMPtr<nsIHttpChannel> httpchannel(do_QueryInterface(request)); - if (httpchannel) { - httpchannel->SetDecompressDictionary(nullptr); - } - // paranoia - mBrotli = nullptr; - mZstd = nullptr; - } nsCOMPtr<nsIStreamListener> listener; { @@ -404,36 +319,6 @@ nsresult nsHTTPCompressConv::BrotliHandler(nsIInputStream* stream, return NS_OK; } - // Dictionary-encoded brotli has a 36-byte header (4 byte fixed + 32 byte - // SHA-256) - if (self->mBrotli->mDictionary && self->mBrotli->mEaten < 36) { - uint8_t header_needed = 36 - self->mBrotli->mEaten; - if (avail >= header_needed) { - memcpy(&self->mBrotli->mHeader[self->mBrotli->mEaten], dataIn, - header_needed); - avail -= header_needed; - dataIn += header_needed; - self->mBrotli->mEaten = 36; - - // Validate header - // XXX we could verify the SHA-256 matches what we offered - static uint8_t brotli_header[4] = {0xff, 0x44, 0x43, 0x42}; - if (memcmp(self->mBrotli->mHeader, brotli_header, 4) != 0) { - DICTIONARY_LOG( - ("!! 
%p Brotli failed: bad magic header 0x%02x%02x%02x%02x", self, - self->mBrotli->mHeader[0], self->mBrotli->mHeader[1], - self->mBrotli->mHeader[2], self->mBrotli->mHeader[3])); - self->mBrotli->mStatus = NS_ERROR_INVALID_CONTENT_ENCODING; - return self->mBrotli->mStatus; - } - } else { - memcpy(&self->mBrotli->mHeader[self->mBrotli->mEaten], dataIn, aAvail); - self->mBrotli->mEaten += aAvail; - *countRead = aAvail; - return NS_OK; - } - } - auto outBuffer = MakeUniqueFallible<uint8_t[]>(kOutSize); if (outBuffer == nullptr) { self->mBrotli->mStatus = NS_ERROR_OUT_OF_MEMORY; @@ -454,15 +339,12 @@ nsresult nsHTTPCompressConv::BrotliHandler(nsIInputStream* stream, self->mBrotli->mTotalOut = totalOut; self->mBrotli->mBrotliStateIsStreamEnd = BrotliDecoderIsFinished(&self->mBrotli->mState); - LOG(("nsHttpCompressConv %p brotlihandler decompress rv=%" PRIx32 + LOG(("nsHttpCompresssConv %p brotlihandler decompress rv=%" PRIx32 " out=%zu\n", self, static_cast<uint32_t>(res), outSize)); if (res == BROTLI_DECODER_RESULT_ERROR) { - DICTIONARY_LOG( - ("nsHttpCompressConv %p decoding error: marking invalid encoding " - "(%zu)", - self, avail)); + LOG(("nsHttpCompressConv %p marking invalid encoding", self)); self->mBrotli->mStatus = NS_ERROR_INVALID_CONTENT_ENCODING; return self->mBrotli->mStatus; } @@ -817,13 +699,9 @@ nsHTTPCompressConv::OnDataAvailable(nsIRequest* request, nsIInputStream* iStr, } /* gzip */ break; - case HTTP_COMPRESS_BROTLI: - case HTTP_COMPRESS_BROTLI_DICTIONARY: { + case HTTP_COMPRESS_BROTLI: { if (!mBrotli) { mBrotli = MakeUnique<BrotliWrapper>(); - if (!mBrotli->Init(request)) { - return NS_ERROR_FAILURE; - } } mBrotli->mRequest = request; @@ -840,10 +718,9 @@ nsHTTPCompressConv::OnDataAvailable(nsIRequest* request, nsIInputStream* iStr, } } break; - case HTTP_COMPRESS_ZSTD: - case HTTP_COMPRESS_ZSTD_DICTIONARY: { + case HTTP_COMPRESS_ZSTD: { if (!mZstd) { - mZstd = MakeUnique<ZstdWrapper>(request, mMode); + mZstd = MakeUnique<ZstdWrapper>(); if 
(!mZstd->mDStream) { return NS_ERROR_OUT_OF_MEMORY; } diff --git a/netwerk/streamconv/converters/nsHTTPCompressConv.h b/netwerk/streamconv/converters/nsHTTPCompressConv.h @@ -35,8 +35,6 @@ class nsIStringInputStream; # define HTTP_UNCOMPRESSED_TYPE "uncompressed" # define HTTP_ZSTD_TYPE "zstd" # define HTTP_ZST_TYPE "zst" -# define HTTP_BROTLI_DICTIONARY_TYPE "dcb" -# define HTTP_ZSTD_DICTIONARY_TYPE "dcz" namespace mozilla { namespace net { @@ -66,8 +64,6 @@ class nsHTTPCompressConv : public nsIStreamConverter, HTTP_COMPRESS_BROTLI, HTTP_COMPRESS_IDENTITY, HTTP_COMPRESS_ZSTD, - HTTP_COMPRESS_BROTLI_DICTIONARY, - HTTP_COMPRESS_ZSTD_DICTIONARY, }; private: diff --git a/netwerk/test/gtest/TestUseAsDictionary.cpp b/netwerk/test/gtest/TestUseAsDictionary.cpp @@ -1,226 +0,0 @@ -/* This Source Code Form is subject to the terms of the Mozilla Public - * License, v. 2.0. If a copy of the MPL was not distributed with this - * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ - -#include <ostream> - -#include "gtest/gtest-param-test.h" -#include "gtest/gtest.h" - -#include "mozilla/gtest/MozAssertions.h" -#include "nsNetUtil.h" - -using namespace mozilla::net; - -struct TestData { - bool mResult; - const nsCString mHeader; - // output to match: - const nsCString mMatchVal; - const nsCString mMatchIdVal; - const nsCString mTypeVal; - // matchDestVal ends with ""_ns - const nsCString mMatchDestVal[5]; -}; - -TEST(TestUseAsDictionary, Match) -{ - // Note: we're not trying to test Structured Fields - // (https://datatracker.ietf.org/doc/html/rfc8941) here, but the data within - // it, so generally we aren't looking for format errors - const struct TestData gTestArray[] = { - {true, - "match=\"/app/*/main.js\""_ns, - "/app/*/main.js"_ns, - ""_ns, - ""_ns, - {""_ns}}, - {true, - "match=\"/app/*/main.js\", id=\"some_id\""_ns, - "/app/*/main.js"_ns, - "some_id"_ns, - ""_ns, - {""_ns}}, - // match= is required - {false, "id=\"some_id\""_ns, ""_ns, "some_id"_ns, ""_ns, 
{""_ns}}, - {true, - "match=\"/app/*/main.js\", id=\"some_id\", type=raw"_ns, - "/app/*/main.js"_ns, - "some_id"_ns, - "raw"_ns, - {""_ns}}, - // only raw is supported for type - {false, - "match=\"/app/*/main.js\", id=\"some_id\", type=not_raw"_ns, - "/app/*/main.js"_ns, - "some_id"_ns, - "raw"_ns, - {""_ns}}, - {true, - "match=\"/app/*/main.js\", id=\"some_id\", match-dest=(\"style\")"_ns, - "/app/*/main.js"_ns, - "some_id"_ns, - ""_ns, - {"style"_ns, ""_ns}}, - {true, - "match=\"/app/*/main.js\", id=\"some_id\", match-dest=(\"style\"), type=raw"_ns, - "/app/*/main.js"_ns, - "some_id"_ns, - "raw"_ns, - {"style"_ns, ""_ns}}, - {true, - "match=\"/app/*/main.js\", id=\"some_id\", match-dest=(\"style\" \"document\"), type=raw"_ns, - "/app/*/main.js"_ns, - "some_id"_ns, - "raw"_ns, - {"style"_ns, "document"_ns, ""_ns}}, - // adding the comma after style is a syntax error for structured fields - {false, - "match=\"/app/*/main.js\", id=\"some_id\", match-dest=(\"style\", \"document\"), type=raw"_ns, - "/app/*/main.js"_ns, - "some_id"_ns, - "raw"_ns, - {"style"_ns, "document"_ns, ""_ns}}, - // --- Additional tests for spec compliance and edge cases --- - // 1. Missing quotes around match value - {false, - "match=/app/*/main.js, id=\"id1\""_ns, - ""_ns, - "id1"_ns, - ""_ns, - {""_ns}}, - // 2. Extra unknown parameter - {true, - "match=\"/foo.js\", foo=bar"_ns, - "/foo.js"_ns, - ""_ns, - ""_ns, - {""_ns}}, - // 3. Whitespace variations - {true, - " match=\"/foo.js\" , id=\"id2\" "_ns, - "/foo.js"_ns, - "id2"_ns, - ""_ns, - {""_ns}}, - // 4. Empty match value - {false, "match=\"\""_ns, ""_ns, ""_ns, ""_ns, {""_ns}}, - // 5. Duplicate match parameter (should use the last) - {true, - "match=\"/foo.js\", match=\"/bar.js\""_ns, - "/bar.js"_ns, - ""_ns, - ""_ns, - {""_ns}}, - // 6. Duplicate id parameter (should use the last) - {true, - "match=\"/foo.js\", id=\"id1\", id=\"id2\""_ns, - "/foo.js"_ns, - "id2"_ns, - ""_ns, - {""_ns}}, - // 7. 
Parameter order: id before match - {true, - "id=\"id3\", match=\"/foo.js\""_ns, - "/foo.js"_ns, - "id3"_ns, - ""_ns, - {""_ns}}, - // 8. Non-raw type (should fail) - {false, - "match=\"/foo.js\", type=compressed"_ns, - "/foo.js"_ns, - ""_ns, - "raw"_ns, - {""_ns}}, - // 9. Empty header - {false, ""_ns, ""_ns, ""_ns, ""_ns, {""_ns}}, - // 10. match-dest with empty list - {true, - "match=\"/foo.js\", match-dest=()"_ns, - "/foo.js"_ns, - ""_ns, - ""_ns, - {""_ns}}, - // 11. match-dest with whitespace and multiple values - {true, - "match=\"/foo.js\", match-dest=( \"a\" \"b\" )"_ns, - "/foo.js"_ns, - ""_ns, - ""_ns, - {"a"_ns, "b"_ns, ""_ns}}, - // 12. match-dest with invalid value (missing quotes) - {false, - "match=\"/foo.js\", match-dest=(a)"_ns, - "/foo.js"_ns, - ""_ns, - ""_ns, - {""_ns}}, - // 13. match-dest with duplicate values - {true, - "match=\"/foo.js\", match-dest=(\"a\" \"a\")"_ns, - "/foo.js"_ns, - ""_ns, - ""_ns, - {"a"_ns, "a"_ns, ""_ns}}, - // 14. Case sensitivity: type=RAW (should fail, only 'raw' allowed) - {false, - "match=\"/foo.js\", type=RAW"_ns, - "/foo.js"_ns, - ""_ns, - "raw"_ns, - {""_ns}}, - - // Note: Structured Fields requires all input to be ASCII - - // 18. match-dest with trailing whitespace - {true, - "match=\"/foo.js\", match-dest=(\"a\" )"_ns, - "/foo.js"_ns, - ""_ns, - ""_ns, - {"a"_ns, ""_ns}}, - // 19. match-dest with only whitespace in list (should be empty) - {true, - "match=\"/foo.js\", match-dest=( )"_ns, - "/foo.js"_ns, - ""_ns, - ""_ns, - {""_ns}}, - // 20. 
match-dest with comma and whitespace (invalid) - {false, - "match=\"/foo.js\", match-dest=(\"a\", \"b\")"_ns, - "/foo.js"_ns, - ""_ns, - ""_ns, - {"a"_ns, "b"_ns, ""_ns}}, - }; - - for (auto& test : gTestArray) { - nsCString match, matchId, type; - nsTArray<nsCString> matchDest; - nsTArray<nsCString> matchDestVal; - - for (auto& dest : test.mMatchDestVal) { - if (dest.IsEmpty()) { - break; - } - matchDestVal.AppendElement(dest); - } - - fprintf(stderr, "Testing %s\n", test.mHeader.get()); - ASSERT_EQ( - NS_ParseUseAsDictionary(test.mHeader, match, matchId, matchDest, type), - test.mResult); - - if (test.mResult) { - ASSERT_EQ(match, test.mMatchVal); - ASSERT_EQ(matchId, test.mMatchIdVal); - ASSERT_EQ(matchDest.Length(), matchDestVal.Length()); - for (size_t i = 0; i < matchDest.Length(); i++) { - ASSERT_EQ(matchDest[i], matchDestVal[i]); - } - ASSERT_EQ(type, test.mTypeVal); - } - } -} diff --git a/netwerk/test/gtest/moz.build b/netwerk/test/gtest/moz.build @@ -41,7 +41,6 @@ UNIFIED_SOURCES += [ "TestURIMutator.cpp", "TestUriTemplate.cpp", "TestURLPatternGlue.cpp", - "TestUseAsDictionary.cpp", "TestWebTransportFlowControl.cpp", ] diff --git a/netwerk/test/httpserver/NodeServer.sys.mjs b/netwerk/test/httpserver/NodeServer.sys.mjs @@ -243,7 +243,6 @@ class NodeHTTPSServerCode extends BaseNodeHTTPServerCode { const options = { key: fs.readFileSync(__dirname + "/http2-cert.key"), cert: fs.readFileSync(__dirname + "/http2-cert.pem"), - maxHeaderSize: 128 * 1024, }; const https = require("https"); global.server = https.createServer( diff --git a/netwerk/test/unit/head_cache.js b/netwerk/test/unit/head_cache.js @@ -4,7 +4,7 @@ var { XPCOMUtils } = ChromeUtils.importESModule( "resource://gre/modules/XPCOMUtils.sys.mjs" ); -function evict_cache_entries(where, lci = null) { +function evict_cache_entries(where) { var clearDisk = !where || where == "disk" || where == "all"; var clearMem = !where || where == "memory" || where == "all"; @@ -12,14 +12,14 @@ function 
evict_cache_entries(where, lci = null) { if (clearMem) { storage = Services.cache2.memoryCacheStorage( - lci ? lci : Services.loadContextInfo.default + Services.loadContextInfo.default ); storage.asyncEvictStorage(null); } if (clearDisk) { storage = Services.cache2.diskCacheStorage( - lci ? lci : Services.loadContextInfo.default + Services.loadContextInfo.default ); storage.asyncEvictStorage(null); } @@ -113,28 +113,15 @@ function get_device_entry_count(where, lci, continuation) { storage.asyncVisitStorage(visitor, false); } -function asyncCheckCacheEntryPresence( - key, - where, - shouldExist, - lci, - continuation -) { +function asyncCheckCacheEntryPresence(key, where, shouldExist, continuation) { asyncOpenCacheEntry( key, where, Ci.nsICacheStorage.OPEN_READONLY, - lci, + null, function (status, entry) { if (shouldExist) { - dump( - "TEST-INFO | status: " + - status + - " checking cache key " + - key + - " exists @ " + - where - ); + dump("TEST-INFO | checking cache key " + key + " exists @ " + where); Assert.equal(status, Cr.NS_OK); Assert.ok(!!entry); } else { diff --git a/netwerk/test/unit/test_cache2_compression_dictionary.js b/netwerk/test/unit/test_cache2_compression_dictionary.js @@ -1,132 +0,0 @@ -/* - * Tests for cache2 Compression Dictionary support (draft-ietf-httpbis-compression-dictionary-19) - * - Storing dictionaries via Use-As-Dictionary - * - Using Available-Dictionary for decompression - */ - -"use strict"; - -// Load cache helpers -Services.scriptloader.loadSubScript("resource://test/head_cache.js", this); - -const { NodeHTTPSServer } = ChromeUtils.importESModule( - "resource://testing-common/NodeServer.sys.mjs" -); - -var server = null; -// Keep these in sync with duplicates below! 
-const dictContent = "DICTIONARY_DATA"; -const decompressedContent = "COMPRESSED_DATA"; -const resourcePath = "/resource"; -const dictPath = "/dict"; - -function makeChan(url) { - let chan = NetUtil.newChannel({ - uri: url, - loadUsingSystemPrincipal: true, - contentPolicyType: Ci.nsIContentPolicy.TYPE_DOCUMENT, - }).QueryInterface(Ci.nsIHttpChannel); - return chan; -} - -function channelOpenPromise(chan) { - return new Promise(resolve => { - function finish(req, buffer) { - resolve([req, buffer]); - } - chan.asyncOpen(new ChannelListener(finish, null, CL_ALLOW_UNKNOWN_CL)); - }); -} - -// Serve a dictionary with Use-As-Dictionary header -function serveDictionary(request, response) { - // the server can't see the global versions of these. - // Note: keep in sync with above! - let dict = "dict1"; - const dictContent = "DICTIONARY_DATA"; - response.writeHead(200, { - "Content-Type": "application/octet-stream", - "Use-As-Dictionary": `match=\"*\", id=\"${dict}\", type=raw`, - "Cache-Control": "max-age=3600", - }); - response.end(dictContent, "binary"); -} - -// Serve a resource with Available-Dictionary header -function serveCompressedResource(request, response) { - // brotli compressed data is 4 byte magic + 32-byte SHA-256 hash (which we - // don't check) - const compressedContent = - "\xff\x44\x43\x42" + - "12345678901234567890123456789012" + - "\x21\x38\x00\x04COMPRESSED_DATA\x03"; - let availDict = request.headers["available-dictionary"]; - if (availDict != undefined) { - response.writeHead(200, { - "Content-Type": "application/octet-stream", - "Content-Encoding": "dcb", - }); - response.end(compressedContent, "binary"); - } else { - response.writeHead(200, { - "Content-Type": "application/octet-stream", - }); - response.end("UNCOMPRESSED_DATA", "binary"); - } -} - -add_setup(async function () { - if (!server) { - server = new NodeHTTPSServer(); - await server.start(); - registerCleanupFunction(async () => { - await server.stop(); - }); - - await 
server.registerPathHandler(dictPath, serveDictionary); - await server.registerPathHandler(resourcePath, serveCompressedResource); - } -}); - -add_task(async function test_resource_without_dictionary() { - let uri = `${server.origin()}${resourcePath}`; - let chan = makeChan(uri); - let [, data] = await channelOpenPromise(chan); - Assert.equal(data, "UNCOMPRESSED_DATA", "Received uncompressed data"); -}); - -add_task(async function test_store_dictionary() { - let uri = `${server.origin()}${dictPath}`; - let chan = makeChan(uri); - let [, data] = await channelOpenPromise(chan); - Assert.equal(data, dictContent, "Dictionary body matches"); - - await new Promise(resolve => { - // Check that dictionary is stored in cache (disk) - let lci = Services.loadContextInfo.custom(false, { - partitionKey: `(https,localhost)`, - }); - asyncCheckCacheEntryPresence(uri, "disk", true, lci, resolve); - }); -}); - -add_task(async function test_use_dictionary_for_resource() { - let uri = `${server.origin()}${resourcePath}`; - - let chan = makeChan(uri); - let [req, data] = await channelOpenPromise(chan); - // Check for expected uncompressed content - Assert.strictEqual( - data, - decompressedContent, - "Received compressed data (decompression not supported in test)" - ); - // Check response headers - Assert.equal( - req.getResponseHeader("Content-Encoding"), - "", - "Content-Encoding dcb was removed" - ); - let availdict = req.getRequestHeader("available-dictionary"); - Assert.equal(availdict, ":iFRBfhN7ePMquH3Lmw/oL4xRkaa8QjW43JQO+04KA7I=:"); -}); diff --git a/netwerk/test/unit/test_dictionary_compression_dcb.js b/netwerk/test/unit/test_dictionary_compression_dcb.js @@ -1,1113 +0,0 @@ -/** - * Tests for HTTP Compression Dictionary Brotli (dcb) compression functionality - * - Dictionary-based Brotli compression and decompression - * - Content integrity verification with dcb encoding - * - Available-Dictionary header integration for compression - * - Error handling for missing/invalid 
dictionaries - * - Compression window size limits and edge cases - */ - -"use strict"; - -// Load cache helpers -Services.scriptloader.loadSubScript("resource://test/head_cache.js", this); - -const { NodeHTTPSServer } = ChromeUtils.importESModule( - "resource://testing-common/NodeServer.sys.mjs" -); - -// Test dictionaries optimized for compression testing -// Since we're not actually brotli-encoding, all decodes will yield 15 bytes -const DCB_TEST_DICTIONARIES = { - html_common: { - id: "html-dict", - content: - '<html><head><title>Common HTML Template</title></head><body><div class="container"><p>', - expected_length: 15, - pattern: "*.html", - type: "raw", - }, - html_common_no_dictionary: { - id: "html-dict", - content: - '<html><head><title>Common HTML Template</title></head><body><div class="container"><p>', - expected_length: 196, - pattern: "*.html", - type: "raw", - }, - api_json: { - id: "api-dict", - content: - '{"status":"success","data":{"id":null,"name":"","created_at":"","updated_at":""},"message":"","errors":[]}', - expected_length: 15, - pattern: "/api/*", - type: "raw", - }, - api_v1: { - id: "longer-match-dict", - content: - '{"status":"success","data":{"id":null,"name":"","created_at":"","updated_at":""},"message":"","errors":[]}', - expected_length: 15, - pattern: "/api/v1/*", - type: "raw", - }, - js_common: { - id: "js-dict", - content: - "function(){return this;};var=function();const=function();let=function();", - expected_length: 15, - pattern: "*.js", - type: "raw", - }, - large_dict: { - id: "large-dict", - content: "REPEATED_PATTERN_".repeat(1000), // ~1.5MB dictionary - expected_length: 15, - pattern: "/large/*", - type: "raw", - }, -}; - -// Test content designed to compress well with dictionaries -const DCB_TEST_CONTENT = { - html_page: - '<html><head><title>Test Page</title></head><body><div class="container"><p>This is test content that should compress well with the HTML dictionary.</p><p>More content here.</p></div></body></html>', 
- - api_response: - '{"status":"success","data":{"id":12345,"name":"Test User","created_at":"2024-01-01T00:00:00Z","updated_at":"2024-01-02T12:00:00Z"},"message":"User retrieved successfully","errors":[]}', - - api_v1: - '{"status":"success","data":{"id":12345,"name":"Test User","created_at":"2024-01-01T00:00:00Z","updated_at":"2024-01-02T12:00:00Z"},"message":"User retrieved successfully","errors":[]}', - - js_code: - 'function testFunction(){return this.value;};var result=function(){console.log("test");};const API_URL=function(){return "https://api.example.com";};let userData=function(){return {id:1,name:"test"};}', - - large_content: "REPEATED_PATTERN_DATA_CHUNK_".repeat(50000), // Content that will compress well with large dictionary - - jpeg: "ARBITRARY_DATA_".repeat(1000), -}; - -let server = null; -let requestLog = []; // Track requests for verification - -// Create channel for dictionary requests -function makeChan(url) { - let chan = NetUtil.newChannel({ - uri: url, - loadUsingSystemPrincipal: true, - contentPolicyType: Ci.nsIContentPolicy.TYPE_DOCUMENT, - }).QueryInterface(Ci.nsIHttpChannel); - return chan; -} - -function channelOpenPromise(chan) { - return new Promise(resolve => { - function finish(req, buffer) { - resolve([req, buffer]); - } - // CL_EXPECT_GZIP is needed if we're transferring compressed data; else it asserts content-length - // equals the data length. 
(We could also not send content-length) - chan.asyncOpen( - new ChannelListener( - finish, - null, - CL_ALLOW_UNKNOWN_CL | CL_IGNORE_DELAYS | CL_EXPECT_GZIP - ) - ); - }); -} - -// Setup DCB test server with dictionaries and compressed content endpoints -async function setupDCBTestServer() { - let httpServer = new NodeHTTPSServer(); - await httpServer.start(); - - // Dictionary endpoints - store dictionaries for later compression use - await httpServer.registerPathHandler( - "/dict/html", - function (request, response) { - const DCB_TEST_DICTIONARIES = { - html_common: { - id: "html-dict", - content: - '<html><head><title>Common HTML Template</title></head><body><div class="container"><p>', - pattern: "*.html", - type: "raw", - }, - }; - let dict = DCB_TEST_DICTIONARIES.html_common; - response.writeHead(200, { - "Content-Type": "application/octet-stream", - "Use-As-Dictionary": `match="${dict.pattern}", id="${dict.id}", type=${dict.type}`, - "Cache-Control": "max-age=3600", - }); - response.end(dict.content, "binary"); - } - ); - - await httpServer.registerPathHandler( - "/dict/api", - function (request, response) { - const DCB_TEST_DICTIONARIES = { - api_json: { - id: "api-dict", - content: - '{"status":"success","data":{"id":null,"name":"","created_at":"","updated_at":""},"message":"","errors":[]}', - pattern: "/api/*", - type: "raw", - }, - }; - let dict = DCB_TEST_DICTIONARIES.api_json; - response.writeHead(200, { - "Content-Type": "application/octet-stream", - "Use-As-Dictionary": `match="${dict.pattern}", id="${dict.id}", type=${dict.type}`, - "Cache-Control": "max-age=3600", - }); - response.end(dict.content, "binary"); - } - ); - - await httpServer.registerPathHandler( - "/dict/js", - function (request, response) { - const DCB_TEST_DICTIONARIES = { - js_common: { - id: "js-dict", - content: - "function(){return this;};var=function();const=function();let=function();", - pattern: "*.js", - type: "raw", - }, - }; - let dict = DCB_TEST_DICTIONARIES.js_common; - 
response.writeHead(200, { - "Content-Type": "application/octet-stream", - "Use-As-Dictionary": `match="${dict.pattern}", id="${dict.id}", type=${dict.type}`, - "Cache-Control": "max-age=3600", - }); - response.end(dict.content, "binary"); - } - ); - - await httpServer.registerPathHandler( - "/dict/large", - function (request, response) { - const DCB_TEST_DICTIONARIES = { - large_dict: { - id: "large-dict", - content: "REPEATED_PATTERN_".repeat(1000), // ~1.5MB dictionary - pattern: "/large/*", - type: "raw", - }, - }; - let dict = DCB_TEST_DICTIONARIES.large_dict; - response.writeHead(200, { - "Content-Type": "application/octet-stream", - "Use-As-Dictionary": `match="${dict.pattern}", id="${dict.id}", type=${dict.type}`, - "Cache-Control": "max-age=3600", - }); - response.end(dict.content, "binary"); - } - ); - - // Basic dictionary with valid Use-As-Dictionary header - await httpServer.registerPathHandler( - "/dict/basic", - function (request, response) { - const TEST_DICTIONARIES = { - basic: { - id: "basic-dict", - content: "BASIC_DICTIONARY_DATA", - pattern: "/api/*", - type: "raw", - }, - }; - let dict = TEST_DICTIONARIES.basic; - response.writeHead(200, { - "Content-Type": "application/octet-stream", - "Use-As-Dictionary": `match="${dict.pattern}", id="${dict.id}", type=${dict.type}`, - "Cache-Control": "max-age=3600", - }); - response.end(dict.content, "binary"); - } - ); - - // Dictionary with longer match value - await httpServer.registerPathHandler( - "/dict/longer", - function (request, response) { - const TEST_DICTIONARIES = { - specific: { - id: "longer-match-dict", - content: - '{"status":"success","data":{"id":null,"name":"","created_at":"","updated_at":""},"message":"","errors":[]}', - pattern: "/api/v1/*", - type: "raw", - }, - }; - let dict = TEST_DICTIONARIES.specific; - response.writeHead(200, { - "Content-Type": "application/octet-stream", - "Use-As-Dictionary": `match="${dict.pattern}", id="${dict.id}", type=${dict.type}`, - "Cache-Control": 
"max-age=3600", - }); - response.end(dict.content, "binary"); - } - ); - - registerCleanupFunction(async () => { - try { - await httpServer.stop(); - } catch (e) { - // Ignore server stop errors during cleanup - } - }); - - return httpServer; -} - -async function sync_to_server() { - if (server.processId) { - await server.execute(`global.requestLog = ${JSON.stringify(requestLog)};`); - } else { - dump("Server not running?\n"); - } -} - -async function sync_from_server() { - if (server.processId) { - dump(`*** requestLog: ${JSON.stringify(requestLog)}\n`); - requestLog = await server.execute(`global.requestLog`); - } else { - dump("Server not running? (from)\n"); - } -} - -// Calculate expected SHA-256 hash for dictionary content -async function calculateDictionaryHash(content) { - const encoded = new TextEncoder().encode(content); - const digest = await crypto.subtle.digest("SHA-256", encoded); - return btoa(String.fromCharCode(...new Uint8Array(digest))); // base64 -} - -// Verify dcb decompression result -function verifyDCBResponse(channel, data, dictionary) { - // XXX verify decoded content once we use real Brotli encoding - Assert.equal(data.length, dictionary.expected_length); - - try { - // Note: since we remove dcb encoding in the parent process, we can't see - // it in Content-Encoding here - var contentEncoding; - channel.getOriginalResponseHeader("Content-Encoding", { - visitHeader: function visitOrg(aName, aValue) { - contentEncoding = aValue; - }, - }); - Assert.equal; - if (contentEncoding === "dcb") { - return true; - } - } catch (e) { - // Content-Encoding header not present or not dcb - } - return false; -} - -// Setup dcb-aware server endpoint -async function registerDCBEndpoint( - httpServer, - path, - dictionary, - content, - shouldCompress = true -) { - // We have to put all values and functions referenced in the handler into - // this string which will be turned into a function for the handler, because - // NodeHTTPSServer handlers can't access 
items in the local or global scopes of the - // containing file - let func = ` - let path = "${path}"; - let dictionary = ${JSON.stringify(dictionary)}; - let content = '${content}'; - let shouldCompress = ${shouldCompress}; - - let availableDict = ""; - let hasDictHeader = false; - - // Get content type based on file path - function getContentTypeForPath(path) { - if (path.endsWith('.html')) return 'text/html; charset=utf-8'; - if (path.endsWith('.js')) return 'application/javascript'; - if (path.includes('/api/')) return 'application/json'; - return 'text/plain; charset=utf-8'; - } - - // Calculate compression ratio - function calculateCompressionRatio(original, compressed) { - if (typeof original === 'string') original = original.length; - if (typeof compressed === 'string') compressed = compressed.length; - return original / compressed; - } - - // Simulate dcb compression (for server responses) - function simulateDCBCompression(content, dictionary) { - // Note: Real implementation would use actual Brotli compression - // For testing, we simulate with compression markers and realistic size reduction - let simulatedCompressedSize = Math.floor(content.length * 0.4); // Simulate 60% savings - // This needs to be something that the brotli decoder will correctly read, even though this - // will produce the wrong output - let compressedData = "\x21\x38\x00\x04COMPRESSED_DATA\x03"; - - return { - compressedData: "\xff\x44\x43\x42" + "12345678901234567890123456789012" + compressedData, - originalSize: content.length, - compressedSize: compressedData.length + 36, - compressionRatio: calculateCompressionRatio(content.length, simulatedCompressedSize + 36) - }; - } - - if (request.headers && request.headers['available-dictionary']) { - availableDict = request.headers['available-dictionary']; - hasDictHeader = true; - } else { - shouldCompress = false; - } - - // Log the request for analysis - global.requestLog[global.requestLog.length] = { - path: path, - hasAvailableDict: 
hasDictHeader, - availableDict: availableDict, - method: request.method - }; - - if (shouldCompress && hasDictHeader && availableDict.includes(dictionary.hash)) { - // Simulate dcb compression - let compressed = simulateDCBCompression(content, dictionary); - response.writeHead(200, { - "Content-Encoding": "dcb", - "Content-Type": getContentTypeForPath(path), - "Content-Length": compressed.compressedSize.toString(), - }); - - // In a real implementation, this would be actual compressed brotli data - // For testing, we simulate the compressed response - - // Note: these aren't real dictionaries; we've prepended a dummy header - // to pass the requirements for a Brotli dictionary - 4 byte magic number - // plus 32 bytes of hash (which we don't currently check, nor does Brotli). - response.end(compressed.compressedData, "binary"); - } else { - // Serve uncompressed - response.writeHead(200, { - "Content-Type": getContentTypeForPath(path), - "Content-Length": content.length, - }); - response.end(content, "binary"); - } - `; - let handler = new Function("request", "response", func); - return httpServer.registerPathHandler(path, handler); -} - -// Verify dictionary is stored in cache (reused from previous tests) -function verifyDictionaryStored(url, shouldExist, callback) { - let lci = Services.loadContextInfo.custom(false, { - partitionKey: `(https,localhost)`, - }); - asyncCheckCacheEntryPresence(url, "disk", shouldExist, lci, callback); -} - -async function setupDicts() { - requestLog = []; - await sync_to_server(); - - // Store all test dictionaries and calculate their hashes - const dictPaths = [ - "/dict/html", - "/dict/api", - "/dict/js", - "/dict/large", - "/dict/longer", - ]; - const dictKeys = [ - "html_common", - "api_json", - "js_common", - "large_dict", - "api_v1", - ]; - - for (let i = 0; i < dictPaths.length; i++) { - let path = dictPaths[i]; - let dictKey = dictKeys[i]; - let url = `${server.origin()}${path}`; - dump( - `registering dictionary ${path} for 
match patter ${DCB_TEST_DICTIONARIES[dictKey].patterh}\n` - ); - - let chan = makeChan(url); - let [, data] = await channelOpenPromise(chan); - // Calculate and store hash for later use - DCB_TEST_DICTIONARIES[dictKey].hash = - ":" + - (await calculateDictionaryHash(DCB_TEST_DICTIONARIES[dictKey].content)) + - ":"; - - // Verify dictionary content matches - Assert.equal( - data, - DCB_TEST_DICTIONARIES[dictKey].content, - `Dictionary content matches` - ); - - // Verify dictionary was stored - await new Promise(resolve => { - verifyDictionaryStored(url, true, resolve); - }); - } - - dump(`**** DCB test setup complete. Dictionaries stored with hashes.\n`); -} - -add_setup(async function () { - if (!server) { - server = await setupDCBTestServer(); - } - // Setup baseline dictionaries for compression testing - - // Clear any existing cache - let lci = Services.loadContextInfo.custom(false, { - partitionKey: `(https,localhost)`, - }); - evict_cache_entries("all", lci); - - await setupDicts(); -}); - -// Test basic dictionary-compressed Brotli functionality -add_task(async function test_basic_dcb_compression() { - dump("**** test_basic_dcb_compression\n"); - requestLog = []; - await sync_to_server(); - - // Setup DCB endpoint for HTML content - let dict = DCB_TEST_DICTIONARIES.html_common; - let content = DCB_TEST_CONTENT.html_page; - await registerDCBEndpoint(server, "/test.html", dict, content, true); - - let url = `${server.origin()}/test.html`; - let chan = makeChan(url); - let [request, data] = await channelOpenPromise(chan); - - // Check if DCB compression was used - let usedDCB = verifyDCBResponse( - request.QueryInterface(Ci.nsIHttpChannel), - data, - dict - ); - Assert.ok(usedDCB, "DCB compression should be used"); -}); - -// Test correct dictionary selection for dcb compression -add_task(async function test_dcb_dictionary_selection() { - requestLog = []; - await sync_to_server(); - - dump("**** Testing DCB dictionary selection\n"); - - // Test specific pattern 
matching for dictionary selection - let htmlDict = DCB_TEST_DICTIONARIES.html_common; - let apiDict = DCB_TEST_DICTIONARIES.api_json; - - // Register endpoints that should match different dictionaries - await registerDCBEndpoint( - server, - "/specific-test.html", - htmlDict, - DCB_TEST_CONTENT.html_page, - true - ); - await registerDCBEndpoint( - server, - "/api/specific-test", - apiDict, - DCB_TEST_CONTENT.api_response, - true - ); - - // Test HTML dictionary selection - let htmlUrl = `${server.origin()}/specific-test.html`; - let htmlChan = makeChan(htmlUrl); - let [, htmlData] = await channelOpenPromise(htmlChan); - - Assert.greater( - htmlData.length, - 0, - "HTML dictionary selection test should have content" - ); - - // Check if correct dictionary was used - await sync_from_server(); - let htmlLogEntry = requestLog.find( - entry => entry.path === "/specific-test.html" - ); - Assert.ok( - htmlLogEntry && htmlLogEntry.hasAvailableDict, - "Dictionary selection test: HTML endpoint received Available-Dictionary header" - ); - - // Test API dictionary selection - let apiUrl = `${server.origin()}/api/specific-test`; - let apiChan = makeChan(apiUrl); - let [, apiData] = await channelOpenPromise(apiChan); - - Assert.greater( - apiData.length, - 0, - "API dictionary selection test should have content" - ); - - // Check if correct dictionary was used - await sync_from_server(); - let apiLogEntry = requestLog.find( - entry => entry.path === "/api/specific-test" - ); - Assert.ok( - apiLogEntry && apiLogEntry.hasAvailableDict, - "Dictionary selection test: API endpoint received Available-Dictionary header" - ); -}); - -// Test behavior when dictionary is missing/unavailable -add_task(async function test_dcb_missing_dictionary() { - requestLog = []; - await sync_to_server(); - - dump("**** Testing DCB missing dictionary\n"); - - // Create a fake dictionary that won't be found - let fakeDict = { - id: "missing-dict", - hash: "fake_hash_that_does_not_exist", - content: "This 
dictionary was not stored", - expected_length: DCB_TEST_CONTENT.jpeg.length, - }; - - // *.jpeg Doesn't match any of the patterns in DCB_TEST_DICTIONARIES - await registerDCBEndpoint( - server, - "/missing-dict-test.jpeg", - fakeDict, - DCB_TEST_CONTENT.jpeg, - false - ); - - let url = `${server.origin()}/missing-dict-test.jpeg`; - let chan = makeChan(url); - let [request, data] = await channelOpenPromise(chan); - - // Should get uncompressed content when dictionary is missing - Assert.greater( - data.length, - 0, - "Missing dictionary test should still return content" - ); - - // Verify no dcb compression was applied - let usedDCB = verifyDCBResponse( - request.QueryInterface(Ci.nsIHttpChannel), - data, - fakeDict - ); - Assert.ok(!usedDCB, "We should not get DCB encoding for a fake item"); -}); - -// Test IETF spec compliance for dcb encoding -add_task(async function test_dcb_header_compliance() { - requestLog = []; - await sync_to_server(); - - dump("**** Testing DCB header compliance\n"); - - let dict = DCB_TEST_DICTIONARIES.api_json; - await registerDCBEndpoint( - server, - "/api/compliance-test", - dict, - DCB_TEST_CONTENT.api_response, - true - ); - - let url = `${server.origin()}/api/compliance-test`; - let chan = makeChan(url); - let [request, data] = await channelOpenPromise(chan); - - Assert.greater(data.length, 0, "IETF compliance test should have content"); - - let httpChannel = request.QueryInterface(Ci.nsIHttpChannel); - - // Verify proper Content-Type preservation - try { - let contentType = httpChannel.getResponseHeader("Content-Type"); - Assert.ok( - contentType.includes("application/json"), - "Content-Type should be preserved through compression" - ); - } catch (e) { - Assert.ok(false, "Content-Type header should be present"); - } - - // Check for proper dcb handling - await sync_from_server(); - let logEntry = requestLog.find( - entry => entry.path === "/api/compliance-test" - ); - Assert.ok( - logEntry && logEntry.hasAvailableDict, - "Must have 
available-dictionary in header compliance" - ); - // Verify Available-Dictionary follows IETF Structured Field Byte-Sequence format - // According to RFC 8941, byte sequences are enclosed in colons: :base64data: - let availableDict = logEntry.availableDict; - Assert.ok( - availableDict.startsWith(":"), - "Available-Dictionary should start with ':' (IETF Structured Field Byte-Sequence format)" - ); - Assert.ok( - availableDict.endsWith(":"), - "Available-Dictionary should end with ':' (IETF Structured Field Byte-Sequence format)" - ); - Assert.greater( - availableDict.length, - 2, - "Available-Dictionary should contain base64 data between colons" - ); - - // Extract the base64 content between the colons - let base64Content = availableDict.slice(1, -1); - Assert.greater( - base64Content.length, - 0, - "Available-Dictionary should have base64 content" - ); - - // Basic validation that it looks like base64 (contains valid base64 characters) - let base64Regex = /^[A-Za-z0-9+/]*={0,2}$/; - Assert.ok( - base64Regex.test(base64Content), - "Available-Dictionary content should be valid base64" - ); - - dump(`**** IETF compliance test: Available-Dictionary = ${availableDict}\n`); -}); - -// Test that DCB compression stops working after dictionary cache eviction -add_task(async function test_dcb_compression_after_cache_eviction() { - requestLog = []; - await sync_to_server(); - - dump("**** Testing DCB compression after cache eviction\n"); - - // Use a specific dictionary for this test - let dict = DCB_TEST_DICTIONARIES.html_common; - let dict2 = DCB_TEST_DICTIONARIES.html_common_no_dictionary; - let testContent = DCB_TEST_CONTENT.html_page; - let testPath = "/cache-eviction-test.html"; - let dictUrl = `${server.origin()}/dict/html`; - let contentUrl = `${server.origin()}${testPath}`; - - // Step 1: Ensure dictionary is in cache by fetching it - dump("**** Step 1: Loading dictionary into cache\n"); - let dictChan = makeChan(dictUrl); - let [, dictData] = await 
channelOpenPromise(dictChan); - Assert.equal(dictData, dict.content, "Dictionary loaded successfully"); - - // Verify dictionary is cached - await new Promise(resolve => { - verifyDictionaryStored(dictUrl, true, () => { - resolve(); - }); - }); - - // Step 2: Set up DCB endpoint and test compression works - dump("**** Step 2: Testing DCB compression with cached dictionary\n"); - await registerDCBEndpoint(server, testPath, dict, testContent, true); - - // Clear request log before testing - requestLog = []; - await sync_to_server(); - - let chan1 = makeChan(contentUrl); - let [req1, data1] = await channelOpenPromise(chan1); - - Assert.greater(data1.length, 0, "Should receive content before eviction"); - - // Check if DCB compression was used (should be true with cached dictionary) - let usedDCB1 = verifyDCBResponse( - req1.QueryInterface(Ci.nsIHttpChannel), - data1, - dict - ); - Assert.ok(usedDCB1, "DCB compression should be used"); - - // Step 3: Evict the dictionary from cache - dump("**** Step 3: Evicting dictionary from cache\n"); - - // Evict the dictionary cache entry - let lci = Services.loadContextInfo.custom(false, { - partitionKey: `(https,localhost)`, - }); - evict_cache_entries("all", lci); - // Force cache sync to ensure everything is written - await new Promise(resolve => { - syncWithCacheIOThread(resolve, true); - }); - - dump("**** Step 3.5: verify no longer cache\n"); - // Verify dictionary is no longer cached - await new Promise(resolve => { - verifyDictionaryStored(dictUrl, false, () => { - resolve(); - }); - }); - - // Step 4: Test that compression no longer works after eviction - dump("**** Step 4: Testing DCB compression after dictionary eviction\n"); - - let chan2 = makeChan(contentUrl); - let [req2, data2] = await channelOpenPromise(chan2); - - Assert.greater( - data2.length, - 0, - "Should still receive content after eviction" - ); - // Check if DCB compression was used (should be false without cached dictionary) - Assert.ok( - 
!verifyDCBResponse(req2.QueryInterface(Ci.nsIHttpChannel), data2, dict2), - "DCB compression should not be used without dictionary" - ); - - // XXX We can only check this if we actually brotli-compress the data - // Content should still be delivered in both cases, just not compressed in the second case - //Assert.equal(data1.length, data2.length, - // "Content length should be the same whether compressed or not (in our test simulation)"); - - dump("**** Cache eviction test completed successfully\n"); -}); - -// Test HTTP redirect (302) with dictionary-compressed content -add_task(async function test_dcb_with_http_redirect() { - await setupDicts(); - - dump("**** Testing HTTP redirect (302) with dictionary-compressed content\n"); - - let dict = DCB_TEST_DICTIONARIES.html_common; - let content = DCB_TEST_CONTENT.html_page; - await registerDCBEndpoint(server, "/test.html", dict, content, true); - let originalPath = "/redirect/original"; - let finalPath = "/test.html"; - let originalUrl = `${server.origin()}${originalPath}`; - let finalUrl = `${server.origin()}${finalPath}`; - - // Step 1: Set up redirect handler that returns 302 to final URL - let redirectFunc = ` - let finalPath = "${finalPath}"; - - // Log the request for analysis - global.requestLog[global.requestLog.length] = { - path: "${originalPath}", - method: request.method, - redirectTo: finalPath, - hasAvailableDict: !!request.headers['available-dictionary'], - availableDict: request.headers['available-dictionary'] || null - }; - - response.writeHead(302, { - "Location": finalPath, - "Cache-Control": "no-cache" - }); - response.end("Redirecting..."); - `; - let redirectHandler = new Function("request", "response", redirectFunc); - await server.registerPathHandler(originalPath, redirectHandler); - - // Step 2: Set up final endpoint with DCB compression capability - await registerDCBEndpoint(server, finalPath, dict, content, true); - - // Clear request log before testing - requestLog = []; - await 
sync_to_server(); - - // Step 3: Request the original URL that redirects to potentially DCB-compressed content - let chan = makeChan(originalUrl); - let [req, data] = await channelOpenPromise(chan); - - // Step 4: Verify redirect worked correctly - let finalUri = req.QueryInterface(Ci.nsIHttpChannel).URI.spec; - Assert.equal( - finalUri, - finalUrl, - "Final URI should match the redirected URL after 302 redirect" - ); - - // Verify we received some content - Assert.greater(data.length, 0, "Should receive content after redirect"); - - // Step 5: Check request log to verify both requests were logged - await sync_from_server(); - - // Should have two entries: redirect request and final request - let redirectEntry = requestLog.find(entry => entry.path === originalPath); - let finalEntry = requestLog.find(entry => entry.path === finalPath); - - Assert.ok(redirectEntry, "Redirect request should be logged"); - Assert.ok(finalEntry, "Final request should be logged"); - - // Step 6: Verify Available-Dictionary header handling - // Note: The redirect request may or may not have Available-Dictionary header depending on implementation - // The important thing is that the final request has it - if (redirectEntry.hasAvailableDict) { - dump(`**** Redirect request includes Available-Dictionary header\n`); - } else { - dump( - `**** Redirect request does not include Available-Dictionary header (expected)\n` - ); - } - - // Note: With redirects, Available-Dictionary headers may not be preserved - Assert.ok( - finalEntry.hasAvailableDict, - "Final request includes Available-Dictionary header" - ); - - // Available-Dictionary header should contain the dictionary hash for final request - Assert.ok( - finalEntry.availableDict.includes(dict.hash), - "Final request Available-Dictionary should contain correct dictionary hash" - ); - - // Step 7: Check if DCB compression was applied - Assert.ok( - verifyDCBResponse(req.QueryInterface(Ci.nsIHttpChannel), data, dict), - "DCB compression 
successfully applied after redirect" - ); -}); - -// Test invalid Use-As-Dictionary headers - missing match parameter -add_task(async function test_use_as_dictionary_invalid_missing_match() { - // Invalid dictionary headers - await server.registerPathHandler( - "/dict/invalid-missing-match", - function (request, response) { - response.writeHead(200, { - "Content-Type": "application/octet-stream", - "Use-As-Dictionary": `id="missing-match-dict", type=raw`, - "Cache-Control": "max-age=3600", - }); - response.end("INVALID_MISSING_MATCH_DATA", "binary"); - } - ); - let url = `${server.origin()}/dict/invalid-missing-match`; - let chan = makeChan(url); - let [req, data] = await channelOpenPromise(chan); - // Verify dictionary content matches - Assert.equal( - data, - "INVALID_MISSING_MATCH_DATA", - "Set up missing match dictionary" - ); - - let dict = DCB_TEST_DICTIONARIES.html_common_no_dictionary; - let content = DCB_TEST_CONTENT.html_page; - await registerDCBEndpoint( - server, - "/invalid/missing-match", - dict, - content, - true - ); - - url = `https://localhost:${server.port()}/invalid/missing-match`; - - chan = makeChan(url); - [req, data] = await channelOpenPromise(chan); - - Assert.equal(data, content, "Content received"); - - // Verify invalid header was not processed as dictionary - Assert.ok( - !verifyDCBResponse(req.QueryInterface(Ci.nsIHttpChannel), data, dict), - "DCB compression should not be used when dictionary has no match=" - ); - - // Invalid dictionary should not be processed as dictionary - dump("**** Missing match parameter test complete\n"); -}); - -// Test invalid Use-As-Dictionary headers - empty id parameter -add_task(async function test_use_as_dictionary_invalid_empty_id() { - await server.registerPathHandler( - "/dict/invalid-empty-id", - function (request, response) { - response.writeHead(200, { - "Content-Type": "application/octet-stream", - "Use-As-Dictionary": `match="/invalid/*", id="", type=raw`, - "Cache-Control": "max-age=3600", - 
}); - response.end("INVALID_EMPTY_ID_DATA", "binary"); - } - ); - let url = `${server.origin()}/dict/invalid-empty-id`; - let chan = makeChan(url); - let [req, data] = await channelOpenPromise(chan); - // Verify dictionary content matches - Assert.equal(data, "INVALID_EMPTY_ID_DATA", "Set up empty id dictionary"); - - let dict = DCB_TEST_DICTIONARIES.html_common_no_dictionary; - let content = DCB_TEST_CONTENT.html_page; - await registerDCBEndpoint(server, "/invalid/empty-id", dict, content, true); - - url = `https://localhost:${server.port()}/invalid/empty-id`; - - chan = makeChan(url); - [req, data] = await channelOpenPromise(chan); - - Assert.equal(data, content, "non-compressed content received"); - - Assert.ok( - !verifyDCBResponse(req.QueryInterface(Ci.nsIHttpChannel), data, dict), - "DCB compression should not be used with dictionary with empty id" - ); - dump("**** Empty id parameter test complete\n"); -}); - -// Test Available-Dictionary request header generation -add_task(async function test_available_dictionary_header_generation() { - let url = `https://localhost:${server.port()}/api/test`; - requestLog = []; - await sync_to_server(); - - // Calculate expected hash for basic dictionary - let expectedHashB64 = await calculateDictionaryHash( - DCB_TEST_DICTIONARIES.api_json.content - ); - - // Setup DCB endpoint for HTML content - let dict = DCB_TEST_DICTIONARIES.html_common; - let content = DCB_TEST_CONTENT.api_response; - await registerDCBEndpoint(server, "/api/test", dict, content, true); - - let chan = makeChan(url); - let [, data] = await channelOpenPromise(chan); - - Assert.equal(data, DCB_TEST_CONTENT.api_response, "Resource content matches"); - - // Check request log to see if Available-Dictionary header was sent - await sync_from_server(); - let logEntry = requestLog.find(entry => entry.path === "/api/test"); - Assert.ok( - logEntry && logEntry.availableDict != null, - "Available-Dictionary header should be present" - ); - if (logEntry && 
logEntry.availableDict != null) { - // Verify IETF Structured Field Byte-Sequence format - Assert.ok( - logEntry.availableDict.startsWith(":"), - "Available-Dictionary should start with ':' (IETF Structured Field format)" - ); - Assert.ok( - logEntry.availableDict.endsWith(":"), - "Available-Dictionary should end with ':' (IETF Structured Field format)" - ); - - // Verify base64 content - let base64Content = logEntry.availableDict.slice(1, -1); - let base64Regex = /^[A-Za-z0-9+/]*={0,2}$/; - Assert.ok( - base64Regex.test(base64Content), - "Available-Dictionary content should be valid base64" - ); - Assert.equal( - logEntry.availableDict, - ":" + expectedHashB64 + ":", - "Available-Dictionary has the right hash" - ); - } - dump("**** Available-Dictionary generation test complete\n"); -}); - -// Test Available-Dictionary header for specific pattern matching -add_task(async function test_available_dictionary_specific_patterns() { - let url = `https://localhost:${server.port()}/api/v1/test`; - requestLog = []; - await sync_to_server(); - - let dict = DCB_TEST_DICTIONARIES.api_v1; - let content = DCB_TEST_CONTENT.api_v1; - await registerDCBEndpoint(server, "/api/v1/test", dict, content, true); - - let chan = makeChan(url); - await channelOpenPromise(chan); - - // Check for Available-Dictionary header - await sync_from_server(); - let logEntry = requestLog.find(entry => entry.path === "/api/v1/test"); - Assert.ok( - logEntry && logEntry.availableDict != null, - "Available-Dictionary header should be present for /api/v1/*" - ); - - if (logEntry && logEntry.availableDict != null) { - // Should match both /api/v1/* (longer-dict) and /api/* (basic-dict) patterns - // It should always use the longer match, which would be /api/v1/* - Assert.equal( - logEntry.availableDict, - DCB_TEST_DICTIONARIES.api_v1.hash, - "Longer match pattern for a dictionary should be used" - ); - } - dump("**** Specific pattern matching test complete\n"); -}); - -// Test Available-Dictionary header 
absence for no matching patterns -add_task(async function test_available_dictionary_no_match() { - let url = `https://localhost:${server.port()}/nomatch/test`; - requestLog = []; - await sync_to_server(); - - let dict = DCB_TEST_DICTIONARIES.html_common; - let content = "NO MATCH TEST DATA"; - await registerDCBEndpoint(server, "/nomatch/test", dict, content, true); - - let chan = makeChan(url); - let [, data] = await channelOpenPromise(chan); - - Assert.equal(data, "NO MATCH TEST DATA", "No match content received"); - - // Check that no Available-Dictionary header was sent - await sync_from_server(); - let logEntry = requestLog.find(entry => entry.path === "/nomatch/test"); - Assert.ok(logEntry, "Request should be logged"); - - if (logEntry) { - Assert.equal( - logEntry.availableDict, - "", - "Available-Dictionary should be null for no match" - ); - } - dump("**** No match test complete\n"); -}); - -// Cleanup -add_task(async function cleanup() { - // Clear cache - let lci = Services.loadContextInfo.custom(false, { - partitionKey: `(https,localhost)`, - }); - evict_cache_entries("all", lci); - dump("**** DCB compression tests completed.\n"); -}); diff --git a/netwerk/test/unit/test_dictionary_retrieval.js b/netwerk/test/unit/test_dictionary_retrieval.js @@ -1,510 +0,0 @@ -/** - * Tests for HTTP Compression Dictionary retrieval functionality - * - Dictionary lookup by origin and pattern matching - * - Available-Dictionary header generation and formatting - * - Dictionary cache hit/miss scenarios - * - Dictionary precedence and selection logic - */ - -"use strict"; - -// Load cache helpers -Services.scriptloader.loadSubScript("resource://test/head_cache.js", this); - -const { NodeHTTPSServer } = ChromeUtils.importESModule( - "resource://testing-common/NodeServer.sys.mjs" -); - -// Test dictionaries with different patterns and priorities -const RETRIEVAL_TEST_DICTIONARIES = { - api_v1: { - id: "api-v1-dict", - content: "API_V1_COMMON_DATA", - pattern: "/api/v1/*", - 
type: "raw", - }, - api_generic: { - id: "api-generic-dict", - content: "API_GENERIC_DATA", - pattern: "/api/*", - type: "raw", - }, - wildcard: { - id: "wildcard-dict", - content: "WILDCARD_DATA", - pattern: "*", - type: "raw", - }, - js_files: { - id: "js-dict", - content: "JS_COMMON_CODE", - pattern: "*.js", - type: "raw", - }, -}; - -let server = null; -let requestLog = []; // Track requests for verification - -async function sync_to_server() { - if (server.processId) { - await server.execute(`global.requestLog = ${JSON.stringify(requestLog)};`); - } else { - dump("Server not running?\n"); - } -} - -async function sync_from_server() { - if (server.processId) { - requestLog = await server.execute(`global.requestLog`); - } else { - dump("Server not running? (from)\n"); - } -} - -add_setup(async function () { - if (!server) { - server = await setupServer(); - } - // Setup baseline dictionaries for compression testing - - // Clear any existing cache - let lci = Services.loadContextInfo.custom(false, { - partitionKey: `(https,localhost)`, - }); - evict_cache_entries("all", lci); -}); - -// Calculate expected SHA-256 hash for dictionary content -async function calculateDictionaryHash(content) { - let hasher = Cc["@mozilla.org/security/hash;1"].createInstance( - Ci.nsICryptoHash - ); - hasher.init(Ci.nsICryptoHash.SHA256); - let bytes = new TextEncoder().encode(content); - hasher.update(bytes, bytes.length); - let hash = hasher.finish(false); - return btoa(hash); // Convert to base64 -} - -// Setup dictionary test server -async function setupServer() { - if (!server) { - server = new NodeHTTPSServer(); - await server.start(); - - registerCleanupFunction(async () => { - try { - await server.stop(); - } catch (e) { - // Ignore server stop errors during cleanup - } - }); - } - return server; -} - -// Create channel for dictionary requests -function makeChan(url) { - let chan = NetUtil.newChannel({ - uri: url, - loadUsingSystemPrincipal: true, - contentPolicyType: 
Ci.nsIContentPolicy.TYPE_DOCUMENT, - }).QueryInterface(Ci.nsIHttpChannel); - return chan; -} - -function channelOpenPromise(chan) { - return new Promise(resolve => { - function finish(req, buffer) { - resolve([req, buffer]); - } - chan.asyncOpen(new ChannelListener(finish, null, CL_ALLOW_UNKNOWN_CL)); - }); -} - -// Verify dictionary is stored in cache -function verifyDictionaryStored(url, shouldExist, callback) { - let lci = Services.loadContextInfo.custom(false, { - partitionKey: `(https,localhost)`, - }); - asyncCheckCacheEntryPresence(url, "disk", shouldExist, lci, callback); -} - -// Setup server endpoint that expects specific dictionary headers -async function registerDictionaryAwareEndpoint( - httpServer, - path, - responseContent -) { - // We have to put all values and functions referenced in the handler into - // this string which will be turned into a function for the handler, because - // NodeHTTPSServer handlers can't access items in the local or global scopes of the - // containing file - let func = ` - // Log the request for analysis - global.requestLog[global.requestLog.length] = { - path: "${path}", - hasAvailableDict: request.headers['available-dictionary'] !== undefined, - availableDict: request.headers['available-dictionary'] || null - }; - - response.writeHead(200, { - "Content-Type": "text/plain", - }); - response.end("${responseContent}", "binary"); -`; - let handler = new Function("request", "response", func); - return httpServer.registerPathHandler(path, handler); -} - -// Setup retrieval test server with dictionaries and resources -async function setupRetrievalTestServer() { - await setupServer(); - - // Dictionary endpoints - store dictionaries with different patterns - await server.registerPathHandler( - "/dict/api-v1", - function (request, response) { - const RETRIEVAL_TEST_DICTIONARIES = { - api_v1: { - id: "api-v1-dict", - content: "API_V1_COMMON_DATA", - pattern: "/api/v1/*", - type: "raw", - }, - }; - let dict = 
RETRIEVAL_TEST_DICTIONARIES.api_v1; - response.writeHead(200, { - "Content-Type": "application/octet-stream", - "Use-As-Dictionary": `match="${dict.pattern}", id="${dict.id}", type=${dict.type}`, - "Cache-Control": "max-age=3600", - }); - response.end(dict.content, "binary"); - } - ); - - await server.registerPathHandler( - "/dict/api-generic", - function (request, response) { - const RETRIEVAL_TEST_DICTIONARIES = { - api_generic: { - id: "api-generic-dict", - content: "API_GENERIC_DATA", - pattern: "/api/*", - type: "raw", - }, - }; - let dict = RETRIEVAL_TEST_DICTIONARIES.api_generic; - response.writeHead(200, { - "Content-Type": "application/octet-stream", - "Use-As-Dictionary": `match="${dict.pattern}", id="${dict.id}", type=${dict.type}`, - "Cache-Control": "max-age=3600", - }); - response.end(dict.content, "binary"); - } - ); - - await server.registerPathHandler( - "/dict/wildcard", - function (request, response) { - const RETRIEVAL_TEST_DICTIONARIES = { - wildcard: { - id: "wildcard-dict", - content: "WILDCARD_DATA", - pattern: "*", - type: "raw", - }, - }; - let dict = RETRIEVAL_TEST_DICTIONARIES.wildcard; - response.writeHead(200, { - "Content-Type": "application/octet-stream", - "Use-As-Dictionary": `match="${dict.pattern}", id="${dict.id}", type=${dict.type}`, - "Cache-Control": "max-age=3600", - }); - response.end(dict.content, "binary"); - } - ); - - await server.registerPathHandler("/dict/js", function (request, response) { - const RETRIEVAL_TEST_DICTIONARIES = { - js_files: { - id: "js-dict", - content: "JS_COMMON_CODE", - pattern: "*.js", - type: "raw", - }, - }; - let dict = RETRIEVAL_TEST_DICTIONARIES.js_files; - response.writeHead(200, { - "Content-Type": "application/octet-stream", - "Use-As-Dictionary": `match="${dict.pattern}", id="${dict.id}", type=${dict.type}`, - "Cache-Control": "max-age=3600", - }); - response.end(dict.content, "binary"); - }); - - // Resource endpoints that should trigger dictionary retrieval - await 
registerDictionaryAwareEndpoint( - server, - "/api/v1/users", - "API V1 USERS DATA" - ); - await registerDictionaryAwareEndpoint( - server, - "/api/v2/posts", - "API V2 POSTS DATA" - ); - await registerDictionaryAwareEndpoint( - server, - "/api/generic", - "GENERIC API DATA" - ); - await registerDictionaryAwareEndpoint(server, "/web/page", "WEB PAGE DATA"); - await registerDictionaryAwareEndpoint( - server, - "/scripts/app.js", - "JAVASCRIPT CODE" - ); - await registerDictionaryAwareEndpoint( - server, - "/styles/main.css", - "CSS STYLES" - ); - - return server; -} - -// Setup baseline dictionaries for retrieval testing -add_task(async function test_setup_dictionaries() { - await setupRetrievalTestServer(); - - // Clear any existing cache - let lci = Services.loadContextInfo.custom(false, { - partitionKey: `(https,localhost)`, - }); - evict_cache_entries("all", lci); - requestLog = []; - await sync_to_server(); - - // Store all test dictionaries - const dictPaths = [ - "/dict/api-v1", - "/dict/api-generic", - "/dict/wildcard", - "/dict/js", - ]; - - for (let path of dictPaths) { - let url = `https://localhost:${server.port()}${path}`; - - let chan = makeChan(url); - let [, data] = await channelOpenPromise(chan); - dump(`**** Dictionary loaded: ${path}, data length: ${data.length}\n`); - - // Verify dictionary was stored - await new Promise(resolve => { - verifyDictionaryStored(url, true, resolve); - }); - } - dump("**** Setup complete\n"); -}); - -// Test basic dictionary lookup and Available-Dictionary header generation -add_task(async function test_basic_dictionary_retrieval() { - let url = `https://localhost:${server.port()}/api/v1/users`; - requestLog = []; - await sync_to_server(); - - // Calculate expected hash for api_v1 dictionary - let expectedHash = await calculateDictionaryHash( - RETRIEVAL_TEST_DICTIONARIES.api_v1.content - ); - - let chan = makeChan(url); - let [, data] = await channelOpenPromise(chan); - - Assert.equal(data, "API V1 USERS DATA", 
"Resource content matches"); - - // Check request log to see if Available-Dictionary header was sent - await sync_from_server(); - let logEntry = requestLog.find(entry => entry.path === "/api/v1/users"); - Assert.ok(logEntry && logEntry.hasAvailableDict, "Has Available-Dictionary"); - Assert.ok( - logEntry.availableDict.includes(expectedHash), - "Available-Dictionary header should contain expected hash" - ); - dump("**** Basic retrieval test complete\n"); -}); - -// Test URL pattern matching logic for dictionary selection -add_task(async function test_dictionary_pattern_matching() { - const patternMatchTests = [ - { url: "/api/v1/users", expectedPattern: "/api/v1/*", dictKey: "api_v1" }, - { url: "/api/v2/posts", expectedPattern: "/api/*", dictKey: "api_generic" }, - { url: "/api/generic", expectedPattern: "/api/*", dictKey: "api_generic" }, - { url: "/scripts/app.js", expectedPattern: "*.js", dictKey: "js_files" }, - { url: "/web/page", expectedPattern: "*", dictKey: "wildcard" }, // Only wildcard should match - { url: "/styles/main.css", expectedPattern: "*", dictKey: "wildcard" }, - ]; - - requestLog = []; - await sync_to_server(); - - for (let test of patternMatchTests) { - let url = `https://localhost:${server.port()}${test.url}`; - let expectedDict = RETRIEVAL_TEST_DICTIONARIES[test.dictKey]; - let expectedHash = await calculateDictionaryHash(expectedDict.content); - - let chan = makeChan(url); - let [, data] = await channelOpenPromise(chan); - - Assert.greater(data.length, 0, `Resource ${test.url} should have content`); - - // Check request log - await sync_from_server(); - let logEntry = requestLog.find(entry => entry.path === test.url); - Assert.ok( - logEntry && logEntry.hasAvailableDict, - `Available-Dictionary header should be present for ${test.url}` - ); - if (logEntry && logEntry.hasAvailableDict) { - Assert.ok( - logEntry.availableDict.includes(expectedHash), - `Available-Dictionary header should contain expected hash for ${test.url}` - ); - } - } 
-}); - -// Test dictionary precedence when multiple patterns match -add_task(async function test_dictionary_precedence() { - // Test URL that matches multiple patterns: /api/v1/users - // Should match: "/api/v1/*" (most specific), "/api/*", "*" (wildcard) - // Most specific pattern should take precedence - - let url = `https://localhost:${server.port()}/api/v1/users`; - requestLog = []; - await sync_to_server(); - - let mostSpecificHash = await calculateDictionaryHash( - RETRIEVAL_TEST_DICTIONARIES.api_v1.content - ); - - let chan = makeChan(url); - let [, data] = await channelOpenPromise(chan); - - Assert.equal(data, "API V1 USERS DATA", "Content should match"); - - // Check request log for precedence - await sync_from_server(); - let logEntry = requestLog.find(entry => entry.path === "/api/v1/users"); - Assert.ok( - logEntry && logEntry.hasAvailableDict, - "Available-Dictionary header should be present for precedence test" - ); - if (logEntry && logEntry.hasAvailableDict) { - // The most specific pattern (/api/v1/*) should be included - // Implementation may include multiple matching dictionaries - Assert.ok( - logEntry.availableDict.includes(mostSpecificHash), - "Available-Dictionary header should contain most specific pattern hash" - ); - } -}); - -// Test successful dictionary lookup and usage -add_task(async function test_dictionary_cache_hit() { - let url = `https://localhost:${server.port()}/api/generic`; - requestLog = []; - await sync_to_server(); - - let expectedHash = await calculateDictionaryHash( - RETRIEVAL_TEST_DICTIONARIES.api_generic.content - ); - - let chan = makeChan(url); - let [, data] = await channelOpenPromise(chan); - - Assert.equal(data, "GENERIC API DATA", "Content should match"); - - // Verify dictionary lookup succeeded - await sync_from_server(); - let logEntry = requestLog.find(entry => entry.path === "/api/generic"); - Assert.ok( - logEntry && logEntry.hasAvailableDict, - "Available-Dictionary header should be present for cache hit" 
- ); - if (logEntry && logEntry.hasAvailableDict) { - Assert.ok( - logEntry.availableDict.includes(expectedHash), - "Available-Dictionary header should contain expected hash for cache hit" - ); - } -}); - -// Test Available-Dictionary header hash format compliance -add_task(async function test_dictionary_hash_format() { - // Test that dictionary hashes follow IETF spec format: :base64hash: - - let testDict = RETRIEVAL_TEST_DICTIONARIES.api_v1; - let calculatedHash = await calculateDictionaryHash(testDict.content); - - // Verify hash is base64 format - Assert.greater(calculatedHash.length, 0, "Hash should not be empty"); - - // Verify base64 pattern (rough check) - let base64Pattern = /^[A-Za-z0-9+/]*={0,2}$/; - Assert.ok(base64Pattern.test(calculatedHash), "Hash should be valid base64"); - - // The hash format should be structured field byte sequence: :base64: - let structuredFieldFormat = `:${calculatedHash}:`; - Assert.ok( - structuredFieldFormat.includes(calculatedHash), - "Hash should follow structured field format" - ); -}); - -// Test retrieval with multiple dictionary matches -add_task(async function test_multiple_dictionary_matches() { - // Create a request that could match multiple dictionaries - let url = `https://localhost:${server.port()}/api/test`; - requestLog = []; - await sync_to_server(); - - await registerDictionaryAwareEndpoint(server, "/api/test", "API TEST DATA"); - - let apiGenericHash = await calculateDictionaryHash( - RETRIEVAL_TEST_DICTIONARIES.api_generic.content - ); - let chan = makeChan(url); - let [, data] = await channelOpenPromise(chan); - - Assert.equal(data, "API TEST DATA", "Content should match"); - - // Check for multiple dictionary hashes in Available-Dictionary header - await sync_from_server(); - let logEntry = requestLog.find(entry => entry.path === "/api/test"); - Assert.ok( - logEntry && logEntry.hasAvailableDict, - "Available-Dictionary header should be present for multiple matches" - ); - if (logEntry && 
logEntry.hasAvailableDict) { - // Could match both /api/* and * patterns - verify the longest pattern's hash is present - // (IETF spec says the longest match should be used) - let hasApiGenericHash = logEntry.availableDict.includes(apiGenericHash); - Assert.ok( - hasApiGenericHash, - "Available-Dictionary header should contain at least one expected hash for multiple matches" - ); - } -}); - -// Cleanup -add_task(async function cleanup() { - // Clear cache - let lci = Services.loadContextInfo.custom(false, { - partitionKey: `(https,localhost)`, - }); - evict_cache_entries("all", lci); -}); diff --git a/netwerk/test/unit/test_dictionary_storage.js b/netwerk/test/unit/test_dictionary_storage.js @@ -1,612 +0,0 @@ -/** - * Tests for HTTP Compression Dictionary storage functionality - * - Use-As-Dictionary header parsing and validation - * - Dictionary storage in cache with proper metadata - * - Pattern matching and hash validation - * - Error handling and edge cases - */ - -"use strict"; - -// Load cache helpers -Services.scriptloader.loadSubScript("resource://test/head_cache.js", this); - -const { NodeHTTPSServer } = ChromeUtils.importESModule( - "resource://testing-common/NodeServer.sys.mjs" -); - -// Test data constants -const TEST_DICTIONARIES = { - small: { - id: "test-dict-small", - content: "COMMON_PREFIX_DATA_FOR_COMPRESSION", - pattern: "/api/v1/*", - type: "raw", - }, - large: { - id: "test-dict-large", - content: "A".repeat(1024 * 100), // 100KB dictionary - pattern: "*.html", - type: "raw", - }, - large_url: { - id: "test-dict-large-url", - content: "large URL content", - pattern: "large", - type: "raw", - }, - too_large_url: { - id: "test-dict-too-large-url", - content: "too large URL content", - pattern: "too_large", - type: "raw", - }, -}; - -let server = null; - -add_setup(async function () { - if (!server) { - server = await setupServer(); - } - // Setup baseline dictionaries for compression testing - - // Clear any existing cache - let lci = 
Services.loadContextInfo.custom(false, { - partitionKey: `(https,localhost)`, - }); - evict_cache_entries("all", lci); -}); - -// Utility function to calculate SHA-256 hash -async function calculateSHA256(data) { - let hasher = Cc["@mozilla.org/security/hash;1"].createInstance( - Ci.nsICryptoHash - ); - hasher.init(Ci.nsICryptoHash.SHA256); - - // Convert string to UTF-8 bytes - let bytes = new TextEncoder().encode(data); - hasher.update(bytes, bytes.length); - return hasher.finish(false); -} - -// Setup dictionary test server -async function setupServer() { - let httpServer = new NodeHTTPSServer(); - await httpServer.start(); - - // Basic dictionary endpoint - await httpServer.registerPathHandler( - "/dict/small", - function (request, response) { - // Test data constants - const TEST_DICTIONARIES = { - small: { - id: "test-dict-small", - content: "COMMON_PREFIX_DATA_FOR_COMPRESSION", - pattern: "/api/v1/*", - type: "raw", - }, - }; - - let dict = TEST_DICTIONARIES.small; - response.writeHead(200, { - "Content-Type": "application/octet-stream", - "Use-As-Dictionary": `match="${dict.pattern}", id="${dict.id}", type=${dict.type}`, - "Cache-Control": "max-age=3600", - }); - response.end(dict.content, "binary"); - } - ); - - // Dictionary with expiration - await httpServer.registerPathHandler( - "/dict/expires", - function (request, response) { - response.writeHead(200, { - "Content-Type": "application/octet-stream", - "Use-As-Dictionary": `match="expires/*", id="expires-dict", type=raw`, - "Cache-Control": "max-age=1", - }); - response.end("EXPIRING_DICTIONARY_DATA", "binary"); - } - ); - - // Dictionary with invalid header - await httpServer.registerPathHandler( - "/dict/invalid", - function (request, response) { - global.test = 1; - response.writeHead(200, { - "Content-Type": "application/octet-stream", - "Use-As-Dictionary": "invalid-header-format", - }); - response.end("INVALID_DICTIONARY_DATA", "binary"); - } - ); - - // Large dictionary - await 
httpServer.registerPathHandler( - "/dict/large", - function (request, response) { - // Test data constants - const TEST_DICTIONARIES = { - large: { - id: "test-dict-large", - content: "A".repeat(1024 * 100), // 100KB dictionary - pattern: "*.html", - type: "raw", - }, - }; - - let dict = TEST_DICTIONARIES.large; - response.writeHead(200, { - "Content-Type": "application/octet-stream", - "Use-As-Dictionary": `match="${dict.pattern}", id="${dict.id}", type=${dict.type}`, - "Cache-Control": "max-age=3600", - }); - response.end(dict.content, "binary"); - } - ); - - // Large dictionary URL - await httpServer.registerPathHandler( - "/dict/large/" + "A".repeat(1024 * 20), - function (request, response) { - // Test data constants - const TEST_DICTIONARIES = { - large_url: { - id: "test-dict-large-url", - content: "large URL content", - pattern: "large", - type: "raw", - }, - }; - - let dict = TEST_DICTIONARIES.large_url; - response.writeHead(200, { - "Content-Type": "application/octet-stream", - "Use-As-Dictionary": `match="${dict.pattern}", id="${dict.id}", type=${dict.type}`, - "Cache-Control": "max-age=3600", - }); - response.end(dict.content, "binary"); - } - ); - - // Too Large dictionary URL - await httpServer.registerPathHandler( - "/dict/large/" + "B".repeat(1024 * 100), - function (request, response) { - // Test data constants - const TEST_DICTIONARIES = { - too_large_url: { - id: "test-dict-too-large-url", - content: "too large URL content", - pattern: "too_large", - type: "raw", - }, - }; - - let dict = TEST_DICTIONARIES.too_large_url; - response.writeHead(200, { - "Content-Type": "application/octet-stream", - "Use-As-Dictionary": `match="${dict.pattern}", id="${dict.id}", type=${dict.type}`, - "Cache-Control": "max-age=3600", - }); - response.end(dict.content, "binary"); - } - ); - - registerCleanupFunction(async () => { - try { - await httpServer.stop(); - } catch (e) { - // Ignore server stop errors during cleanup - } - }); - - return httpServer; -} - -// 
Verify dictionary is stored in cache -function verifyDictionaryStored(url, shouldExist, callback) { - let lci = Services.loadContextInfo.custom(false, { - partitionKey: `(https,localhost)`, - }); - asyncCheckCacheEntryPresence(url, "disk", shouldExist, lci, callback); -} - -// Create channel for dictionary requests -function makeChan(url) { - let chan = NetUtil.newChannel({ - uri: url, - loadUsingSystemPrincipal: true, - contentPolicyType: Ci.nsIContentPolicy.TYPE_DOCUMENT, - }).QueryInterface(Ci.nsIHttpChannel); - return chan; -} - -function channelOpenPromise(chan) { - return new Promise(resolve => { - function finish(req, buffer) { - resolve([req, buffer]); - } - chan.asyncOpen(new ChannelListener(finish, null, CL_ALLOW_UNKNOWN_CL)); - }); -} - -// Test basic dictionary storage with Use-As-Dictionary header -add_task(async function test_basic_dictionary_storage() { - // Clear any existing cache - evict_cache_entries("all"); - - let url = `https://localhost:${server.port()}/dict/small`; - let dict = TEST_DICTIONARIES.small; - - let chan = makeChan(url); - let [req, data] = await channelOpenPromise(chan); - - Assert.equal(data, dict.content, "Dictionary content matches"); - - // Verify Use-As-Dictionary header was processed - try { - let headerValue = req.getResponseHeader("Use-As-Dictionary"); - Assert.ok( - headerValue.includes(`id="${dict.id}"`), - "Header contains correct ID" - ); - Assert.ok( - headerValue.includes(`match="${dict.pattern}"`), - "Header contains correct pattern" - ); - } catch (e) { - Assert.ok(false, "Use-As-Dictionary header should be present"); - } - - // Check that dictionary is stored in cache - await new Promise(resolve => { - verifyDictionaryStored(url, true, resolve); - }); -}); - -// Test Use-As-Dictionary header parsing with various formats -add_task(async function test_dictionary_header_parsing() { - const headerTests = [ - { - header: 'match="*", id="dict1", type=raw', - valid: true, - description: "Basic valid header", - }, - { - 
header: 'match="/api/*", id="api-dict", type=raw', - valid: true, - description: "Path pattern header", - }, - { - header: 'match="*.js", id="js-dict"', - valid: true, - description: "Header without type (should default to raw)", - }, - { - header: 'id="dict1", type=raw', - valid: false, - description: "Missing match parameter", - }, - { - header: 'match="*"', - valid: false, - description: "Missing id parameter", - }, - { - header: 'match="*", id="", type=raw', - valid: false, - description: "Empty id parameter", - }, - ]; - - let testIndex = 0; - for (let test of headerTests) { - let testPath = `/dict/header-test-${testIndex++}`; - let func = ` - global.testIndex = 0; - let test = ${JSON.stringify(test)}; - response.writeHead(200, { - "Content-Type": "application/octet-stream", - "Use-As-Dictionary": test.header, - }); - // We won't be using this, so it doesn't really matter - response.end("HEADER_TEST_DICT_" + global.testIndex++, "binary"); - `; - let handler = new Function("request", "response", func); - await server.registerPathHandler(testPath, handler); - - let url = `https://localhost:${server.port()}${testPath}`; - let chan = makeChan(url); - await channelOpenPromise(chan); - // XXX test if we have a dictionary entry. Need new APIs to let me test it, - // or we can read dict:<origin> and look for this entry - - // Note: Invalid dictionary headers still create regular cache entries, - // they just aren't processed as dictionaries. So all should exist in cache. 
- await new Promise(resolve => { - verifyDictionaryStored(url, true, resolve); - }); - } -}); - -// Test dictionary hash calculation and validation -add_task(async function test_dictionary_hash_calculation() { - dump("**** testing hashes\n"); - let url = `https://localhost:${server.port()}/dict/small`; - let dict = TEST_DICTIONARIES.small; - - // Calculate expected hash - let expectedHash = await calculateSHA256(dict.content); - Assert.greater(expectedHash.length, 0, "Hash should be calculated"); - - let chan = makeChan(url); - await channelOpenPromise(chan); - - // Calculate expected hash - let hashCalculatedHash = await calculateSHA256(dict.content); - Assert.greater(hashCalculatedHash.length, 0, "Hash should be calculated"); - - // Check cache entry exists - await new Promise(resolve => { - let lci = Services.loadContextInfo.custom(false, { - partitionKey: `(https,localhost)`, - }); - asyncOpenCacheEntry( - url, - "disk", - Ci.nsICacheStorage.OPEN_READONLY, - lci, - function (status, entry) { - Assert.equal(status, Cr.NS_OK, "Cache entry should exist"); - Assert.ok(entry, "Entry should not be null"); - - // Check if entry has dictionary metadata - try { - let metaData = entry.getMetaDataElement("use-as-dictionary"); - Assert.ok(metaData, "Dictionary metadata should exist"); - - // Verify metadata contains hash information - // Note: The exact format may vary based on implementation - Assert.ok( - metaData.includes(dict.id), - "Metadata should contain dictionary ID" - ); - } catch (e) { - // Dictionary metadata might be stored differently - dump(`Dictionary metadata access failed: ${e}\n`); - } - - resolve(); - } - ); - }); -}); - -// Test dictionary expiration handling -add_task(async function test_dictionary_expiration() { - dump("**** testing expiration\n"); - let url = `https://localhost:${server.port()}/dict/expires`; - - // Fetch dictionary with 1-second expiration - let chan = makeChan(url); - let [, data] = await channelOpenPromise(chan); - - 
Assert.equal(data, "EXPIRING_DICTIONARY_DATA", "Dictionary content matches"); - - // Note: Testing actual expiration behavior requires waiting and is complex - // For now, just verify the dictionary was fetched - // XXX FIX! -}); - -// Test multiple dictionaries per origin with different patterns -add_task(async function test_multiple_dictionaries_per_origin() { - dump("**** test multiple dictionaries per origin\n"); - // Register multiple dictionary endpoints for same origin - await server.registerPathHandler("/dict/api", function (request, response) { - response.writeHead(200, { - "Content-Type": "application/octet-stream", - "Use-As-Dictionary": 'match="/api/*", id="api-dict", type=raw', - }); - response.end("API_DICTIONARY_DATA", "binary"); - }); - - await server.registerPathHandler("/dict/web", function (request, response) { - response.writeHead(200, { - "Content-Type": "application/octet-stream", - "Use-As-Dictionary": 'match="/web/*", id="web-dict", type=raw', - }); - response.end("WEB_DICTIONARY_DATA", "binary"); - }); - - let apiUrl = `https://localhost:${server.port()}/dict/api`; - let webUrl = `https://localhost:${server.port()}/dict/web`; - - // Fetch both dictionaries - let apiChan = makeChan(apiUrl); - let [, apiData] = await channelOpenPromise(apiChan); - Assert.equal( - apiData, - "API_DICTIONARY_DATA", - "API dictionary content matches" - ); - - let webChan = makeChan(webUrl); - let [, webData] = await channelOpenPromise(webChan); - Assert.equal( - webData, - "WEB_DICTIONARY_DATA", - "Web dictionary content matches" - ); - - // Verify both dictionaries are stored - await new Promise(resolve => { - verifyDictionaryStored(apiUrl, true, () => { - verifyDictionaryStored(webUrl, true, resolve); - }); - }); -}); - -// Test dictionary size limits and validation -add_task(async function test_dictionary_size_limits() { - dump("**** test size limits\n"); - let url = `https://localhost:${server.port()}/dict/large`; - let dict = TEST_DICTIONARIES.large; - - 
let chan = makeChan(url); - let [, data] = await channelOpenPromise(chan); - - Assert.equal(data, dict.content, "Large dictionary content matches"); - Assert.equal(data.length, dict.content.length, "Dictionary size correct"); - - // Verify large dictionary is stored - await new Promise(resolve => { - verifyDictionaryStored(url, true, resolve); - }); -}); - -// Test error handling with invalid dictionary headers -add_task(async function test_invalid_dictionary_headers() { - dump("**** test error handling\n"); - let url = `https://localhost:${server.port()}/dict/invalid`; - - let chan = makeChan(url); - let [, data] = await channelOpenPromise(chan); - - Assert.equal( - data, - "INVALID_DICTIONARY_DATA", - "Invalid dictionary content received" - ); - - // Invalid dictionary should not be stored as dictionary - // but the regular cache entry should exist - await new Promise(resolve => { - asyncOpenCacheEntry( - url, - "disk", - Ci.nsICacheStorage.OPEN_READONLY, - null, - function (status, entry) { - if (status === Cr.NS_OK && entry) { - // Regular cache entry should exist - // Note: Don't call entry.close() as it doesn't exist on this interface - } - // But it should not be processed as a dictionary - resolve(); - } - ); - }); -}); - -// Test cache integration and persistence -add_task(async function test_dictionary_cache_persistence() { - dump("**** test persistence\n"); - // Force cache sync to ensure everything is written - await new Promise(resolve => { - syncWithCacheIOThread(resolve, true); - }); - - // Get cache statistics before - await new Promise(resolve => { - get_device_entry_count("disk", null, entryCount => { - Assert.greater(entryCount, 0, "Cache should have entries"); - resolve(); - }); - }); - - // Verify our test dictionaries are still present - let smallUrl = `https://localhost:${server.port()}/dict/small`; - let chan = makeChan(smallUrl); - await channelOpenPromise(chan); - - await new Promise(resolve => { - verifyDictionaryStored(smallUrl, true, 
resolve); - }); -}); - -// Test very long url which should fit in metadata -add_task(async function test_long_dictionary_url() { - // Clear any existing cache - evict_cache_entries("all"); - - let url = - `https://localhost:${server.port()}/dict/large/` + "A".repeat(1024 * 20); - let dict = TEST_DICTIONARIES.large_url; - - let chan = makeChan(url); - let [req, data] = await channelOpenPromise(chan); - - Assert.equal(data, dict.content, "Dictionary content matches"); - - // Check that dictionary is stored in cache - await new Promise(resolve => { - verifyDictionaryStored(url, true, resolve); - }); - - // Verify Use-As-Dictionary header was processed and it's an active dictionary - url = `https://localhost:${server.port()}/large`; - chan = makeChan(url); - [req, data] = await channelOpenPromise(chan); - - try { - let headerValue = req.getRequestHeader("Available-Dictionary"); - Assert.ok(headerValue.includes(`:`), "Header contains a hash"); - } catch (e) { - Assert.ok( - false, - "Available-Dictionary header should be present with long URL for dictionary" - ); - } -}); - -// Test url too long to store in metadata -add_task(async function test_too_long_dictionary_url() { - // Clear any existing cache - evict_cache_entries("all"); - - let url = - `https://localhost:${server.port()}/dict/large/` + "B".repeat(1024 * 100); - let dict = TEST_DICTIONARIES.too_large_url; - - let chan = makeChan(url); - let [req, data] = await channelOpenPromise(chan); - - Assert.equal(data, dict.content, "Dictionary content matches"); - - // Check that dictionary is stored in cache (even if it's not a dictionary) - await new Promise(resolve => { - verifyDictionaryStored(url, true, resolve); - }); - - // Verify Use-As-Dictionary header was NOT processed and active due to 64K limit to metadata - // Since we can't store it on disk, we can't offer it as a dictionary. 
If we change the - // metadata limit, this will need to change - url = `https://localhost:${server.port()}/too_large`; - chan = makeChan(url); - [req, data] = await channelOpenPromise(chan); - - try { - // we're just looking to see if it throws - // eslint-disable-next-line no-unused-vars - let headerValue = req.getRequestHeader("Available-Dictionary"); - Assert.ok(false, "Too-long dictionary was offered in Available-Dictionary"); - } catch (e) { - Assert.ok( - true, - "Available-Dictionary header should not be present with a too-long URL for dictionary" - ); - } -}); - -// Cleanup -add_task(async function cleanup() { - // Clear cache - evict_cache_entries("all"); - dump("**** all done\n"); -}); diff --git a/netwerk/test/unit/xpcshell.toml b/netwerk/test/unit/xpcshell.toml @@ -469,8 +469,6 @@ run-sequentially = ["true"] # httpd server ["test_cache2_clear_with_usercontext_oa.js"] run-sequentially = ["true"] # httpd server -["test_cache2_compression_dictionary.js"] - ["test_cache2_nostore.js"] ["test_cache_204_response.js"] @@ -573,12 +571,6 @@ prefs = ["content.cors.use_triggering_principal=true"] # See bug 1982916. 
["test_defaultURI.js"] -["test_dictionary_compression_dcb.js"] - -["test_dictionary_retrieval.js"] - -["test_dictionary_storage.js"] - ["test_dns_by_type_resolve.js"] ["test_dns_cancel.js"] diff --git a/testing/web-platform/meta/fetch/compression-dictionary/dictionary-clear-site-data-cache.tentative.https.html.ini b/testing/web-platform/meta/fetch/compression-dictionary/dictionary-clear-site-data-cache.tentative.https.html.ini @@ -0,0 +1,3 @@ +[dictionary-clear-site-data-cache.tentative.https.html] + [Clear-Site-Data with "cache" directive must unregister dictionary] + expected: FAIL diff --git a/testing/web-platform/meta/fetch/compression-dictionary/dictionary-clear-site-data-cookies.tentative.https.html.ini b/testing/web-platform/meta/fetch/compression-dictionary/dictionary-clear-site-data-cookies.tentative.https.html.ini @@ -0,0 +1,3 @@ +[dictionary-clear-site-data-cookies.tentative.https.html] + [Clear-Site-Data with "cookies" directive must unregister dictionary] + expected: FAIL diff --git a/testing/web-platform/meta/fetch/compression-dictionary/dictionary-clear-site-data-storage.tentative.https.html.ini b/testing/web-platform/meta/fetch/compression-dictionary/dictionary-clear-site-data-storage.tentative.https.html.ini @@ -0,0 +1,3 @@ +[dictionary-clear-site-data-storage.tentative.https.html] + [Clear-Site-Data with "storage" directive must not unregister dictionary] + expected: FAIL diff --git a/testing/web-platform/meta/fetch/compression-dictionary/dictionary-compressed.tentative.https.html.ini b/testing/web-platform/meta/fetch/compression-dictionary/dictionary-compressed.tentative.https.html.ini @@ -0,0 +1,15 @@ +[dictionary-compressed.tentative.https.html] + [Decompresion using gzip-encoded dictionary works as expected] + expected: FAIL + + [Decompresion using Brotli-encoded dictionary works as expected] + expected: FAIL + + [Decompresion using Zstandard-encoded dictionary works as expected] + expected: FAIL + + [A dcb dictionary-compressed dictionary can 
be used as a dictionary for future requests.] + expected: FAIL + + [A dcz dictionary-compressed dictionary can be used as a dictionary for future requests.] + expected: FAIL diff --git a/testing/web-platform/meta/fetch/compression-dictionary/dictionary-decompression.tentative.https.html.ini b/testing/web-platform/meta/fetch/compression-dictionary/dictionary-decompression.tentative.https.html.ini @@ -0,0 +1,9 @@ +[dictionary-decompression.tentative.https.html] + [Decompresion using Brotli with the dictionary works as expected] + expected: FAIL + + [Decompresion using Zstandard with the dictionary works as expected] + expected: FAIL + + [Decompresion of a cross origin resource works as expected] + expected: FAIL diff --git a/testing/web-platform/meta/fetch/compression-dictionary/dictionary-fetch-no-cors.tentative.https.html.ini b/testing/web-platform/meta/fetch/compression-dictionary/dictionary-fetch-no-cors.tentative.https.html.ini @@ -0,0 +1,3 @@ +[dictionary-fetch-no-cors.tentative.https.html] + [Fetch cross-origin no-cors request does not include Available-Dictionary header] + expected: FAIL diff --git a/testing/web-platform/meta/fetch/compression-dictionary/dictionary-fetch-with-link-element.tentative.https.html.ini b/testing/web-platform/meta/fetch/compression-dictionary/dictionary-fetch-with-link-element.tentative.https.html.ini @@ -0,0 +1,9 @@ +[dictionary-fetch-with-link-element.tentative.https.html] + [Fetch cross origin dictionary using link element] + expected: FAIL + + [Fetch same origin dictionary using link element] + expected: FAIL + + [Browser supports link element with compression-dictionary rel.] 
+ expected: FAIL diff --git a/testing/web-platform/meta/fetch/compression-dictionary/dictionary-fetch-with-link-header.tentative.https.html.ini b/testing/web-platform/meta/fetch/compression-dictionary/dictionary-fetch-with-link-header.tentative.https.html.ini @@ -0,0 +1,3 @@ +[dictionary-fetch-with-link-header.tentative.https.html] + [Fetch same origin dictionary using link header] + expected: FAIL diff --git a/testing/web-platform/meta/fetch/compression-dictionary/dictionary-registration.tentative.https.html.ini b/testing/web-platform/meta/fetch/compression-dictionary/dictionary-registration.tentative.https.html.ini @@ -0,0 +1,15 @@ +[dictionary-registration.tentative.https.html] + [Simple dictionary registration and unregistration] + expected: FAIL + + [Dictionary registration with dictionary ID] + expected: FAIL + + [New dictionary registration overrides the existing one] + expected: FAIL + + [Dictionary registration does not invalidate cache entry] + expected: FAIL + + [Expired dictionary is not used] + expected: FAIL diff --git a/toolkit/components/cleardata/ClearDataService.sys.mjs b/toolkit/components/cleardata/ClearDataService.sys.mjs @@ -192,9 +192,6 @@ function hasSite( // Sanitizer.sanitizeOnShutdown() and // Sanitizer.onStartup() -// IETF spec for compression dictionaries requires clearing them when cookies -// are cleared for the site - Section 10 of -// https://datatracker.ietf.org/doc/draft-ietf-httpbis-compression-dictionary/ const CookieCleaner = { deleteByLocalFiles(aOriginAttributes) { return new Promise(aResolve => { @@ -212,16 +209,6 @@ const CookieCleaner = { aHost, JSON.stringify(aOriginAttributes) ); - // Compression dictionaries are https only - // Note that IPV6 urls require [...], but aPrincipal.host (passed - // to this) doesn't include the []. 
- if (aHost.includes(":") && aHost[0] != "[") { - aHost = "https://[" + aHost + "]"; - } else { - aHost = "https://" + aHost; - } - let httpsURI = Services.io.newURI(aHost); - Services.cache2.clearOriginDictionary(httpsURI); aResolve(); }); }, @@ -230,9 +217,6 @@ const CookieCleaner = { // Fall back to clearing by host and OA pattern. This will over-clear, since // any properties that are not explicitly set in aPrincipal.originAttributes // will be wildcard matched. - // Note that we clear cookies for all ports, because apparently - // cookies historically have ignored ports based on sameSite rules: - // https://html.spec.whatwg.org/#same-site return this.deleteByHost(aPrincipal.host, aPrincipal.originAttributes); }, @@ -251,9 +235,6 @@ const CookieCleaner = { JSON.stringify(cookie.originAttributes) ); }); - // Compression dictionaries are https only - let httpsURI = Services.io.newURI("https://" + aSchemelessSite); - Services.cache2.clearOriginDictionary(httpsURI); }, deleteByRange(aFrom) { @@ -267,8 +248,6 @@ const CookieCleaner = { aOriginAttributesString ); } catch (ex) {} - // XXX Bug 1984198 we need to clear dictionaries here (probably has - // to be in CookieService::RemoveCookiesWithOriginAttributes() aResolve(); }); }, @@ -276,7 +255,6 @@ const CookieCleaner = { deleteAll() { return new Promise(aResolve => { Services.cookies.removeAll(); - Services.cache2.clearAllOriginDictionaries(); aResolve(); }); }, diff --git a/toolkit/components/clearsitedata/ClearSiteData.cpp b/toolkit/components/clearsitedata/ClearSiteData.cpp @@ -23,14 +23,9 @@ #include "nsIScriptError.h" #include "nsIScriptSecurityManager.h" #include "nsNetUtil.h" -#include "mozilla/Logging.h" using namespace mozilla; -LazyLogModule gClearSiteDataLog("ClearSiteData"); - -#define LOG(args) MOZ_LOG(gClearSiteDataLog, mozilla::LogLevel::Debug, args) - namespace { StaticRefPtr<ClearSiteData> gClearSiteData; @@ -209,8 +204,6 @@ void ClearSiteData::ClearDataFromChannel(nsIHttpChannel* aChannel) { // in 
a different principal. int32_t cleanNetworkFlags = 0; - LOG(("ClearSiteData: %s, %x", uri->GetSpecOrDefault().get(), flags)); - if (StaticPrefs::privacy_clearSiteDataHeader_cache_enabled() && (flags & eCache)) { LogOpToConsole(aChannel, uri, eCache); @@ -231,8 +224,6 @@ void ClearSiteData::ClearDataFromChannel(nsIHttpChannel* aChannel) { nsIClearDataService::CLEAR_FINGERPRINTING_PROTECTION_STATE; } - LOG(("ClearSiteData: cleanFlags=%x, cleanNetworkFlags=%x", cleanFlags, - cleanNetworkFlags)); // for each `DeleteDataFromPrincipal` we need to wait for one callback. // cleanFlags elicits once callback. uint32_t numClearCalls = (cleanFlags != 0) + (cleanNetworkFlags != 0); diff --git a/toolkit/components/extensions/test/mochitest/head_webrequest.js b/toolkit/components/extensions/test/mochitest/head_webrequest.js @@ -139,16 +139,12 @@ function background(events) { ); let deletedAny = false; - dump(`*** headers = ${JSON.stringify(headers)}\n`); for (let j = headers.length; j-- > 0; ) { if (remove.includes(headers[j].name.toLowerCase())) { headers.splice(j, 1); deletedAny = true; } } - dump( - `*** headers(after) = ${JSON.stringify(headers)}, deletedAny=${deletedAny}\n` - ); browser.test.assertTrue( deletedAny, `at least one ${phase}Headers element to delete` diff --git a/uriloader/preload/PreloadService.cpp b/uriloader/preload/PreloadService.cpp @@ -93,13 +93,6 @@ already_AddRefed<PreloaderBase> PreloadService::PreloadLinkElement( integrity = aLinkElement->HasAttr(nsGkAtoms::integrity) ? 
integrity : VoidString(); - // rel=compression-dictionary fetches default to "anonymous" if no - // crossorigin=foo parameter is given - if (rel.LowerCaseEqualsASCII("compression-dictionary") && - crossOrigin.IsEmpty()) { - crossOrigin = u"anonymous"_ns; - } - nsAutoString nonce; if (nsString* cspNonce = static_cast<nsString*>(aLinkElement->GetProperty(nsGkAtoms::nonce))) { @@ -114,7 +107,7 @@ already_AddRefed<PreloaderBase> PreloadService::PreloadLinkElement( aLinkElement->GetType(type); } - auto result = PreloadOrCoalesce(uri, url, aPolicyType, as, rel, type, charset, + auto result = PreloadOrCoalesce(uri, url, aPolicyType, as, type, charset, srcset, sizes, nonce, integrity, crossOrigin, referrerPolicy, fetchPriority, /* aFromHeader = */ false, 0); @@ -130,9 +123,9 @@ already_AddRefed<PreloaderBase> PreloadService::PreloadLinkElement( void PreloadService::PreloadLinkHeader( nsIURI* aURI, const nsAString& aURL, nsContentPolicyType aPolicyType, - const nsAString& aAs, const nsAString& aRel, const nsAString& aType, - const nsAString& aNonce, const nsAString& aIntegrity, - const nsAString& aSrcset, const nsAString& aSizes, const nsAString& aCORS, + const nsAString& aAs, const nsAString& aType, const nsAString& aNonce, + const nsAString& aIntegrity, const nsAString& aSrcset, + const nsAString& aSizes, const nsAString& aCORS, const nsAString& aReferrerPolicy, uint64_t aEarlyHintPreloaderId, const nsAString& aFetchPriority) { if (aPolicyType == nsIContentPolicy::TYPE_INVALID) { @@ -140,17 +133,10 @@ void PreloadService::PreloadLinkHeader( return; } - // rel=compression-dictionary fetches default to "anonymous" if no - // crossorigin=foo parameter is given - - PreloadOrCoalesce( - aURI, aURL, aPolicyType, aAs, aRel, aType, u""_ns, aSrcset, aSizes, - aNonce, aIntegrity, - aRel.LowerCaseEqualsASCII("compression-dictionary") && aCORS.IsEmpty() - ? 
u"anonymous"_ns - : aCORS, - aReferrerPolicy, aFetchPriority, - /* aFromHeader = */ true, aEarlyHintPreloaderId); + PreloadOrCoalesce(aURI, aURL, aPolicyType, aAs, aType, u""_ns, aSrcset, + aSizes, aNonce, aIntegrity, aCORS, aReferrerPolicy, + aFetchPriority, + /* aFromHeader = */ true, aEarlyHintPreloaderId); } // The mapping is specified as implementation-defined, see step 15 of @@ -181,9 +167,8 @@ class SupportsPriorityValueFor { PreloadService::PreloadOrCoalesceResult PreloadService::PreloadOrCoalesce( nsIURI* aURI, const nsAString& aURL, nsContentPolicyType aPolicyType, - const nsAString& aAs, const nsAString& aRel, const nsAString& aType, - const nsAString& aCharset, const nsAString& aSrcset, - const nsAString& aSizes, const nsAString& aNonce, + const nsAString& aAs, const nsAString& aType, const nsAString& aCharset, + const nsAString& aSrcset, const nsAString& aSizes, const nsAString& aNonce, const nsAString& aIntegrity, const nsAString& aCORS, const nsAString& aReferrerPolicy, const nsAString& aFetchPriority, bool aFromHeader, uint64_t aEarlyHintPreloaderId) { @@ -217,10 +202,6 @@ PreloadService::PreloadOrCoalesceResult PreloadService::PreloadOrCoalesce( } else if (aAs.LowerCaseEqualsASCII("fetch")) { preloadKey = PreloadHashKey::CreateAsFetch( uri, dom::Element::StringToCORSMode(aCORS)); - } else if (aRel.LowerCaseEqualsASCII("compression-dictionary")) { - // compression-dictionary doesn't specify an 'as=' value - preloadKey = PreloadHashKey::CreateAsFetch( - uri, dom::Element::StringToCORSMode(aCORS)); } else { return {nullptr, false}; } @@ -259,8 +240,7 @@ PreloadService::PreloadOrCoalesceResult PreloadService::PreloadOrCoalesce( } else if (aAs.LowerCaseEqualsASCII("font")) { PreloadFont(uri, aCORS, aReferrerPolicy, aEarlyHintPreloaderId, aFetchPriority); - } else if (aAs.LowerCaseEqualsASCII("fetch") || - aRel.LowerCaseEqualsASCII("compression-dictionary")) { + } else if (aAs.LowerCaseEqualsASCII("fetch")) { PreloadFetch(uri, aCORS, aReferrerPolicy, 
aEarlyHintPreloaderId, aFetchPriority); } diff --git a/uriloader/preload/PreloadService.h b/uriloader/preload/PreloadService.h @@ -74,10 +74,9 @@ class PreloadService { // AsyncOpen. void PreloadLinkHeader(nsIURI* aURI, const nsAString& aURL, nsContentPolicyType aPolicyType, const nsAString& aAs, - const nsAString& aRel, const nsAString& aType, - const nsAString& aNonce, const nsAString& aIntegrity, - const nsAString& aSrcset, const nsAString& aSizes, - const nsAString& aCORS, + const nsAString& aType, const nsAString& aNonce, + const nsAString& aIntegrity, const nsAString& aSrcset, + const nsAString& aSizes, const nsAString& aCORS, const nsAString& aReferrerPolicy, uint64_t aEarlyHintPreloaderId, const nsAString& aFetchPriority); @@ -120,12 +119,12 @@ class PreloadService { PreloadOrCoalesceResult PreloadOrCoalesce( nsIURI* aURI, const nsAString& aURL, nsContentPolicyType aPolicyType, - const nsAString& aAs, const nsAString& aRel, const nsAString& aType, - const nsAString& aCharset, const nsAString& aSrcset, - const nsAString& aSizes, const nsAString& aNonce, - const nsAString& aIntegrity, const nsAString& aCORS, - const nsAString& aReferrerPolicy, const nsAString& aFetchPriority, - bool aFromHeader, uint64_t aEarlyHintPreloaderId); + const nsAString& aAs, const nsAString& aType, const nsAString& aCharset, + const nsAString& aSrcset, const nsAString& aSizes, + const nsAString& aNonce, const nsAString& aIntegrity, + const nsAString& aCORS, const nsAString& aReferrerPolicy, + const nsAString& aFetchPriority, bool aFromHeader, + uint64_t aEarlyHintPreloaderId); private: nsRefPtrHashtable<PreloadHashKey, PreloaderBase> mPreloads;