tor-browser

The Tor Browser
git clone https://git.dasho.dev/tor-browser.git
Log | Files | Refs | README | LICENSE

commit 37c047a757209618976176c54ee5b2c9f0432cfb
parent 4b61512512f362895db9d670aa3d3712b214a69b
Author: Bastian Gruber <foreach@me.com>
Date:   Tue,  2 Dec 2025 11:20:48 +0000

Bug 1983135 - Implement a Necko backend for viaduct. r=bdk,valentin,supply-chain-reviewers

Differential Revision: https://phabricator.services.mozilla.com/D271450

Diffstat:
M Cargo.lock | 18 ++++++++++++++++++
M Cargo.toml | 1 +
A services/application-services/components/viaduct-necko/Cargo.toml | 15 +++++++++++++++
A services/application-services/components/viaduct-necko/backend.cpp | 670 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
A services/application-services/components/viaduct-necko/backend.h | 72 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
A services/application-services/components/viaduct-necko/moz.build | 16 ++++++++++++++++
A services/application-services/components/viaduct-necko/src/lib.rs | 350 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
A services/application-services/components/viaduct-necko/tests/test_viaduct_necko_backend.js | 318 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
A services/application-services/components/viaduct-necko/tests/xpcshell.toml | 5 +++++
M services/moz.build | 1 +
M supply-chain/audits.toml | 5 +++++
M supply-chain/imports.lock | 1 +
A third_party/rust/oneshot/.cargo-checksum.json | 2 ++
A third_party/rust/oneshot/CHANGELOG.md | 106 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
A third_party/rust/oneshot/Cargo.lock | 1499 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
A third_party/rust/oneshot/Cargo.toml | 159 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
A third_party/rust/oneshot/LICENSE-APACHE | 176 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
A third_party/rust/oneshot/LICENSE-MIT | 23 +++++++++++++++++++++++
A third_party/rust/oneshot/README.md | 94 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
A third_party/rust/oneshot/benches/benches.rs | 138 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
A third_party/rust/oneshot/check_mem_leaks.sh | 13 +++++++++++++
A third_party/rust/oneshot/examples/recv_before_send.rs | 18 ++++++++++++++++++
A third_party/rust/oneshot/examples/recv_before_send_then_drop_sender.rs | 18 ++++++++++++++++++
A third_party/rust/oneshot/examples/recv_ref_before_send.rs | 18 ++++++++++++++++++
A third_party/rust/oneshot/examples/recv_ref_before_send_then_drop_sender.rs | 18 ++++++++++++++++++
A third_party/rust/oneshot/examples/recv_timeout_before_send.rs | 18 ++++++++++++++++++
A third_party/rust/oneshot/examples/recv_timeout_before_send_then_drop_sender.rs | 18 ++++++++++++++++++
A third_party/rust/oneshot/examples/recv_with_dropped_sender.rs | 11 +++++++++++
A third_party/rust/oneshot/examples/send_before_recv.rs | 11 +++++++++++
A third_party/rust/oneshot/examples/send_then_drop_receiver.rs | 7 +++++++
A third_party/rust/oneshot/examples/send_with_dropped_receiver.rs | 8 ++++++++
A third_party/rust/oneshot/src/errors.rs | 151 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
A third_party/rust/oneshot/src/lib.rs | 1343 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
A third_party/rust/oneshot/src/loombox.rs | 158 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
A third_party/rust/oneshot/tests/assert_mem.rs | 37 +++++++++++++++++++++++++++++++++++++
A third_party/rust/oneshot/tests/async.rs | 128 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
A third_party/rust/oneshot/tests/future.rs | 65 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
A third_party/rust/oneshot/tests/helpers/mod.rs | 63 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
A third_party/rust/oneshot/tests/helpers/waker.rs | 64 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
A third_party/rust/oneshot/tests/loom.rs | 249 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
A third_party/rust/oneshot/tests/raw.rs | 46 ++++++++++++++++++++++++++++++++++++++++++++++
A third_party/rust/oneshot/tests/sync.rs | 369 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
M toolkit/library/rust/shared/Cargo.toml | 1 +
M toolkit/library/rust/shared/lib.rs | 6 ++++++
44 files changed, 6507 insertions(+), 0 deletions(-)

diff --git a/Cargo.lock b/Cargo.lock @@ -2659,6 +2659,7 @@ dependencies = [ "urlpattern", "urlpattern_glue", "viaduct", + "viaduct-necko", "webext-storage", "webrender_bindings", "wgpu_bindings", @@ -5232,6 +5233,12 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "42f5e15c9953c5e4ccceeb2e7382a716482c34515315f7b03532b8b4e8393d2d" [[package]] +name = "oneshot" +version = "0.1.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b4ce411919553d3f9fa53a0880544cda985a112117a0444d5ff1e870a893d6ea" + +[[package]] name = "ordered-float" version = "3.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" @@ -7675,6 +7682,17 @@ dependencies = [ ] [[package]] +name = "viaduct-necko" +version = "0.1.0" +dependencies = [ + "async-trait", + "error-support", + "oneshot", + "url", + "viaduct", +] + +[[package]] name = "void" version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" diff --git a/Cargo.toml b/Cargo.toml @@ -26,6 +26,7 @@ members = [ "toolkit/crashreporter/mozwer-rust", "toolkit/library/gtest/rust", "toolkit/library/rust/", + "services/application-services/components/viaduct-necko", ] # Excluded crates may be built as dependencies, but won't be considered members diff --git a/services/application-services/components/viaduct-necko/Cargo.toml b/services/application-services/components/viaduct-necko/Cargo.toml @@ -0,0 +1,15 @@ +[package] +name = "viaduct-necko" +version = "0.1.0" +edition = "2021" +rust-version.workspace = true + +[dependencies] +async-trait = "0.1" +error-support = "0.1" +oneshot = "0.1.11" +url = "2" +viaduct = "0.1" + +[lib] +path = "src/lib.rs" diff --git a/services/application-services/components/viaduct-necko/backend.cpp b/services/application-services/components/viaduct-necko/backend.cpp @@ -0,0 +1,670 @@ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. 
If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +#include "backend.h" + +#include "mozilla/Logging.h" +#include "mozilla/Span.h" +#include "nsCOMPtr.h" +#include "nsComponentManagerUtils.h" +#include "nsContentUtils.h" +#include "nsIChannel.h" +#include "nsIHttpChannel.h" +#include "nsIHttpChannelInternal.h" +#include "nsIHttpHeaderVisitor.h" +#include "nsIInputStream.h" +#include "nsIStreamListener.h" +#include "nsITimer.h" +#include "nsIUploadChannel2.h" +#include "nsIURI.h" +#include "nsNetUtil.h" +#include "nsPrintfCString.h" +#include "nsStringStream.h" +#include "nsThreadUtils.h" +#include "nsTArray.h" + +#include <cinttypes> +#include <utility> + +using namespace mozilla; + +// Logger for viaduct-necko backend +static LazyLogModule gViaductLogger("viaduct"); + +/** + * Manages viaduct Request/Result pointers + * + * This class ensures that we properly manage the `ViaductRequest` and + * `ViaductResult` pointers, avoiding use-after-free bugs. It ensures that + * either `viaduct_necko_result_complete` or + * `viaduct_necko_result_complete_error` will be called exactly once and the + * pointers won't be used after that. + * + * This class is designed to be created outside of NS_DispatchToMainThread and + * moved into the closure. This way, even if the closure never runs, the + * destructor will still be called and we'll complete with an error. + */ +class ViaductRequestGuard { + private: + const ViaductRequest* mRequest; + ViaductResult* mResult; + + public: + // Constructor + ViaductRequestGuard(const ViaductRequest* aRequest, ViaductResult* aResult) + : mRequest(aRequest), mResult(aResult) { + MOZ_LOG(gViaductLogger, LogLevel::Debug, + ("ViaductRequestGuard: Created with request=%p, result=%p", + mRequest, mResult)); + } + + // Move Constructor + // Transfers ownership of the pointers from other to this. 
+ ViaductRequestGuard(ViaductRequestGuard&& other) noexcept + : mRequest(std::exchange(other.mRequest, nullptr)), + mResult(std::exchange(other.mResult, nullptr)) { + MOZ_LOG(gViaductLogger, LogLevel::Debug, + ("ViaductRequestGuard: Move constructed, request=%p, result=%p", + mRequest, mResult)); + } + + // Move assignment operator + ViaductRequestGuard& operator=(ViaductRequestGuard&& other) noexcept { + if (this != &other) { + // If we already own pointers, complete with error before replacing + if (mResult) { + MOZ_LOG(gViaductLogger, LogLevel::Warning, + ("ViaductRequestGuard: Move assignment replacing existing " + "pointers, completing with error")); + viaduct_necko_result_complete_error( + mResult, static_cast<uint32_t>(NS_ERROR_ABORT), + "Request replaced by move assignment"); + } + mRequest = std::exchange(other.mRequest, nullptr); + mResult = std::exchange(other.mResult, nullptr); + } + return *this; + } + + // Disable copy constructor and assignment + // We prevent copying since we only want to complete the result once. + ViaductRequestGuard(const ViaductRequestGuard& other) = delete; + ViaductRequestGuard& operator=(const ViaductRequestGuard& other) = delete; + + ~ViaductRequestGuard() { + // If mResult is non-null, the request was destroyed before completing. + // This can happen if the closure never runs (e.g., shutdown). + if (mResult) { + MOZ_LOG(gViaductLogger, LogLevel::Warning, + ("ViaductRequestGuard: Destructor called with non-null result, " + "completing with error")); + viaduct_necko_result_complete_error( + mResult, static_cast<uint32_t>(NS_ERROR_ABORT), + "Request destroyed without completion"); + } + } + + // Get the request pointer (for reading request data) + // Returns nullptr if already consumed. 
+ const ViaductRequest* Request() const { + MOZ_ASSERT(mRequest, + "ViaductRequestGuard::Request called after completion"); + return mRequest; + } + + // Get the result pointer (for building up the response) + // Returns nullptr if already consumed. + ViaductResult* Result() const { + MOZ_ASSERT(mResult, "ViaductRequestGuard::Result called after completion"); + return mResult; + } + + // Check if the guard still owns valid pointers + bool IsValid() const { return mResult != nullptr; } + + // Complete the result successfully and release ownership. + // After this call, the guard no longer owns the pointers. + void Complete() { + MOZ_ASSERT(mResult, "ViaductRequestGuard::Complete called twice"); + MOZ_LOG(gViaductLogger, LogLevel::Debug, + ("ViaductRequestGuard: Completing successfully")); + viaduct_necko_result_complete(mResult); + mResult = nullptr; + mRequest = nullptr; + } + + // Complete the result with an error and release ownership. + // After this call, the guard no longer owns the pointers. 
+ void CompleteWithError(nsresult aError, const char* aMessage) { + MOZ_ASSERT(mResult, "ViaductRequestGuard::CompleteWithError called twice"); + MOZ_LOG(gViaductLogger, LogLevel::Error, + ("ViaductRequestGuard: Completing with error: %s (0x%08x)", + aMessage, static_cast<uint32_t>(aError))); + viaduct_necko_result_complete_error(mResult, static_cast<uint32_t>(aError), + aMessage); + mResult = nullptr; + mRequest = nullptr; + } +}; + +// Listener that collects the complete HTTP response (headers and body) +class ViaductResponseListener final : public nsIHttpHeaderVisitor, + public nsIStreamListener, + public nsITimerCallback, + public nsINamed { + public: + NS_DECL_THREADSAFE_ISUPPORTS + NS_DECL_NSIHTTPHEADERVISITOR + NS_DECL_NSIREQUESTOBSERVER + NS_DECL_NSISTREAMLISTENER + NS_DECL_NSITIMERCALLBACK + NS_DECL_NSINAMED + + explicit ViaductResponseListener(ViaductRequestGuard&& aGuard, + uint32_t aTimeoutSecs) + : mGuard(std::move(aGuard)), mChannel(nullptr) { + MOZ_LOG(gViaductLogger, LogLevel::Info, + ("TRACE: ViaductResponseListener constructor called with timeout: " + "%u seconds, guard valid: %s", + aTimeoutSecs, mGuard.IsValid() ? 
"true" : "false")); + + // Create timeout timer if timeout > 0 + if (aTimeoutSecs > 0) { + MOZ_LOG(gViaductLogger, LogLevel::Debug, + ("Setting timeout timer for %u seconds", aTimeoutSecs)); + nsresult rv = + NS_NewTimerWithCallback(getter_AddRefs(mTimeoutTimer), this, + aTimeoutSecs * 1000, nsITimer::TYPE_ONE_SHOT); + if (NS_WARN_IF(NS_FAILED(rv))) { + MOZ_LOG(gViaductLogger, LogLevel::Error, + ("Failed to create timeout timer: 0x%08x", + static_cast<uint32_t>(rv))); + } + } + } + + void SetChannel(nsIChannel* aChannel) { mChannel = aChannel; } + + private: + ~ViaductResponseListener() { + MOZ_LOG(gViaductLogger, LogLevel::Info, + ("TRACE: ViaductResponseListener destructor called")); + + ClearTimer(); + + // The guard's destructor will handle completion if needed + } + + void ClearTimer() { + if (mTimeoutTimer) { + mTimeoutTimer->Cancel(); + mTimeoutTimer = nullptr; + } + } + + // Error handling: logs error and completes the result with error via the + // guard. + void HandleError(nsresult aError, const char* aMessage); + + // Wrapper methods that use the guard to safely access the result + void SetStatusCode(uint16_t aStatusCode); + void SetUrl(const char* aUrl, size_t aLength); + void AddHeader(const char* aKey, size_t aKeyLength, const char* aValue, + size_t aValueLength); + void ExtendBody(const uint8_t* aData, size_t aLength); + void Complete(); + + ViaductRequestGuard mGuard; + nsCOMPtr<nsITimer> mTimeoutTimer; + nsCOMPtr<nsIChannel> mChannel; +}; + +NS_IMPL_ISUPPORTS(ViaductResponseListener, nsIHttpHeaderVisitor, + nsIStreamListener, nsIRequestObserver, nsITimerCallback, + nsINamed) + +void ViaductResponseListener::HandleError(nsresult aError, + const char* aMessage) { + MOZ_LOG(gViaductLogger, LogLevel::Error, + ("TRACE: HandleError called with message: %s (0x%08x)", aMessage, + static_cast<uint32_t>(aError))); + + if (mGuard.IsValid()) { + MOZ_LOG(gViaductLogger, LogLevel::Info, + ("TRACE: Calling CompleteWithError via guard")); + 
mGuard.CompleteWithError(aError, aMessage); + } else { + MOZ_LOG(gViaductLogger, LogLevel::Error, + ("TRACE: HandleError called but guard is invalid")); + } +} + +void ViaductResponseListener::SetStatusCode(uint16_t aStatusCode) { + MOZ_LOG(gViaductLogger, LogLevel::Info, + ("TRACE: SetStatusCode called with code: %u", aStatusCode)); + if (!mGuard.IsValid()) { + MOZ_LOG(gViaductLogger, LogLevel::Error, + ("SetStatusCode called but guard is invalid")); + return; + } + viaduct_necko_result_set_status_code(mGuard.Result(), aStatusCode); + MOZ_LOG(gViaductLogger, LogLevel::Debug, + ("Set status code: %u", aStatusCode)); +} + +void ViaductResponseListener::SetUrl(const char* aUrl, size_t aLength) { + MOZ_LOG(gViaductLogger, LogLevel::Info, + ("TRACE: SetUrl called with URL (length %zu)", aLength)); + if (!mGuard.IsValid()) { + MOZ_LOG(gViaductLogger, LogLevel::Error, + ("SetUrl called but guard is invalid")); + return; + } + viaduct_necko_result_set_url(mGuard.Result(), aUrl, aLength); + MOZ_LOG(gViaductLogger, LogLevel::Debug, ("Set URL")); +} + +void ViaductResponseListener::AddHeader(const char* aKey, size_t aKeyLength, + const char* aValue, + size_t aValueLength) { + MOZ_LOG(gViaductLogger, LogLevel::Info, + ("TRACE: AddHeader called - key length: %zu, value length: %zu", + aKeyLength, aValueLength)); + if (!mGuard.IsValid()) { + MOZ_LOG(gViaductLogger, LogLevel::Error, + ("AddHeader called but guard is invalid")); + return; + } + viaduct_necko_result_add_header(mGuard.Result(), aKey, aKeyLength, aValue, + aValueLength); + MOZ_LOG(gViaductLogger, LogLevel::Debug, ("Added header")); +} + +void ViaductResponseListener::ExtendBody(const uint8_t* aData, size_t aLength) { + MOZ_LOG(gViaductLogger, LogLevel::Info, + ("TRACE: ExtendBody called with %zu bytes", aLength)); + if (!mGuard.IsValid()) { + MOZ_LOG(gViaductLogger, LogLevel::Error, + ("ExtendBody called but guard is invalid")); + return; + } + viaduct_necko_result_extend_body(mGuard.Result(), aData, aLength); + 
MOZ_LOG(gViaductLogger, LogLevel::Debug, + ("Extended body with %zu bytes", aLength)); +} + +void ViaductResponseListener::Complete() { + MOZ_LOG(gViaductLogger, LogLevel::Info, + ("TRACE: Complete called - marking request as successful")); + if (!mGuard.IsValid()) { + MOZ_LOG(gViaductLogger, LogLevel::Error, + ("Complete called but guard is invalid")); + return; + } + MOZ_LOG(gViaductLogger, LogLevel::Info, + ("TRACE: Calling Complete via guard")); + mGuard.Complete(); +} + +NS_IMETHODIMP +ViaductResponseListener::VisitHeader(const nsACString& aHeader, + const nsACString& aValue) { + MOZ_LOG(gViaductLogger, LogLevel::Info, + ("TRACE: VisitHeader called for header: %s", + PromiseFlatCString(aHeader).get())); + AddHeader(aHeader.BeginReading(), aHeader.Length(), aValue.BeginReading(), + aValue.Length()); + return NS_OK; +} + +NS_IMETHODIMP +ViaductResponseListener::OnStartRequest(nsIRequest* aRequest) { + MOZ_LOG(gViaductLogger, LogLevel::Info, + ("TRACE: ========== OnStartRequest called ==========")); + + nsCOMPtr<nsIHttpChannel> httpChannel = do_QueryInterface(aRequest); + if (!httpChannel) { + HandleError(NS_ERROR_FAILURE, "Request is not an HTTP channel"); + return NS_ERROR_FAILURE; + } + + // Get status code from HTTP channel + uint32_t responseStatus; + nsresult rv = httpChannel->GetResponseStatus(&responseStatus); + if (NS_FAILED(rv)) { + HandleError(rv, "Failed to get response status"); + return rv; + } + SetStatusCode(static_cast<uint16_t>(responseStatus)); + + // Get final URL + nsCOMPtr<nsIURI> uri; + rv = httpChannel->GetURI(getter_AddRefs(uri)); + if (NS_FAILED(rv)) { + HandleError(rv, "Failed to get URI"); + return rv; + } + + if (!uri) { + HandleError(NS_ERROR_FAILURE, "HTTP channel has null URI"); + return NS_ERROR_FAILURE; + } + + nsAutoCString spec; + rv = uri->GetSpec(spec); + if (NS_FAILED(rv)) { + HandleError(rv, "Failed to get URI spec"); + return rv; + } + SetUrl(spec.get(), spec.Length()); + + // Collect response headers - using 'this' since 
we implement + // nsIHttpHeaderVisitor + MOZ_LOG(gViaductLogger, LogLevel::Info, + ("TRACE: About to visit response headers")); + rv = httpChannel->VisitResponseHeaders(this); + if (NS_FAILED(rv)) { + HandleError(rv, "Failed to visit response headers"); + return rv; + } + + return NS_OK; +} + +NS_IMETHODIMP +ViaductResponseListener::OnDataAvailable(nsIRequest* aRequest, + nsIInputStream* aInputStream, + uint64_t aOffset, uint32_t aCount) { + MOZ_LOG(gViaductLogger, LogLevel::Debug, + ("OnDataAvailable called with %u bytes at offset %" PRIu64, aCount, + aOffset)); + + // Read the data from the input stream + nsTArray<uint8_t> buffer; + buffer.SetLength(aCount); + + uint32_t bytesRead; + nsresult rv = aInputStream->Read(reinterpret_cast<char*>(buffer.Elements()), + aCount, &bytesRead); + if (NS_FAILED(rv)) { + HandleError(rv, "Failed to read from input stream"); + return rv; + } + + if (bytesRead > 0) { + ExtendBody(buffer.Elements(), bytesRead); + } else { + MOZ_LOG(gViaductLogger, LogLevel::Warning, + ("Read 0 bytes from input stream")); + } + + return NS_OK; +} + +NS_IMETHODIMP +ViaductResponseListener::OnStopRequest(nsIRequest* aRequest, nsresult aStatus) { + MOZ_LOG(gViaductLogger, LogLevel::Debug, + ("OnStopRequest called with status: 0x%08x", + static_cast<uint32_t>(aStatus))); + + // Cancel timer since request is complete + ClearTimer(); + + if (NS_SUCCEEDED(aStatus)) { + Complete(); + } else { + HandleError(aStatus, "Request failed"); + } + + return NS_OK; +} + +/////////////////////////////////////////////////////////////////////////////// +// nsITimerCallback implementation + +NS_IMETHODIMP +ViaductResponseListener::Notify(nsITimer* aTimer) { + MOZ_LOG(gViaductLogger, LogLevel::Warning, + ("TRACE: Request timeout fired - cancelling request")); + + ClearTimer(); + + // Cancel the channel, which will trigger OnStopRequest with an error + if (mChannel) { + mChannel->Cancel(NS_ERROR_NET_TIMEOUT_EXTERNAL); + mChannel = nullptr; + } + + return NS_OK; +} + 
+/////////////////////////////////////////////////////////////////////////////// +// nsINamed implementation + +NS_IMETHODIMP +ViaductResponseListener::GetName(nsACString& aName) { + aName.AssignLiteral("ViaductResponseListener"); + return NS_OK; +} + +// Convert ViaductMethod to HTTP method string +static const char* GetMethodString(ViaductMethod method) { + switch (method) { + case VIADUCT_METHOD_GET: + return "GET"; + case VIADUCT_METHOD_HEAD: + return "HEAD"; + case VIADUCT_METHOD_POST: + return "POST"; + case VIADUCT_METHOD_PUT: + return "PUT"; + case VIADUCT_METHOD_DELETE: + return "DELETE"; + case VIADUCT_METHOD_CONNECT: + return "CONNECT"; + case VIADUCT_METHOD_OPTIONS: + return "OPTIONS"; + case VIADUCT_METHOD_TRACE: + return "TRACE"; + case VIADUCT_METHOD_PATCH: + return "PATCH"; + default: + MOZ_LOG(gViaductLogger, LogLevel::Warning, + ("Unknown ViaductMethod: %d, defaulting to GET", method)); + return "GET"; + } +} + +extern "C" { + +void viaduct_necko_backend_init() { + MOZ_LOG(gViaductLogger, LogLevel::Info, + ("Viaduct Necko backend initialized")); +} + +void viaduct_necko_backend_send_request(const ViaductRequest* request, + ViaductResult* result) { + MOZ_LOG(gViaductLogger, LogLevel::Debug, ("send_request called")); + + MOZ_ASSERT(request, "Request pointer should not be null"); + MOZ_ASSERT(result, "Result pointer should not be null"); + + // Create a guard to manage the request/result pointer lifetime. + // This ensures that either viaduct_necko_result_complete or + // viaduct_necko_result_complete_error is called exactly once, + // even if the closure never runs (e.g., during shutdown). + ViaductRequestGuard guard(request, result); + + // This function is called from Rust on a background thread. + // We need to dispatch to the main thread to use Necko. 
+ NS_DispatchToMainThread(NS_NewRunnableFunction( + "ViaductNeckoRequest", [guard = std::move(guard)]() mutable { + MOZ_LOG(gViaductLogger, LogLevel::Debug, + ("Executing request on main thread")); + + MOZ_ASSERT(guard.Request() && guard.Result(), + "Guard should have valid pointers"); + + nsresult rv; + + // Parse the URL + nsCOMPtr<nsIURI> uri; + nsAutoCString urlSpec(guard.Request()->url); + MOZ_LOG(gViaductLogger, LogLevel::Debug, + ("Parsing URL: %s", urlSpec.get())); + + rv = NS_NewURI(getter_AddRefs(uri), urlSpec); + if (NS_FAILED(rv)) { + guard.CompleteWithError(rv, "Failed to parse URL"); + return; + } + + // Create the channel + nsSecurityFlags secFlags = + nsILoadInfo::SEC_ALLOW_CROSS_ORIGIN_SEC_CONTEXT_IS_NULL | + nsILoadInfo::SEC_COOKIES_OMIT; + + nsCOMPtr<nsIChannel> channel; + rv = NS_NewChannel(getter_AddRefs(channel), uri, + nsContentUtils::GetSystemPrincipal(), secFlags, + nsIContentPolicy::TYPE_OTHER); + + if (NS_FAILED(rv)) { + guard.CompleteWithError(rv, "Failed to create channel"); + return; + } + + if (!channel) { + guard.CompleteWithError(NS_ERROR_FAILURE, + "NS_NewChannel returned null channel"); + return; + } + + // Get the HTTP channel interface + nsCOMPtr<nsIHttpChannel> httpChannel = do_QueryInterface(channel); + if (!httpChannel) { + guard.CompleteWithError(NS_ERROR_FAILURE, + "Channel is not an HTTP channel"); + return; + } + + // Set HTTP method + const char* methodStr = GetMethodString(guard.Request()->method); + MOZ_LOG(gViaductLogger, LogLevel::Debug, + ("Setting HTTP method: %s", methodStr)); + rv = httpChannel->SetRequestMethod(nsDependentCString(methodStr)); + if (NS_FAILED(rv)) { + guard.CompleteWithError(rv, "Failed to set request method"); + return; + } + + // Set request headers + MOZ_LOG(gViaductLogger, LogLevel::Debug, + ("Setting %zu request headers", guard.Request()->header_count)); + for (size_t i = 0; i < guard.Request()->header_count; i++) { + nsAutoCString key(guard.Request()->headers[i].key); + nsAutoCString 
value(guard.Request()->headers[i].value); + rv = httpChannel->SetRequestHeader(key, value, false); + if (NS_FAILED(rv)) { + guard.CompleteWithError(rv, "Failed to set request header"); + return; + } + } + + // Set redirect limit + if (guard.Request()->redirect_limit == 0) { + // Disable redirects entirely + MOZ_LOG(gViaductLogger, LogLevel::Debug, ("Disabling redirects")); + nsCOMPtr<nsIHttpChannelInternal> httpInternal = + do_QueryInterface(httpChannel); + if (!httpInternal) { + guard.CompleteWithError( + NS_ERROR_FAILURE, + "Failed to get nsIHttpChannelInternal interface"); + return; + } + rv = httpInternal->SetRedirectMode( + nsIHttpChannelInternal::REDIRECT_MODE_ERROR); + if (NS_FAILED(rv)) { + guard.CompleteWithError(rv, "Failed to set redirect mode"); + return; + } + } else { + // Set a specific redirect limit + MOZ_LOG( + gViaductLogger, LogLevel::Debug, + ("Setting redirect limit: %u", guard.Request()->redirect_limit)); + rv = + httpChannel->SetRedirectionLimit(guard.Request()->redirect_limit); + if (NS_FAILED(rv)) { + guard.CompleteWithError(rv, "Failed to set redirection limit"); + return; + } + } + + // Set request body if present + if (guard.Request()->body != nullptr && guard.Request()->body_len > 0) { + MOZ_LOG( + gViaductLogger, LogLevel::Debug, + ("Setting request body (%zu bytes)", guard.Request()->body_len)); + nsCOMPtr<nsIUploadChannel2> uploadChannel = + do_QueryInterface(httpChannel); + if (!uploadChannel) { + guard.CompleteWithError( + NS_ERROR_FAILURE, "Failed to get nsIUploadChannel2 interface"); + return; + } + + nsCOMPtr<nsIInputStream> bodyStream; + rv = NS_NewByteInputStream( + getter_AddRefs(bodyStream), + Span(reinterpret_cast<const char*>(guard.Request()->body), + guard.Request()->body_len), + NS_ASSIGNMENT_COPY); + if (NS_FAILED(rv)) { + guard.CompleteWithError(rv, "Failed to create body stream"); + return; + } + + rv = uploadChannel->ExplicitSetUploadStream( + bodyStream, VoidCString(), guard.Request()->body_len, + 
nsDependentCString(methodStr), false); + if (NS_FAILED(rv)) { + guard.CompleteWithError(rv, "Failed to set upload stream"); + return; + } + } + + // Get timeout before moving the guard + uint32_t timeout = guard.Request()->timeout; + + // Create listener with timeout support. + // Move the guard into the listener so it owns the request/result + // pointers. + RefPtr<ViaductResponseListener> listener = + new ViaductResponseListener(std::move(guard), timeout); + + // Store the channel in the listener so it can cancel it on + // timeout + listener->SetChannel(channel); + + MOZ_LOG(gViaductLogger, LogLevel::Debug, ("Opening HTTP channel")); + rv = httpChannel->AsyncOpen(listener); + + if (NS_FAILED(rv)) { + MOZ_LOG(gViaductLogger, LogLevel::Error, + ("AsyncOpen failed: 0x%08x. Guard was moved to listener, " + "destructor will handle cleanup and complete with error.", + static_cast<uint32_t>(rv))); + return; + } + + MOZ_LOG(gViaductLogger, LogLevel::Debug, + ("Request initiated successfully")); + // The request is now in progress. The listener will handle + // completion. + })); +} + +} // extern "C" diff --git a/services/application-services/components/viaduct-necko/backend.h b/services/application-services/components/viaduct-necko/backend.h @@ -0,0 +1,72 @@ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. 
*/ + +#ifndef VIADUCT_NECKO_H +#define VIADUCT_NECKO_H + +#include <stdint.h> +#include <stddef.h> + +#ifdef __cplusplus +extern "C" { +#endif + +// HTTP Method enumeration (must match Rust side) +enum ViaductMethod : uint8_t { + VIADUCT_METHOD_GET = 0, + VIADUCT_METHOD_HEAD = 1, + VIADUCT_METHOD_POST = 2, + VIADUCT_METHOD_PUT = 3, + VIADUCT_METHOD_DELETE = 4, + VIADUCT_METHOD_CONNECT = 5, + VIADUCT_METHOD_OPTIONS = 6, + VIADUCT_METHOD_TRACE = 7, + VIADUCT_METHOD_PATCH = 8, +}; + +// Header structure +struct ViaductHeader { + const char* key; + const char* value; +}; + +// Request structure +struct ViaductRequest { + uint32_t timeout; + uint32_t redirect_limit; + ViaductMethod method; + const char* url; + const ViaductHeader* headers; + size_t header_count; + const uint8_t* body; // Body remains uint8_t* since it's binary data + size_t body_len; +}; + +// Opaque result pointer (points to Rust FfiResult) +struct ViaductResult; + +// Functions that C++ must implement +void viaduct_necko_backend_init(); +void viaduct_necko_backend_send_request(const ViaductRequest* request, + ViaductResult* result); + +// Functions that Rust provides for C++ to call +void viaduct_necko_result_set_url(ViaductResult* result, const char* url, + size_t length); +void viaduct_necko_result_set_status_code(ViaductResult* result, uint16_t code); +void viaduct_necko_result_add_header(ViaductResult* result, const char* key, + size_t key_length, const char* value, + size_t value_length); +void viaduct_necko_result_extend_body(ViaductResult* result, + const uint8_t* data, size_t length); +void viaduct_necko_result_complete(ViaductResult* result); +void viaduct_necko_result_complete_error(ViaductResult* result, + uint32_t error_code, + const char* message); + +#ifdef __cplusplus +} +#endif + +#endif // VIADUCT_NECKO_H diff --git a/services/application-services/components/viaduct-necko/moz.build b/services/application-services/components/viaduct-necko/moz.build @@ -0,0 +1,16 @@ +# -*- Mode: python; 
indent-tabs-mode: nil; tab-width: 40 -*- +# vim: set filetype=python: +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at http://mozilla.org/MPL/2.0/. + +FINAL_LIBRARY = "xul" + +with Files("**"): + BUG_COMPONENT = ("Application Services", "General") + +UNIFIED_SOURCES += [ + "backend.cpp", +] + +XPCSHELL_TESTS_MANIFESTS += ["tests/xpcshell.toml"] diff --git a/services/application-services/components/viaduct-necko/src/lib.rs b/services/application-services/components/viaduct-necko/src/lib.rs @@ -0,0 +1,350 @@ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +use error_support::{info, warn}; +use std::{ffi::{CStr, c_char}, ptr, slice, sync::Arc}; +use url::Url; +use viaduct::{ + init_backend, Backend, ClientSettings, Method, Request, Response, Result, ViaductError, +}; + +const NULL: char = '\0'; + +/// Request for the C++ backend +#[repr(C)] +pub struct FfiRequest { + pub timeout: u32, + pub redirect_limit: u32, + pub method: Method, + pub url: *mut u8, + pub headers: *mut FfiHeader, + pub header_count: usize, + pub body: *mut u8, + pub body_len: usize, +} + +#[repr(C)] +pub struct FfiHeader { + pub key: *mut u8, + pub value: *mut u8, +} + +/// Result from the backend +/// +/// This is built-up piece by piece using the extern "C" API. +pub struct FfiResult { + // oneshot sender that the Rust code is awaiting. If `Ok(())` is sent, then the Rust code + // should return the response. If an error is sent, then that should be returned instead. + sender: Option<oneshot::Sender<Result<Response>>>, + response: Response, + // Owned values stored in the [FfiRequest]. These are copied from the request. By storing + // them in the result, we ensure they stay alive while the C code may access them. 
+ pub url: String, + pub headers: Vec<(String, String)>, + pub body: Option<Vec<u8>>, + // The request struct that we pass to C++. This must be kept alive as long as the C++ code is + // using it. + pub request: FfiRequest, + pub ffi_headers: Vec<FfiHeader>, +} + +// Functions that the C++ library exports for us +extern "C" { + fn viaduct_necko_backend_init(); + + #[allow(improper_ctypes)] + fn viaduct_necko_backend_send_request(request: *const FfiRequest, result: *mut FfiResult); +} + +// Functions that we provide to the C++ library + +/// Set the URL for a result +/// +/// # Safety +/// +/// - `result` must be valid. +/// - `url` and `length` must refer to a valid byte string. +/// +/// Note: URLs are expected to be ASCII. Non-ASCII URLs will be logged and skipped. +#[no_mangle] +pub unsafe extern "C" fn viaduct_necko_result_set_url( + result: *mut FfiResult, + url: *const u8, + length: usize, +) { + let result = unsafe { &mut *result }; + + // Safety: Creating a slice from raw parts is safe if the backend passes valid pointers and lengths + let url_bytes = unsafe { slice::from_raw_parts(url, length) }; + + // Validate that the URL is ASCII before converting to String + if !url_bytes.is_ascii() { + warn!( + "Non-ASCII URL received - length: {} - skipping URL update", + length + ); + return; + } + + // Safety: We just verified the bytes are ASCII, which is valid UTF-8 + let url_str = unsafe { std::str::from_utf8_unchecked(url_bytes) }; + + match Url::parse(url_str) { + Ok(url) => { + result.response.url = url; + } + Err(e) => { + warn!("Error parsing URL from C backend: {e}") + } + } +} + +/// Set the status code for a result +/// +/// # Safety +/// +/// `result` must be valid. +#[no_mangle] +pub unsafe extern "C" fn viaduct_necko_result_set_status_code(result: *mut FfiResult, code: u16) { + let result = unsafe { &mut *result }; + result.response.status = code; +} + +/// Set a header for a result +/// +/// # Safety +/// +/// - `result` must be valid. 
+/// - `key` and `key_length` must refer to a valid byte string. +/// - `value` and `value_length` must refer to a valid byte string. +/// +/// Note: HTTP headers are expected to be ASCII. Non-ASCII headers will be logged and skipped. +#[no_mangle] +pub unsafe extern "C" fn viaduct_necko_result_add_header( + result: *mut FfiResult, + key: *const u8, + key_length: usize, + value: *const u8, + value_length: usize, +) { + let result = unsafe { &mut *result }; + + // Safety: Creating slices from raw parts is safe if the backend passes valid pointers and lengths + let key_bytes = unsafe { slice::from_raw_parts(key, key_length) }; + let value_bytes = unsafe { slice::from_raw_parts(value, value_length) }; + + // Validate that headers are ASCII before converting to String + // HTTP headers should be ASCII per best practices, though the spec technically allows other encodings + if !key_bytes.is_ascii() || !value_bytes.is_ascii() { + warn!( + "Non-ASCII HTTP header received - key_len: {}, value_len: {} - skipping header", + key_length, value_length + ); + return; + } + + // Safety: We just verified the bytes are ASCII, which is valid UTF-8 + let (key, value) = unsafe { + ( + String::from_utf8_unchecked(key_bytes.to_vec()), + String::from_utf8_unchecked(value_bytes.to_vec()), + ) + }; + + let _ = result.response.headers.insert(key, value); +} + +/// Append data to a result body +/// +/// This method can be called multiple times to build up the body in chunks. +/// +/// # Safety +/// +/// - `result` must be valid. +/// - `data` and `length` must refer to a binary string. 
+#[no_mangle] +pub unsafe extern "C" fn viaduct_necko_result_extend_body( + result: *mut FfiResult, + data: *const u8, + length: usize, +) { + let result = unsafe { &mut *result }; + // Safety: this is safe as long as the backend passes us valid data + result + .response + .body + .extend_from_slice(unsafe { slice::from_raw_parts(data, length) }); +} + +/// Complete a result +/// +/// # Safety +/// +/// `result` must be valid. After calling this function it must not be used again. +#[no_mangle] +pub unsafe extern "C" fn viaduct_necko_result_complete(result: *mut FfiResult) { + let mut result = unsafe { Box::from_raw(result) }; + match result.sender.take() { + Some(sender) => { + // Ignore any errors when sending the result. This happens when the receiver is + // closed, which happens when a future is cancelled. + let _ = sender.send(Ok(result.response)); + } + None => warn!("viaduct-necko: result completed twice"), + } +} + +/// Complete a result with an error message +/// +/// # Safety +/// +/// - `result` must be valid. After calling this function it must not be used again. +/// - `message` and `length` must refer to a valid UTF-8 string. +#[no_mangle] +pub unsafe extern "C" fn viaduct_necko_result_complete_error( + result: *mut FfiResult, + error_code: u32, + message: *const u8, +) { + let mut result = unsafe { Box::from_raw(result) }; + // Safety: this is safe as long as the backend passes us valid data + let msg_str = unsafe { + CStr::from_ptr(message as *const c_char) + .to_string_lossy() + .into_owned() + }; + let msg = format!("{} (0x{:08x})", msg_str, error_code); + match result.sender.take() { + Some(sender) => { + // Ignore any errors when sending the result. This happens when the receiver is + // closed, which happens when a future is cancelled. 
+ let _ = sender.send(Err(ViaductError::BackendError(msg))); + } + None => warn!("viaduct-necko: result completed twice"), + } +} + +// The Necko backend is a zero-sized type, since all the backend functionality is statically linked +struct NeckoBackend; + +/// Initialize the Necko backend +/// +/// This should be called once at startup before any HTTP requests are made. +pub fn init_necko_backend() -> Result<()> { + info!("Initializing viaduct Necko backend"); + // Safety: this is safe as long as the C++ code is correct. + unsafe { viaduct_necko_backend_init() }; + init_backend(Arc::new(NeckoBackend)) +} + +#[async_trait::async_trait] +impl Backend for NeckoBackend { + async fn send_request(&self, request: Request, settings: ClientSettings) -> Result<Response> { + // Convert the request for the backend + let mut url = request.url.to_string(); + url.push(NULL); + + // Convert headers to null-terminated strings for C++ + // Note: Headers iterates over Header objects, not tuples + let header_strings: Vec<(String, String)> = request + .headers + .iter() + .map(|h| { + let mut key_str = h.name().to_string(); + key_str.push(NULL); + let mut value_str = h.value().to_string(); + value_str.push(NULL); + (key_str, value_str) + }) + .collect(); + + // Prepare an FfiResult with an empty response + let (sender, receiver) = oneshot::channel(); + let mut result = Box::new(FfiResult { + sender: Some(sender), + response: Response { + request_method: request.method, + url: request.url.clone(), + status: 0, + headers: viaduct::Headers::new(), + body: Vec::default(), + }, + url, + headers: header_strings, + body: request.body, + request: FfiRequest { + timeout: settings.timeout, + redirect_limit: settings.redirect_limit, + method: request.method, + url: ptr::null_mut(), + headers: ptr::null_mut(), + header_count: 0, + body: ptr::null_mut(), + body_len: 0, + }, + ffi_headers: Vec::new(), + }); + + // Now that we have the result box, we can set up the pointers in the request. 
+ // By doing this after creating the box, we minimize the chance that a value moves after a pointer is created. + result.ffi_headers = result + .headers + .iter_mut() + .map(|(key, value)| FfiHeader { + key: key.as_mut_ptr(), + value: value.as_mut_ptr(), + }) + .collect(); + + let (body_ptr, body_len) = match &result.body { + Some(body) => (body.as_ptr() as *mut u8, body.len()), + None => (ptr::null_mut(), 0), + }; + + result.request.url = result.url.as_mut_ptr(); + result.request.headers = result.ffi_headers.as_mut_ptr(); + result.request.header_count = result.ffi_headers.len(); + result.request.body = body_ptr; + result.request.body_len = body_len; + + let request_ptr = &result.request as *const FfiRequest; + + // Safety: this is safe if the C backend implements the API correctly. + unsafe { + viaduct_necko_backend_send_request(request_ptr, Box::into_raw(result)); + }; + + receiver.await.unwrap_or_else(|_| { + Err(ViaductError::BackendError( + "Error receiving result from C++ backend".to_string(), + )) + }) + } +} + +// Mark FFI types as Send to allow them to be used across an await point. This is safe as long as +// the backend code uses them correctly. +unsafe impl Send for FfiRequest {} +unsafe impl Send for FfiResult {} +unsafe impl Send for FfiHeader {} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_method_layout() { + // Assert that the viaduct::Method enum matches the layout expected by the C++ backend. 
+ // See ViaductMethod in backend.h + assert_eq!(Method::Get as u8, 0); + assert_eq!(Method::Head as u8, 1); + assert_eq!(Method::Post as u8, 2); + assert_eq!(Method::Put as u8, 3); + assert_eq!(Method::Delete as u8, 4); + assert_eq!(Method::Connect as u8, 5); + assert_eq!(Method::Options as u8, 6); + assert_eq!(Method::Trace as u8, 7); + assert_eq!(Method::Patch as u8, 8); + } +} diff --git a/services/application-services/components/viaduct-necko/tests/test_viaduct_necko_backend.js b/services/application-services/components/viaduct-necko/tests/test_viaduct_necko_backend.js @@ -0,0 +1,318 @@ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +"use strict"; + +/* global add_setup, add_task, Assert, info, do_get_profile, do_timeout, registerCleanupFunction, Services */ + +const { HttpServer } = ChromeUtils.importESModule( + "resource://testing-common/httpd.sys.mjs" +); + +const { NetUtil } = ChromeUtils.importESModule( + "resource://gre/modules/NetUtil.sys.mjs" +); + +// Create a test HTTP server +let gHttpServer = null; +let gServerPort = -1; +let gServerURL = ""; + +// Track requests received by the server +let gRequestsReceived = []; + +/** + * Helper to decode potentially gzipped request body + */ +function decodeRequestBody(request) { + let bodyStream = request.bodyInputStream; + let avail = bodyStream.available(); + if (avail === 0) { + return ""; + } + + if ( + request.hasHeader("content-encoding") && + request.getHeader("content-encoding") === "gzip" + ) { + // For gzipped content, we'd need to decompress + // For now, just note it was gzipped + return "[gzipped content]"; + } + + return NetUtil.readInputStreamToString(bodyStream, avail); +} + +/** + * Setup function to initialize the HTTP server + */ +add_setup(async function () { + info("Setting up viaduct-necko test environment"); + + // FOG needs a 
profile directory to store its data + do_get_profile(); + + // Create and start the HTTP server + gHttpServer = new HttpServer(); + + // Register various test endpoints + setupTestEndpoints(); + + gHttpServer.start(-1); + gServerPort = gHttpServer.identity.primaryPort; + gServerURL = `http://localhost:${gServerPort}`; + + info(`Test HTTP server started on port ${gServerPort}`); + + // Set the telemetry port preference to use our test server + Services.prefs.setIntPref("telemetry.fog.test.localhost_port", gServerPort); + + // Enable telemetry upload (needed for viaduct to be used) + Services.prefs.setBoolPref("datareporting.healthreport.uploadEnabled", true); + + // Initialize FOG/Glean which should trigger viaduct-necko initialization + // This internally calls init_necko_backend() through GkRust_Init + Services.fog.testResetFOG(); + + registerCleanupFunction(async () => { + Services.prefs.clearUserPref("telemetry.fog.test.localhost_port"); + Services.prefs.clearUserPref("datareporting.healthreport.uploadEnabled"); + await new Promise(resolve => gHttpServer.stop(resolve)); + }); +}); + +/** + * Setup test endpoints on the HTTP server + */ +function setupTestEndpoints() { + // Glean telemetry submission endpoints + gHttpServer.registerPrefixHandler("/submit/", (request, response) => { + const path = request.path; + info(`Viaduct request received: ${request.method} ${path}`); + + // Read the request body + const body = decodeRequestBody(request); + + // Extract ping type from path + // Path format: /submit/firefox-desktop/{ping-type}/1/{uuid} + const pathParts = path.split("/"); + const pingType = pathParts[3] || "unknown"; + + gRequestsReceived.push({ + path, + method: request.method, + pingType, + body, + bodySize: request.hasHeader("content-length") + ? parseInt(request.getHeader("content-length"), 10) + : body.length, + headers: { + "content-type": request.hasHeader("content-type") + ? 
request.getHeader("content-type") + : null, + "content-encoding": request.hasHeader("content-encoding") + ? request.getHeader("content-encoding") + : null, + "user-agent": request.hasHeader("user-agent") + ? request.getHeader("user-agent") + : null, + }, + }); + + // Return a response similar to what a telemetry server would return + // Using 501 to match your test scenario + response.setStatusLine(request.httpVersion, 501, "Not Implemented"); + response.setHeader("Server", "TestServer/1.0 (Viaduct Backend)"); + response.setHeader("Date", new Date().toUTCString()); + response.setHeader("Connection", "close"); + response.setHeader("Content-Type", "application/json"); + + const responseBody = JSON.stringify({ + status: "not_implemented", + message: "Test server response", + }); + response.setHeader("Content-Length", responseBody.length.toString()); + response.write(responseBody); + }); +} + +/** + * Test that the viaduct-necko backend was initialized and is processing requests + */ +add_task(async function test_viaduct_backend_working() { + info("Testing viaduct-necko backend initialization and request processing"); + + // Clear previous requests (though some health pings may have already been sent) + const initialRequestCount = gRequestsReceived.length; + info( + `Already received ${initialRequestCount} requests during initialization` + ); + + // Wait for any pending requests to complete using do_timeout + // This ensures we capture the health pings that are automatically sent + await new Promise(resolve => do_timeout(500, resolve)); + + // Check that we've received requests through viaduct + Assert.ok( + !!gRequestsReceived.length, + `Viaduct-necko backend is processing requests. 
Received ${gRequestsReceived.length} requests.` + ); + + // Verify the requests are health pings (as shown in your logs) + const healthPings = gRequestsReceived.filter(r => r.pingType === "health"); + info(`Received ${healthPings.length} health pings through viaduct-necko`); + + // All telemetry submissions should be POST requests + for (const request of gRequestsReceived) { + Assert.equal( + request.method, + "POST", + `Request to ${request.path} should be POST` + ); + } + + // Log summary of what was processed + const pingTypes = [...new Set(gRequestsReceived.map(r => r.pingType))]; + info( + `Test successful: Viaduct-necko backend processed ${gRequestsReceived.length} requests` + ); + info(`Ping types received: ${pingTypes.join(", ")}`); + info( + `The C++ backend successfully handled requests from Rust through the FFI layer` + ); +}); + +/** + * Test different HTTP parameters and methods + * We verify different body sizes and headers are handled correctly + */ +add_task(async function test_different_parameters() { + info("Testing different HTTP parameters through viaduct-necko"); + + // Clear request tracking + gRequestsReceived = []; + + // Submit different types of pings with varying sizes + // This will test different body sizes and headers + + // Reset FOG to trigger new pings + Services.fog.testResetFOG(); + + // Wait to collect the requests + await new Promise(resolve => do_timeout(1000, resolve)); + + const requestsAfterReset = gRequestsReceived.length; + info(`Received ${requestsAfterReset} requests after FOG reset`); + + // Verify different content types and encodings were handled + const contentTypes = new Set(); + const contentEncodings = new Set(); + const bodySizes = new Set(); + + for (const request of gRequestsReceived) { + if (request.headers["content-type"]) { + contentTypes.add(request.headers["content-type"]); + } + if (request.headers["content-encoding"]) { + contentEncodings.add(request.headers["content-encoding"]); + } + if 
(request.bodySize) { + bodySizes.add(request.bodySize); + } + } + + info(`Content types seen: ${Array.from(contentTypes).join(", ")}`); + info(`Content encodings seen: ${Array.from(contentEncodings).join(", ")}`); + info( + `Body sizes seen: ${Array.from(bodySizes) + .sort((a, b) => a - b) + .join(", ")}` + ); + + Assert.ok( + !!gRequestsReceived.length, + "Different parameters were processed successfully" + ); + + // Verify we're seeing variation in body sizes (different ping types have different sizes) + Assert.ok( + bodySizes.size > 1, + `Multiple body sizes handled: ${Array.from(bodySizes).join(", ")}` + ); +}); + +/** + * Test that headers are properly passed through the FFI layer + */ +add_task(async function test_header_handling() { + info("Testing header handling through viaduct-necko"); + + // Check the headers that were sent in previous requests + let hasHeaders = false; + let headerCount = 0; + + for (const request of gRequestsReceived) { + if (request.headers && Object.keys(request.headers).length) { + hasHeaders = true; + const nonNullHeaders = Object.entries(request.headers).filter( + ([_, value]) => value !== null + ); + + if (nonNullHeaders.length) { + headerCount++; + info( + `Request headers found: ${JSON.stringify(Object.fromEntries(nonNullHeaders))}` + ); + } + } + } + + Assert.ok( + hasHeaders, + "Headers are properly transmitted through viaduct-necko" + ); + + Assert.ok(headerCount > 0, `Found ${headerCount} requests with headers`); + + // Verify specific headers we expect to see + const hasContentType = gRequestsReceived.some( + r => r.headers && r.headers["content-type"] !== null + ); + const hasContentEncoding = gRequestsReceived.some( + r => r.headers && r.headers["content-encoding"] !== null + ); + const hasUserAgent = gRequestsReceived.some( + r => r.headers && r.headers["user-agent"] !== null + ); + + Assert.ok(hasContentType, "Content-Type header is present"); + Assert.ok(hasContentEncoding, "Content-Encoding header is present"); + 
Assert.ok(hasUserAgent, "User-Agent header is present"); + + info("Headers are properly handled through the Rust → C++ → Necko chain"); +}); + +/** + * Test configuration validation + * While we can't directly test redirects and timeouts, we can verify + * that the configuration is being passed correctly from logs + */ +add_task(async function test_configuration_validation() { + info("Validating viaduct-necko configuration"); + + // We can verify at least that requests are completing successfully + // which means the configuration isn't breaking anything + const successfulRequests = gRequestsReceived.filter( + r => r.method === "POST" && r.path.includes("/submit/") + ); + + Assert.ok( + !!successfulRequests.length, + `Configuration is valid: ${successfulRequests.length} successful requests processed` + ); + + info( + "Viaduct-necko backend configuration validated through successful request processing" + ); +}); diff --git a/services/application-services/components/viaduct-necko/tests/xpcshell.toml b/services/application-services/components/viaduct-necko/tests/xpcshell.toml @@ -0,0 +1,5 @@ +[DEFAULT] +head = "" +firefox-appdir = "browser" + +["test_viaduct_necko_backend.js"] diff --git a/services/moz.build b/services/moz.build @@ -32,6 +32,7 @@ if not CONFIG["RELEASE_OR_BETA"] or CONFIG["MOZ_DEBUG"]: if CONFIG["MOZ_WIDGET_TOOLKIT"] != "android": DIRS += [ "fxaccounts", + "application-services/components/viaduct-necko", ] if CONFIG["MOZ_SERVICES_SYNC"]: diff --git a/supply-chain/audits.toml b/supply-chain/audits.toml @@ -4623,6 +4623,11 @@ The git branch is my fork of the official code that removes the `loom` target to This doesn't change any of the functionality -- the `loom` target is only used for testing. 
""" +[[audits.oneshot]] +who = "Bastian Gruber <foreach@me.com>" +criteria = "safe-to-deploy" +version = "0.1.11" + [[audits.oneshot-uniffi]] who = "Ben Dean-Kawamura <bdk@mozilla.com>" criteria = "safe-to-deploy" diff --git a/supply-chain/imports.lock b/supply-chain/imports.lock @@ -2807,6 +2807,7 @@ criteria = "safe-to-deploy" user-id = 213776 # divviup-github-automation start = "2020-09-28" end = "2026-01-07" +renew = false [[audits.isrg.audits.base64]] who = "Tim Geoghegan <timg@letsencrypt.org>" diff --git a/third_party/rust/oneshot/.cargo-checksum.json b/third_party/rust/oneshot/.cargo-checksum.json @@ -0,0 +1 @@ +{"files":{"CHANGELOG.md":"1e0a210e4c51ac6c7ddd08d8653fd81154569c9198ef2db2c7e1396434484937","Cargo.lock":"f8bfc4b346998db9bd5ab75351e51b988990378f85dc3a3be04102d3f4b226a9","Cargo.toml":"edeea486c050740833cdcaf23b0b23acc900cef59d676cb5f3e5d5bfe18879d8","LICENSE-APACHE":"62c7a1e35f56406896d7aa7ca52d0cc0d272ac022b5d2796e7d6905db8a3636a","LICENSE-MIT":"23f18e03dc49df91622fe2a76176497404e46ced8a715d9d2b67a7446571cca3","README.md":"811ea1c958d5a65583d0223b7ab09bb282e7a51ed60f9a2cb90ef6d555325a68","benches/benches.rs":"f60be1b186d362e84fb6cbb52119733706824833c8c4433e071d97d57544f5f5","check_mem_leaks.sh":"c1ab6ef27997c7f971352ab1c86a184004843c499bc24925da953aefcf1c624c","examples/recv_before_send.rs":"9a3cabcc2878990b61787d0048061b382555a8cd1a08b1ddec63a6e8a4a31e56","examples/recv_before_send_then_drop_sender.rs":"14706c6b4308a690662ceaa47f1699588bd833b3ec020eb9f42f220f3ffc7ae7","examples/recv_ref_before_send.rs":"43699f4720c46b5f138c260b866eb708ddf616e2b442ffa74a97373f4f48d4d0","examples/recv_ref_before_send_then_drop_sender.rs":"a190ed220cb4288d4965485365c9afaed30535cbfad5f8cb7389071b82d67cac","examples/recv_timeout_before_send.rs":"2262aa6531afce7816d43182ad9cbec2c04f3dc129064e11e89452278ce8b163","examples/recv_timeout_before_send_then_drop_sender.rs":"4cc8eade4c211f52f5b9be0f72a5906689b894490f4cb5255525e44106e7a4a8","examples/recv_with_dropped_sender.rs
":"7906685053ce1c53ff6c26ce11d3221d4bf5ca3429d1d4d2c28de9237cb151c6","examples/send_before_recv.rs":"5555bd61ad52273b663007794128d8f012fc54272bd3225259b5546221bcd591","examples/send_then_drop_receiver.rs":"c3612de207309098404b057468687a2d2311d07f354b7e046398e35e93c4cdcf","examples/send_with_dropped_receiver.rs":"f5a7762b231a24a0db4397c5139437cba155d09b9dbb59872d662c7923080706","src/errors.rs":"7c1a1e5c4c0215f01c273d294fa5db0dd4720456485bca0c021edecd0e221786","src/lib.rs":"1cc6b6f73321187f9e4a304eb4d86815586fd03a037265d78d24d62fe9f7fa7d","src/loombox.rs":"231b922504bd2be5e00dd21fe14392e7c34e66a4c2e23d598ffd9845dc0dd755","tests/assert_mem.rs":"b1e5190af01af22e55c7c1cd1ff2711807591f788e4eb8b6c6d89123e146105e","tests/async.rs":"843370475eefd741bd2d05231e054a1f6dbb2e26c03efb64cbcc18aca287d94f","tests/future.rs":"c42982570863ac13dfe97bfd0ba6f5266eac840a907b65877db27de8b8010eb9","tests/helpers/mod.rs":"1c597bcdd7cea357c91f36bf9f93acd83c7345233940916e6c6711e12e193ae4","tests/helpers/waker.rs":"77494d49f62d0d320df3830643c306e06e6e20751d210cf6fa58b238bd96c3f9","tests/loom.rs":"38f44782aa42106f58b04a1640cb4d14e4637bbad2cdcccbeafb80a4b6339f5b","tests/raw.rs":"04d8c0e5a0dba9018bc2ad6bfaf1c713b873a94dac79d84e57b86e05c3bf3312","tests/sync.rs":"2434bf7ec4206e528004fe4c02edc09db15c3cfb154bfb8836c9e9eda2f3b064"},"package":"b4ce411919553d3f9fa53a0880544cda985a112117a0444d5ff1e870a893d6ea"} +\ No newline at end of file diff --git a/third_party/rust/oneshot/CHANGELOG.md b/third_party/rust/oneshot/CHANGELOG.md @@ -0,0 +1,106 @@ +# Changelog +All notable changes to this project will be documented in this file. + +The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/) +and this project adheres to [Semantic Versioning](http://semver.org/spec/v2.0.0.html). + +### Categories each change fall into + +* **Added**: for new features. +* **Changed**: for changes in existing functionality. +* **Deprecated**: for soon-to-be removed features. 
+* **Removed**: for now removed features. +* **Fixed**: for any bug fixes. +* **Security**: in case of vulnerabilities. + + +## [Unreleased] + + +## [0.1.11] - 2025-02-22 +### Fixed +- Handle the `UNPARKING` state correctly in `Receiver::drop()`. Fixes a panic that could + occur if a `Receiver` had been first polled as a future and then was being dropped + in parallel with the `Sender` sending a message. + + +## [0.1.10] - 2025-02-04 +### Added +- Add `is_closed` and `has_message` to the `Receiver`. Allows polling for the channel + state without modifying the channel or pulling the message from it. +- Make the cargo features show up on docs.rs for better discoverability. + + +## [0.1.9] - 2025-02-02 +### Added +- Implement `Sync` for `Sender`. There is not a whole lot someone can do with a `&Sender`, + but this allows storing the sender in places that are overly conservative and require + a `Sync` bound on the content. + + +## [0.1.8] - 2024-06-13 +### Changed +- Change how loom concurrency testing is triggered. To get rid of `loom` in the dependency tree + `oneshot` pulls in, it has in addition to being gated behind `cfg(oneshot_loom)` also been made + an optional dependency. This makes this library way smaller for downstream consumers. + This has the downside that the crate now exposes a `loom` feature. + DOWNSTREAM USERS ARE NOT SUPPOSED TO EVER ENABLE THIS. No stability or semver + guarantees exist around the `loom` feature. + This change ultimately makes no difference for any user of `oneshot` in regular usage. + + +## [0.1.7] - 2024-05-24 +### Added +* Add `is_closed` method to the `Sender`. + + +## [0.1.6] - 2023-09-14 +### Added +* Add `into_raw` and `from_raw` methods on both `Sender` and `Receiver`. Allows passing `oneshot` + channels over FFI without an extra layer of heap allocation. + + +## [0.1.5] - 2022-09-01 +### Fixed +- Handle the UNPARKING state correctly in all recv methods. 
`try_recv` will now not panic + if used on a `Receiver` that is being unparked from an async wait. The other `recv` methods + will still panic (as they should), but with a better error message. + + +## [0.1.4] - 2022-08-30 +### Changed +- Upgrade to Rust edition 2021. Also increases the MSRV to Rust 1.60. +- Add null-pointer optimization to `Sender`, `Receiver` and `SendError`. + This reduces the call stack size of Sender::send and it makes + `Option<Sender>` and `Option<Receiver>` pointer sized (#18). +- Relax the memory ordering of all atomic operations from `SeqCst` to the most appropriate + lower ordering (#17 + #20). + +### Fixed +- Fix undefined behavior due to multiple mutable references to the same channel instance (#18). +- Fix race condition that could happen during unparking of a receiving `Receiver` (#17 + #20). + + +## [0.1.3] - 2021-11-23 +### Fixed +- Keep the *last* `Waker` in `Future::poll`, not the *first* one. Stops breaking the contract + on how futures should work. + + +## [0.1.2] - 2020-08-11 +### Fixed +- Fix unreachable code panic that happened if the `Receiver` of an empty but open channel was + polled and then dropped. + + +## [0.1.1] - 2020-05-10 +Initial implementation. Supports basically all the (for now) intended functionality. +Sender is as lock-free as I think it can get and the receiver can both do thread blocking +and be awaited asynchronously. The receiver also has a wait-free `try_recv` method. + +The crate has two features. They are activated by default, but the user can opt out of async +support as well as usage of libstd (making the crate `no_std` but still requiring liballoc) + + +## [0.1.0] - 2019-05-30 +Name reserved on crate.io by someone other than the author of this crate. diff --git a/third_party/rust/oneshot/Cargo.lock b/third_party/rust/oneshot/Cargo.lock @@ -0,0 +1,1499 @@ +# This file is automatically @generated by Cargo. +# It is not intended for manual editing. 
+version = 3 + +[[package]] +name = "addr2line" +version = "0.21.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8a30b2e23b9e17a9f90641c7ab1549cd9b44f296d3ccbf309d2863cfe398a0cb" +dependencies = [ + "gimli", +] + +[[package]] +name = "adler" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f26201604c87b1e01bd3d98f8d5d9a8fcbb815e8cedb41ffccbeb4bf593a35fe" + +[[package]] +name = "aho-corasick" +version = "1.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8e60d3430d3a69478ad0993f19238d2df97c507009a52b3c10addcd7f6bcb916" +dependencies = [ + "memchr", +] + +[[package]] +name = "anes" +version = "0.1.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4b46cbb362ab8752921c97e041f5e366ee6297bd428a31275b9fcf1e380f7299" + +[[package]] +name = "anstyle" +version = "1.0.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1bec1de6f59aedf83baf9ff929c98f2ad654b97c9510f4e70cf6f661d49fd5b1" + +[[package]] +name = "async-attributes" +version = "1.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a3203e79f4dd9bdda415ed03cf14dae5a2bf775c683a00f94e9cd1faf0f596e5" +dependencies = [ + "quote", + "syn 1.0.109", +] + +[[package]] +name = "async-channel" +version = "1.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "81953c529336010edd6d8e358f886d9581267795c61b19475b71314bffa46d35" +dependencies = [ + "concurrent-queue", + "event-listener 2.5.3", + "futures-core", +] + +[[package]] +name = "async-channel" +version = "2.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "136d4d23bcc79e27423727b36823d86233aad06dfea531837b038394d11e9928" +dependencies = [ + "concurrent-queue", + "event-listener 5.3.0", + "event-listener-strategy 0.5.2", + "futures-core", + "pin-project-lite", +] + +[[package]] +name = "async-executor" 
+version = "1.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b10202063978b3351199d68f8b22c4e47e4b1b822f8d43fd862d5ea8c006b29a" +dependencies = [ + "async-task", + "concurrent-queue", + "fastrand 2.1.0", + "futures-lite 2.3.0", + "slab", +] + +[[package]] +name = "async-global-executor" +version = "2.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "05b1b633a2115cd122d73b955eadd9916c18c8f510ec9cd1686404c60ad1c29c" +dependencies = [ + "async-channel 2.2.1", + "async-executor", + "async-io 2.3.2", + "async-lock 3.3.0", + "blocking", + "futures-lite 2.3.0", + "once_cell", +] + +[[package]] +name = "async-io" +version = "1.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0fc5b45d93ef0529756f812ca52e44c221b35341892d3dcc34132ac02f3dd2af" +dependencies = [ + "async-lock 2.8.0", + "autocfg", + "cfg-if", + "concurrent-queue", + "futures-lite 1.13.0", + "log", + "parking", + "polling 2.8.0", + "rustix 0.37.27", + "slab", + "socket2", + "waker-fn", +] + +[[package]] +name = "async-io" +version = "2.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dcccb0f599cfa2f8ace422d3555572f47424da5648a4382a9dd0310ff8210884" +dependencies = [ + "async-lock 3.3.0", + "cfg-if", + "concurrent-queue", + "futures-io", + "futures-lite 2.3.0", + "parking", + "polling 3.7.0", + "rustix 0.38.34", + "slab", + "tracing", + "windows-sys 0.52.0", +] + +[[package]] +name = "async-lock" +version = "2.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "287272293e9d8c41773cec55e365490fe034813a2f172f502d6ddcf75b2f582b" +dependencies = [ + "event-listener 2.5.3", +] + +[[package]] +name = "async-lock" +version = "3.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d034b430882f8381900d3fe6f0aaa3ad94f2cb4ac519b429692a1bc2dda4ae7b" +dependencies = [ + "event-listener 4.0.3", + "event-listener-strategy 
0.4.0", + "pin-project-lite", +] + +[[package]] +name = "async-std" +version = "1.12.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "62565bb4402e926b29953c785397c6dc0391b7b446e45008b0049eb43cec6f5d" +dependencies = [ + "async-attributes", + "async-channel 1.9.0", + "async-global-executor", + "async-io 1.13.0", + "async-lock 2.8.0", + "crossbeam-utils", + "futures-channel", + "futures-core", + "futures-io", + "futures-lite 1.13.0", + "gloo-timers", + "kv-log-macro", + "log", + "memchr", + "once_cell", + "pin-project-lite", + "pin-utils", + "slab", + "wasm-bindgen-futures", +] + +[[package]] +name = "async-task" +version = "4.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8b75356056920673b02621b35afd0f7dda9306d03c79a30f5c56c44cf256e3de" + +[[package]] +name = "atomic-waker" +version = "1.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1505bd5d3d116872e7271a6d4e16d81d0c8570876c8de68093a09ac269d8aac0" + +[[package]] +name = "autocfg" +version = "1.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0c4b4d0bd25bd0b74681c0ad21497610ce1b7c91b1022cd21c80c6fbdd9476b0" + +[[package]] +name = "backtrace" +version = "0.3.71" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "26b05800d2e817c8b3b4b54abd461726265fa9789ae34330622f2db9ee696f9d" +dependencies = [ + "addr2line", + "cc", + "cfg-if", + "libc", + "miniz_oxide", + "object", + "rustc-demangle", +] + +[[package]] +name = "bitflags" +version = "1.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" + +[[package]] +name = "bitflags" +version = "2.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cf4b9d6a944f767f8e5e0db018570623c85f3d925ac718db4e06d0187adb21c1" + +[[package]] +name = "blocking" +version = "1.6.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "495f7104e962b7356f0aeb34247aca1fe7d2e783b346582db7f2904cb5717e88" +dependencies = [ + "async-channel 2.2.1", + "async-lock 3.3.0", + "async-task", + "futures-io", + "futures-lite 2.3.0", + "piper", +] + +[[package]] +name = "bumpalo" +version = "3.16.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "79296716171880943b8470b5f8d03aa55eb2e645a4874bdbb28adb49162e012c" + +[[package]] +name = "cast" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "37b2a672a2cb129a2e41c10b1224bb368f9f37a2b16b612598138befd7b37eb5" + +[[package]] +name = "cc" +version = "1.0.97" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "099a5357d84c4c61eb35fc8eafa9a79a902c2f76911e5747ced4e032edd8d9b4" + +[[package]] +name = "cfg-if" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" + +[[package]] +name = "ciborium" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "42e69ffd6f0917f5c029256a24d0161db17cea3997d185db0d35926308770f0e" +dependencies = [ + "ciborium-io", + "ciborium-ll", + "serde", +] + +[[package]] +name = "ciborium-io" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "05afea1e0a06c9be33d539b876f1ce3692f4afea2cb41f740e7743225ed1c757" + +[[package]] +name = "ciborium-ll" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "57663b653d948a338bfb3eeba9bb2fd5fcfaecb9e199e87e1eda4d9e8b240fd9" +dependencies = [ + "ciborium-io", + "half", +] + +[[package]] +name = "clap" +version = "4.5.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b0956a43b323ac1afaffc053ed5c4b7c1f1800bacd1683c353aabbb752515dd3" +dependencies = [ + "clap_builder", +] 
+ +[[package]] +name = "clap_builder" +version = "4.5.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4d72166dd41634086d5803a47eb71ae740e61d84709c36f3c34110173db3961b" +dependencies = [ + "anstyle", + "clap_lex", +] + +[[package]] +name = "clap_lex" +version = "0.7.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1462739cb27611015575c0c11df5df7601141071f07518d56fcc1be504cbec97" + +[[package]] +name = "concurrent-queue" +version = "2.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4ca0197aee26d1ae37445ee532fefce43251d24cc7c166799f4d46817f1d3973" +dependencies = [ + "crossbeam-utils", +] + +[[package]] +name = "criterion" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f2b12d017a929603d80db1831cd3a24082f8137ce19c69e6447f54f5fc8d692f" +dependencies = [ + "anes", + "cast", + "ciborium", + "clap", + "criterion-plot", + "is-terminal", + "itertools", + "num-traits", + "once_cell", + "oorandom", + "plotters", + "rayon", + "regex", + "serde", + "serde_derive", + "serde_json", + "tinytemplate", + "walkdir", +] + +[[package]] +name = "criterion-plot" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6b50826342786a51a89e2da3a28f1c32b06e387201bc2d19791f622c673706b1" +dependencies = [ + "cast", + "itertools", +] + +[[package]] +name = "crossbeam-deque" +version = "0.8.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "613f8cc01fe9cf1a3eb3d7f488fd2fa8388403e97039e2f73692932e291a770d" +dependencies = [ + "crossbeam-epoch", + "crossbeam-utils", +] + +[[package]] +name = "crossbeam-epoch" +version = "0.9.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5b82ac4a3c2ca9c3460964f020e1402edd5753411d7737aa39c3714ad1b5420e" +dependencies = [ + "crossbeam-utils", +] + +[[package]] +name = "crossbeam-utils" +version = "0.8.19" 
+source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "248e3bacc7dc6baa3b21e405ee045c3047101a49145e7e9eca583ab4c2ca5345" + +[[package]] +name = "crunchy" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7a81dae078cea95a014a339291cec439d2f232ebe854a9d672b796c6afafa9b7" + +[[package]] +name = "either" +version = "1.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a47c1c47d2f5964e29c61246e81db715514cd532db6b5116a25ea3c03d6780a2" + +[[package]] +name = "errno" +version = "0.3.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "534c5cf6194dfab3db3242765c03bbe257cf92f22b38f6bc0c58d59108a820ba" +dependencies = [ + "libc", + "windows-sys 0.52.0", +] + +[[package]] +name = "event-listener" +version = "2.5.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0206175f82b8d6bf6652ff7d71a1e27fd2e4efde587fd368662814d6ec1d9ce0" + +[[package]] +name = "event-listener" +version = "4.0.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "67b215c49b2b248c855fb73579eb1f4f26c38ffdc12973e20e07b91d78d5646e" +dependencies = [ + "concurrent-queue", + "parking", + "pin-project-lite", +] + +[[package]] +name = "event-listener" +version = "5.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6d9944b8ca13534cdfb2800775f8dd4902ff3fc75a50101466decadfdf322a24" +dependencies = [ + "concurrent-queue", + "parking", + "pin-project-lite", +] + +[[package]] +name = "event-listener-strategy" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "958e4d70b6d5e81971bebec42271ec641e7ff4e170a6fa605f2b8a8b65cb97d3" +dependencies = [ + "event-listener 4.0.3", + "pin-project-lite", +] + +[[package]] +name = "event-listener-strategy" +version = "0.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"0f214dc438f977e6d4e3500aaa277f5ad94ca83fbbd9b1a15713ce2344ccc5a1" +dependencies = [ + "event-listener 5.3.0", + "pin-project-lite", +] + +[[package]] +name = "fastrand" +version = "1.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e51093e27b0797c359783294ca4f0a911c270184cb10f85783b118614a1501be" +dependencies = [ + "instant", +] + +[[package]] +name = "fastrand" +version = "2.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9fc0510504f03c51ada170672ac806f1f105a88aa97a5281117e1ddc3368e51a" + +[[package]] +name = "futures-channel" +version = "0.3.30" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "eac8f7d7865dcb88bd4373ab671c8cf4508703796caa2b1985a9ca867b3fcb78" +dependencies = [ + "futures-core", +] + +[[package]] +name = "futures-core" +version = "0.3.30" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dfc6580bb841c5a68e9ef15c77ccc837b40a7504914d52e47b8b0e9bbda25a1d" + +[[package]] +name = "futures-io" +version = "0.3.30" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a44623e20b9681a318efdd71c299b6b222ed6f231972bfe2f224ebad6311f0c1" + +[[package]] +name = "futures-lite" +version = "1.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "49a9d51ce47660b1e808d3c990b4709f2f415d928835a17dfd16991515c46bce" +dependencies = [ + "fastrand 1.9.0", + "futures-core", + "futures-io", + "memchr", + "parking", + "pin-project-lite", + "waker-fn", +] + +[[package]] +name = "futures-lite" +version = "2.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "52527eb5074e35e9339c6b4e8d12600c7128b68fb25dcb9fa9dec18f7c25f3a5" +dependencies = [ + "fastrand 2.1.0", + "futures-core", + "futures-io", + "parking", + "pin-project-lite", +] + +[[package]] +name = "generator" +version = "0.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"186014d53bc231d0090ef8d6f03e0920c54d85a5ed22f4f2f74315ec56cf83fb" +dependencies = [ + "cc", + "cfg-if", + "libc", + "log", + "rustversion", + "windows", +] + +[[package]] +name = "gimli" +version = "0.28.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4271d37baee1b8c7e4b708028c57d816cf9d2434acb33a549475f78c181f6253" + +[[package]] +name = "gloo-timers" +version = "0.2.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9b995a66bb87bebce9a0f4a95aed01daca4872c050bfcb21653361c03bc35e5c" +dependencies = [ + "futures-channel", + "futures-core", + "js-sys", + "wasm-bindgen", +] + +[[package]] +name = "half" +version = "2.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6dd08c532ae367adf81c312a4580bc67f1d0fe8bc9c460520283f4c0ff277888" +dependencies = [ + "cfg-if", + "crunchy", +] + +[[package]] +name = "hermit-abi" +version = "0.3.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d231dfb89cfffdbc30e7fc41579ed6066ad03abda9e567ccafae602b97ec5024" + +[[package]] +name = "hermit-abi" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fbf6a919d6cf397374f7dfeeea91d974c7c0a7221d0d0f4f20d859d329e53fcc" + +[[package]] +name = "instant" +version = "0.1.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7a5bbe824c507c5da5956355e86a746d82e0e1464f65d862cc5e71da70e94b2c" +dependencies = [ + "cfg-if", +] + +[[package]] +name = "io-lifetimes" +version = "1.0.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "eae7b9aee968036d54dce06cebaefd919e4472e753296daccd6d344e3e2df0c2" +dependencies = [ + "hermit-abi 0.3.9", + "libc", + "windows-sys 0.48.0", +] + +[[package]] +name = "is-terminal" +version = "0.4.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "261f68e344040fbd0edea105bef17c66edf46f984ddb1115b775ce31be948f4b" 
+dependencies = [ + "hermit-abi 0.4.0", + "libc", + "windows-sys 0.52.0", +] + +[[package]] +name = "itertools" +version = "0.10.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b0fd2260e829bddf4cb6ea802289de2f86d6a7a690192fbe91b3f46e0f2c8473" +dependencies = [ + "either", +] + +[[package]] +name = "itoa" +version = "1.0.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "49f1f14873335454500d59611f1cf4a4b0f786f9ac11f4312a78e4cf2566695b" + +[[package]] +name = "js-sys" +version = "0.3.69" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "29c15563dc2726973df627357ce0c9ddddbea194836909d655df6a75d2cf296d" +dependencies = [ + "wasm-bindgen", +] + +[[package]] +name = "kv-log-macro" +version = "1.0.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0de8b303297635ad57c9f5059fd9cee7a47f8e8daa09df0fcd07dd39fb22977f" +dependencies = [ + "log", +] + +[[package]] +name = "lazy_static" +version = "1.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" + +[[package]] +name = "libc" +version = "0.2.155" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "97b3888a4aecf77e811145cadf6eef5901f4782c53886191b2f693f24761847c" + +[[package]] +name = "linux-raw-sys" +version = "0.3.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ef53942eb7bf7ff43a617b3e2c1c4a5ecf5944a7c1bc12d7ee39bbb15e5c1519" + +[[package]] +name = "linux-raw-sys" +version = "0.4.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "01cda141df6706de531b6c46c3a33ecca755538219bd484262fa09410c13539c" + +[[package]] +name = "log" +version = "0.4.21" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "90ed8c1e510134f979dbc4f070f87d4313098b704861a105fe34231c70a3901c" +dependencies = [ + 
"value-bag", +] + +[[package]] +name = "loom" +version = "0.7.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "419e0dc8046cb947daa77eb95ae174acfbddb7673b4151f56d1eed8e93fbfaca" +dependencies = [ + "cfg-if", + "generator", + "pin-utils", + "scoped-tls", + "tracing", + "tracing-subscriber", +] + +[[package]] +name = "matchers" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8263075bb86c5a1b1427b5ae862e8889656f126e9f77c484496e8b47cf5c5558" +dependencies = [ + "regex-automata 0.1.10", +] + +[[package]] +name = "memchr" +version = "2.7.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6c8640c5d730cb13ebd907d8d04b52f55ac9a2eec55b440c8892f40d56c76c1d" + +[[package]] +name = "miniz_oxide" +version = "0.7.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9d811f3e15f28568be3407c8e7fdb6514c1cda3cb30683f15b6a1a1dc4ea14a7" +dependencies = [ + "adler", +] + +[[package]] +name = "nu-ansi-term" +version = "0.46.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "77a8165726e8236064dbb45459242600304b42a5ea24ee2948e18e023bf7ba84" +dependencies = [ + "overload", + "winapi", +] + +[[package]] +name = "num-traits" +version = "0.2.19" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "071dfc062690e90b734c0b2273ce72ad0ffa95f0c74596bc250dcfd960262841" +dependencies = [ + "autocfg", +] + +[[package]] +name = "num_cpus" +version = "1.16.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4161fcb6d602d4d2081af7c3a45852d875a03dd337a6bfdd6e06407b61342a43" +dependencies = [ + "hermit-abi 0.3.9", + "libc", +] + +[[package]] +name = "object" +version = "0.32.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a6a622008b6e321afc04970976f62ee297fdbaa6f95318ca343e3eebb9648441" +dependencies = [ + "memchr", +] + +[[package]] +name = 
"once_cell" +version = "1.19.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3fdb12b2476b595f9358c5161aa467c2438859caa136dec86c26fdd2efe17b92" + +[[package]] +name = "oneshot" +version = "0.1.11" +dependencies = [ + "async-std", + "criterion", + "loom", + "tokio", +] + +[[package]] +name = "oorandom" +version = "11.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0ab1bc2a289d34bd04a330323ac98a1b4bc82c9d9fcb1e66b63caa84da26b575" + +[[package]] +name = "overload" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b15813163c1d831bf4a13c3610c05c0d03b39feb07f7e09fa234dac9b15aaf39" + +[[package]] +name = "parking" +version = "2.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bb813b8af86854136c6922af0598d719255ecb2179515e6e7730d468f05c9cae" + +[[package]] +name = "pin-project-lite" +version = "0.2.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bda66fc9667c18cb2758a2ac84d1167245054bcf85d5d1aaa6923f45801bdd02" + +[[package]] +name = "pin-utils" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" + +[[package]] +name = "piper" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "668d31b1c4eba19242f2088b2bf3316b82ca31082a8335764db4e083db7485d4" +dependencies = [ + "atomic-waker", + "fastrand 2.1.0", + "futures-io", +] + +[[package]] +name = "plotters" +version = "0.3.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d2c224ba00d7cadd4d5c660deaf2098e5e80e07846537c51f9cfa4be50c1fd45" +dependencies = [ + "num-traits", + "plotters-backend", + "plotters-svg", + "wasm-bindgen", + "web-sys", +] + +[[package]] +name = "plotters-backend" +version = "0.3.5" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "9e76628b4d3a7581389a35d5b6e2139607ad7c75b17aed325f210aa91f4a9609" + +[[package]] +name = "plotters-svg" +version = "0.3.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "38f6d39893cca0701371e3c27294f09797214b86f1fb951b89ade8ec04e2abab" +dependencies = [ + "plotters-backend", +] + +[[package]] +name = "polling" +version = "2.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4b2d323e8ca7996b3e23126511a523f7e62924d93ecd5ae73b333815b0eb3dce" +dependencies = [ + "autocfg", + "bitflags 1.3.2", + "cfg-if", + "concurrent-queue", + "libc", + "log", + "pin-project-lite", + "windows-sys 0.48.0", +] + +[[package]] +name = "polling" +version = "3.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "645493cf344456ef24219d02a768cf1fb92ddf8c92161679ae3d91b91a637be3" +dependencies = [ + "cfg-if", + "concurrent-queue", + "hermit-abi 0.3.9", + "pin-project-lite", + "rustix 0.38.34", + "tracing", + "windows-sys 0.52.0", +] + +[[package]] +name = "proc-macro2" +version = "1.0.82" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8ad3d49ab951a01fbaafe34f2ec74122942fe18a3f9814c3268f1bb72042131b" +dependencies = [ + "unicode-ident", +] + +[[package]] +name = "quote" +version = "1.0.36" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0fa76aaf39101c457836aec0ce2316dbdc3ab723cdda1c6bd4e6ad4208acaca7" +dependencies = [ + "proc-macro2", +] + +[[package]] +name = "rayon" +version = "1.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b418a60154510ca1a002a752ca9714984e21e4241e804d32555251faf8b78ffa" +dependencies = [ + "either", + "rayon-core", +] + +[[package]] +name = "rayon-core" +version = "1.12.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"1465873a3dfdaa8ae7cb14b4383657caab0b3e8a0aa9ae8e04b044854c8dfce2" +dependencies = [ + "crossbeam-deque", + "crossbeam-utils", +] + +[[package]] +name = "regex" +version = "1.10.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c117dbdfde9c8308975b6a18d71f3f385c89461f7b3fb054288ecf2a2058ba4c" +dependencies = [ + "aho-corasick", + "memchr", + "regex-automata 0.4.6", + "regex-syntax 0.8.3", +] + +[[package]] +name = "regex-automata" +version = "0.1.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6c230d73fb8d8c1b9c0b3135c5142a8acee3a0558fb8db5cf1cb65f8d7862132" +dependencies = [ + "regex-syntax 0.6.29", +] + +[[package]] +name = "regex-automata" +version = "0.4.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "86b83b8b9847f9bf95ef68afb0b8e6cdb80f498442f5179a29fad448fcc1eaea" +dependencies = [ + "aho-corasick", + "memchr", + "regex-syntax 0.8.3", +] + +[[package]] +name = "regex-syntax" +version = "0.6.29" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f162c6dd7b008981e4d40210aca20b4bd0f9b60ca9271061b07f78537722f2e1" + +[[package]] +name = "regex-syntax" +version = "0.8.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "adad44e29e4c806119491a7f06f03de4d1af22c3a680dd47f1e6e179439d1f56" + +[[package]] +name = "rustc-demangle" +version = "0.1.24" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "719b953e2095829ee67db738b3bfa9fa368c94900df327b3f07fe6e794d2fe1f" + +[[package]] +name = "rustix" +version = "0.37.27" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fea8ca367a3a01fe35e6943c400addf443c0f57670e6ec51196f71a4b8762dd2" +dependencies = [ + "bitflags 1.3.2", + "errno", + "io-lifetimes", + "libc", + "linux-raw-sys 0.3.8", + "windows-sys 0.48.0", +] + +[[package]] +name = "rustix" +version = "0.38.34" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "70dc5ec042f7a43c4a73241207cecc9873a06d45debb38b329f8541d85c2730f" +dependencies = [ + "bitflags 2.5.0", + "errno", + "libc", + "linux-raw-sys 0.4.13", + "windows-sys 0.52.0", +] + +[[package]] +name = "rustversion" +version = "1.0.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "092474d1a01ea8278f69e6a358998405fae5b8b963ddaeb2b0b04a128bf1dfb0" + +[[package]] +name = "ryu" +version = "1.0.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f3cb5ba0dc43242ce17de99c180e96db90b235b8a9fdc9543c96d2209116bd9f" + +[[package]] +name = "same-file" +version = "1.0.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "93fc1dc3aaa9bfed95e02e6eadabb4baf7e3078b0bd1b4d7b6b0b68378900502" +dependencies = [ + "winapi-util", +] + +[[package]] +name = "scoped-tls" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e1cf6437eb19a8f4a6cc0f7dca544973b0b78843adbfeb3683d1a94a0024a294" + +[[package]] +name = "serde" +version = "1.0.201" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "780f1cebed1629e4753a1a38a3c72d30b97ec044f0aef68cb26650a3c5cf363c" +dependencies = [ + "serde_derive", +] + +[[package]] +name = "serde_derive" +version = "1.0.201" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c5e405930b9796f1c00bee880d03fc7e0bb4b9a11afc776885ffe84320da2865" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.61", +] + +[[package]] +name = "serde_json" +version = "1.0.117" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "455182ea6142b14f93f4bc5320a2b31c1f266b66a4a5c858b013302a5d8cbfc3" +dependencies = [ + "itoa", + "ryu", + "serde", +] + +[[package]] +name = "sharded-slab" +version = "0.1.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"f40ca3c46823713e0d4209592e8d6e826aa57e928f09752619fc696c499637f6" +dependencies = [ + "lazy_static", +] + +[[package]] +name = "slab" +version = "0.4.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8f92a496fb766b417c996b9c5e57daf2f7ad3b0bebe1ccfca4856390e3d3bb67" +dependencies = [ + "autocfg", +] + +[[package]] +name = "smallvec" +version = "1.13.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3c5e1a9a646d36c3599cd173a41282daf47c44583ad367b8e6837255952e5c67" + +[[package]] +name = "socket2" +version = "0.4.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9f7916fc008ca5542385b89a3d3ce689953c143e9304a9bf8beec1de48994c0d" +dependencies = [ + "libc", + "winapi", +] + +[[package]] +name = "syn" +version = "1.0.109" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "72b64191b275b66ffe2469e8af2c1cfe3bafa67b529ead792a6d0160888b4237" +dependencies = [ + "proc-macro2", + "quote", + "unicode-ident", +] + +[[package]] +name = "syn" +version = "2.0.61" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c993ed8ccba56ae856363b1845da7266a7cb78e1d146c8a32d54b45a8b831fc9" +dependencies = [ + "proc-macro2", + "quote", + "unicode-ident", +] + +[[package]] +name = "thread_local" +version = "1.1.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8b9ef9bad013ada3808854ceac7b46812a6465ba368859a37e2100283d2d719c" +dependencies = [ + "cfg-if", + "once_cell", +] + +[[package]] +name = "tinytemplate" +version = "1.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "be4d6b5f19ff7664e8c98d03e2139cb510db9b0a60b55f8e8709b689d939b6bc" +dependencies = [ + "serde", + "serde_json", +] + +[[package]] +name = "tokio" +version = "1.37.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1adbebffeca75fcfd058afa480fb6c0b81e165a0323f9c9d39c9697e37c46787" 
+dependencies = [ + "backtrace", + "num_cpus", + "pin-project-lite", + "tokio-macros", +] + +[[package]] +name = "tokio-macros" +version = "2.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5b8a1e28f2deaa14e508979454cb3a223b10b938b45af148bc0986de36f1923b" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.61", +] + +[[package]] +name = "tracing" +version = "0.1.40" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c3523ab5a71916ccf420eebdf5521fcef02141234bbc0b8a49f2fdc4544364ef" +dependencies = [ + "pin-project-lite", + "tracing-core", +] + +[[package]] +name = "tracing-core" +version = "0.1.32" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c06d3da6113f116aaee68e4d601191614c9053067f9ab7f6edbcb161237daa54" +dependencies = [ + "once_cell", + "valuable", +] + +[[package]] +name = "tracing-log" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ee855f1f400bd0e5c02d150ae5de3840039a3f54b025156404e34c23c03f47c3" +dependencies = [ + "log", + "once_cell", + "tracing-core", +] + +[[package]] +name = "tracing-subscriber" +version = "0.3.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ad0f048c97dbd9faa9b7df56362b8ebcaa52adb06b498c050d2f4e32f90a7a8b" +dependencies = [ + "matchers", + "nu-ansi-term", + "once_cell", + "regex", + "sharded-slab", + "smallvec", + "thread_local", + "tracing", + "tracing-core", + "tracing-log", +] + +[[package]] +name = "unicode-ident" +version = "1.0.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3354b9ac3fae1ff6755cb6db53683adb661634f67557942dea4facebec0fee4b" + +[[package]] +name = "valuable" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "830b7e5d4d90034032940e4ace0d9a9a057e7a45cd94e6c007832e39edb82f6d" + +[[package]] +name = "value-bag" +version = "1.9.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "5a84c137d37ab0142f0f2ddfe332651fdbf252e7b7dbb4e67b6c1f1b2e925101" + +[[package]] +name = "waker-fn" +version = "1.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f3c4517f54858c779bbcbf228f4fca63d121bf85fbecb2dc578cdf4a39395690" + +[[package]] +name = "walkdir" +version = "2.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "29790946404f91d9c5d06f9874efddea1dc06c5efe94541a7d6863108e3a5e4b" +dependencies = [ + "same-file", + "winapi-util", +] + +[[package]] +name = "wasm-bindgen" +version = "0.2.92" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4be2531df63900aeb2bca0daaaddec08491ee64ceecbee5076636a3b026795a8" +dependencies = [ + "cfg-if", + "wasm-bindgen-macro", +] + +[[package]] +name = "wasm-bindgen-backend" +version = "0.2.92" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "614d787b966d3989fa7bb98a654e369c762374fd3213d212cfc0251257e747da" +dependencies = [ + "bumpalo", + "log", + "once_cell", + "proc-macro2", + "quote", + "syn 2.0.61", + "wasm-bindgen-shared", +] + +[[package]] +name = "wasm-bindgen-futures" +version = "0.4.42" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "76bc14366121efc8dbb487ab05bcc9d346b3b5ec0eaa76e46594cabbe51762c0" +dependencies = [ + "cfg-if", + "js-sys", + "wasm-bindgen", + "web-sys", +] + +[[package]] +name = "wasm-bindgen-macro" +version = "0.2.92" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a1f8823de937b71b9460c0c34e25f3da88250760bec0ebac694b49997550d726" +dependencies = [ + "quote", + "wasm-bindgen-macro-support", +] + +[[package]] +name = "wasm-bindgen-macro-support" +version = "0.2.92" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e94f17b526d0a461a191c78ea52bbce64071ed5c04c9ffe424dcb38f74171bb7" +dependencies = [ + "proc-macro2", + 
"quote", + "syn 2.0.61", + "wasm-bindgen-backend", + "wasm-bindgen-shared", +] + +[[package]] +name = "wasm-bindgen-shared" +version = "0.2.92" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "af190c94f2773fdb3729c55b007a722abb5384da03bc0986df4c289bf5567e96" + +[[package]] +name = "web-sys" +version = "0.3.69" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "77afa9a11836342370f4817622a2f0f418b134426d91a82dfb48f532d2ec13ef" +dependencies = [ + "js-sys", + "wasm-bindgen", +] + +[[package]] +name = "winapi" +version = "0.3.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419" +dependencies = [ + "winapi-i686-pc-windows-gnu", + "winapi-x86_64-pc-windows-gnu", +] + +[[package]] +name = "winapi-i686-pc-windows-gnu" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" + +[[package]] +name = "winapi-util" +version = "0.1.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4d4cc384e1e73b93bafa6fb4f1df8c41695c8a91cf9c4c64358067d15a7b6c6b" +dependencies = [ + "windows-sys 0.52.0", +] + +[[package]] +name = "winapi-x86_64-pc-windows-gnu" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" + +[[package]] +name = "windows" +version = "0.54.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9252e5725dbed82865af151df558e754e4a3c2c30818359eb17465f1346a1b49" +dependencies = [ + "windows-core", + "windows-targets 0.52.5", +] + +[[package]] +name = "windows-core" +version = "0.54.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "12661b9c89351d684a50a8a643ce5f608e20243b9fb84687800163429f161d65" +dependencies = [ + 
"windows-result", + "windows-targets 0.52.5", +] + +[[package]] +name = "windows-result" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "749f0da9cc72d82e600d8d2e44cadd0b9eedb9038f71a1c58556ac1c5791813b" +dependencies = [ + "windows-targets 0.52.5", +] + +[[package]] +name = "windows-sys" +version = "0.48.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "677d2418bec65e3338edb076e806bc1ec15693c5d0104683f2efe857f61056a9" +dependencies = [ + "windows-targets 0.48.5", +] + +[[package]] +name = "windows-sys" +version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "282be5f36a8ce781fad8c8ae18fa3f9beff57ec1b52cb3de0789201425d9a33d" +dependencies = [ + "windows-targets 0.52.5", +] + +[[package]] +name = "windows-targets" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9a2fa6e2155d7247be68c096456083145c183cbbbc2764150dda45a87197940c" +dependencies = [ + "windows_aarch64_gnullvm 0.48.5", + "windows_aarch64_msvc 0.48.5", + "windows_i686_gnu 0.48.5", + "windows_i686_msvc 0.48.5", + "windows_x86_64_gnu 0.48.5", + "windows_x86_64_gnullvm 0.48.5", + "windows_x86_64_msvc 0.48.5", +] + +[[package]] +name = "windows-targets" +version = "0.52.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6f0713a46559409d202e70e28227288446bf7841d3211583a4b53e3f6d96e7eb" +dependencies = [ + "windows_aarch64_gnullvm 0.52.5", + "windows_aarch64_msvc 0.52.5", + "windows_i686_gnu 0.52.5", + "windows_i686_gnullvm", + "windows_i686_msvc 0.52.5", + "windows_x86_64_gnu 0.52.5", + "windows_x86_64_gnullvm 0.52.5", + "windows_x86_64_msvc 0.52.5", +] + +[[package]] +name = "windows_aarch64_gnullvm" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2b38e32f0abccf9987a4e3079dfb67dcd799fb61361e53e2882c3cbaf0d905d8" + +[[package]] +name = "windows_aarch64_gnullvm" 
+version = "0.52.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7088eed71e8b8dda258ecc8bac5fb1153c5cffaf2578fc8ff5d61e23578d3263" + +[[package]] +name = "windows_aarch64_msvc" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dc35310971f3b2dbbf3f0690a219f40e2d9afcf64f9ab7cc1be722937c26b4bc" + +[[package]] +name = "windows_aarch64_msvc" +version = "0.52.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9985fd1504e250c615ca5f281c3f7a6da76213ebd5ccc9561496568a2752afb6" + +[[package]] +name = "windows_i686_gnu" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a75915e7def60c94dcef72200b9a8e58e5091744960da64ec734a6c6e9b3743e" + +[[package]] +name = "windows_i686_gnu" +version = "0.52.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "88ba073cf16d5372720ec942a8ccbf61626074c6d4dd2e745299726ce8b89670" + +[[package]] +name = "windows_i686_gnullvm" +version = "0.52.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "87f4261229030a858f36b459e748ae97545d6f1ec60e5e0d6a3d32e0dc232ee9" + +[[package]] +name = "windows_i686_msvc" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8f55c233f70c4b27f66c523580f78f1004e8b5a8b659e05a4eb49d4166cca406" + +[[package]] +name = "windows_i686_msvc" +version = "0.52.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "db3c2bf3d13d5b658be73463284eaf12830ac9a26a90c717b7f771dfe97487bf" + +[[package]] +name = "windows_x86_64_gnu" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "53d40abd2583d23e4718fddf1ebec84dbff8381c07cae67ff7768bbf19c6718e" + +[[package]] +name = "windows_x86_64_gnu" +version = "0.52.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"4e4246f76bdeff09eb48875a0fd3e2af6aada79d409d33011886d3e1581517d9" + +[[package]] +name = "windows_x86_64_gnullvm" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0b7b52767868a23d5bab768e390dc5f5c55825b6d30b86c844ff2dc7414044cc" + +[[package]] +name = "windows_x86_64_gnullvm" +version = "0.52.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "852298e482cd67c356ddd9570386e2862b5673c85bd5f88df9ab6802b334c596" + +[[package]] +name = "windows_x86_64_msvc" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ed94fce61571a4006852b7389a063ab983c02eb1bb37b47f8272ce92d06d9538" + +[[package]] +name = "windows_x86_64_msvc" +version = "0.52.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bec47e5bfd1bff0eeaf6d8b485cc1074891a197ab4225d504cb7a1ab88b02bf0" diff --git a/third_party/rust/oneshot/Cargo.toml b/third_party/rust/oneshot/Cargo.toml @@ -0,0 +1,159 @@ +# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO +# +# When uploading crates to the registry Cargo will automatically +# "normalize" Cargo.toml files for maximal compatibility +# with all versions of Cargo and also rewrite `path` dependencies +# to registry (e.g., crates.io) dependencies. +# +# If you are reading this file be aware that the original Cargo.toml +# will likely look very different (and much more reasonable). +# See Cargo.toml.orig for the original contents. + +[package] +edition = "2021" +rust-version = "1.60.0" +name = "oneshot" +version = "0.1.11" +authors = ["Linus Färnstrand <faern@faern.net>"] +build = false +autolib = false +autobins = false +autoexamples = false +autotests = false +autobenches = false +description = """ +Oneshot spsc channel with (potentially) lock-free non-blocking send, and a receiver supporting +both thread blocking receive operations as well as Future based async polling. 
+""" +readme = "README.md" +keywords = [ + "oneshot", + "spsc", + "async", + "sync", + "channel", +] +categories = [ + "asynchronous", + "concurrency", +] +license = "MIT OR Apache-2.0" +repository = "https://github.com/faern/oneshot" + +[package.metadata.docs.rs] +all-features = true +rustdoc-args = [ + "--cfg", + "docsrs", +] + +[features] +async = [] +default = [ + "std", + "async", +] +std = [] + +[lib] +name = "oneshot" +path = "src/lib.rs" + +[[example]] +name = "recv_before_send" +path = "examples/recv_before_send.rs" + +[[example]] +name = "recv_before_send_then_drop_sender" +path = "examples/recv_before_send_then_drop_sender.rs" + +[[example]] +name = "recv_ref_before_send" +path = "examples/recv_ref_before_send.rs" + +[[example]] +name = "recv_ref_before_send_then_drop_sender" +path = "examples/recv_ref_before_send_then_drop_sender.rs" + +[[example]] +name = "recv_timeout_before_send" +path = "examples/recv_timeout_before_send.rs" + +[[example]] +name = "recv_timeout_before_send_then_drop_sender" +path = "examples/recv_timeout_before_send_then_drop_sender.rs" + +[[example]] +name = "recv_with_dropped_sender" +path = "examples/recv_with_dropped_sender.rs" + +[[example]] +name = "send_before_recv" +path = "examples/send_before_recv.rs" + +[[example]] +name = "send_then_drop_receiver" +path = "examples/send_then_drop_receiver.rs" + +[[example]] +name = "send_with_dropped_receiver" +path = "examples/send_with_dropped_receiver.rs" + +[[test]] +name = "assert_mem" +path = "tests/assert_mem.rs" + +[[test]] +name = "async" +path = "tests/async.rs" + +[[test]] +name = "future" +path = "tests/future.rs" + +[[test]] +name = "loom" +path = "tests/loom.rs" + +[[test]] +name = "raw" +path = "tests/raw.rs" + +[[test]] +name = "sync" +path = "tests/sync.rs" + +[[bench]] +name = "benches" +path = "benches/benches.rs" +harness = false + +[dev-dependencies.async-std] +version = "1" +features = ["attributes"] + +[dev-dependencies.tokio] +version = "1" +features = [ + "rt", + 
"rt-multi-thread", + "macros", + "time", +] + +[target."cfg(criterion)".dev-dependencies.criterion] +version = "0.5.1" + +[target."cfg(oneshot_loom)".dependencies.loom] +version = "0.7.2" +features = ["futures"] +optional = true + +[lints.rust.unexpected_cfgs] +level = "deny" +priority = 0 +check-cfg = [ + "cfg(oneshot_loom)", + "cfg(oneshot_test_delay)", + "cfg(criterion)", +] diff --git a/third_party/rust/oneshot/LICENSE-APACHE b/third_party/rust/oneshot/LICENSE-APACHE @@ -0,0 +1,176 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + +2. Grant of Copyright License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + +3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + +4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + +5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + +6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + +8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ +END OF TERMS AND CONDITIONS diff --git a/third_party/rust/oneshot/LICENSE-MIT b/third_party/rust/oneshot/LICENSE-MIT @@ -0,0 +1,23 @@ +Permission is hereby granted, free of charge, to any +person obtaining a copy of this software and associated +documentation files (the "Software"), to deal in the +Software without restriction, including without +limitation the rights to use, copy, modify, merge, +publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software +is furnished to do so, subject to the following +conditions: + +The above copyright notice and this permission notice +shall be included in all copies or substantial portions +of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF +ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED +TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A +PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR +IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +DEALINGS IN THE SOFTWARE. diff --git a/third_party/rust/oneshot/README.md b/third_party/rust/oneshot/README.md @@ -0,0 +1,94 @@ +# oneshot + +Oneshot spsc (single producer, single consumer) channel. Meaning each channel instance +can only transport a single message. This has a few nice outcomes. One thing is that +the implementation can be very efficient, utilizing the knowledge that there will +only be one message. But more importantly, it allows the API to be expressed in such +a way that certain edge cases that you don't want to care about when only sending a +single message on a channel does not exist. For example: The sender can't be copied +or cloned, and the send method takes ownership and consumes the sender. +So you are guaranteed, at the type level, that there can only be one message sent. 
+ +The sender's send method is non-blocking, and potentially lock- and wait-free. +See documentation on [Sender::send] for situations where it might not be fully wait-free. +The receiver supports both lock- and wait-free `try_recv` as well as indefinite and time +limited thread blocking receive operations. The receiver also implements `Future` and +supports asynchronously awaiting the message. + + +## Examples + +This example sets up a background worker that processes requests coming in on a standard +mpsc channel and replies on a oneshot channel provided with each request. The worker can +be interacted with both from sync and async contexts since the oneshot receiver +can receive both blocking and async. + +```rust +use std::sync::mpsc; +use std::thread; +use std::time::Duration; + +type Request = String; + +// Starts a background thread performing some computation on requests sent to it. +// Delivers the response back over a oneshot channel. +fn spawn_processing_thread() -> mpsc::Sender<(Request, oneshot::Sender<usize>)> { + let (request_sender, request_receiver) = mpsc::channel::<(Request, oneshot::Sender<usize>)>(); + thread::spawn(move || { + for (request_data, response_sender) in request_receiver.iter() { + let compute_operation = || request_data.len(); + let _ = response_sender.send(compute_operation()); // <- Send on the oneshot channel + } + }); + request_sender +} + +let processor = spawn_processing_thread(); + +// If compiled with `std` the library can receive messages with timeout on regular threads +#[cfg(feature = "std")] { + let (response_sender, response_receiver) = oneshot::channel(); + let request = Request::from("data from sync thread"); + + processor.send((request, response_sender)).expect("Processor down"); + match response_receiver.recv_timeout(Duration::from_secs(1)) { // <- Receive on the oneshot channel + Ok(result) => println!("Processor returned {}", result), + Err(oneshot::RecvTimeoutError::Timeout) => eprintln!("Processor was too 
slow"), + Err(oneshot::RecvTimeoutError::Disconnected) => panic!("Processor exited"), + } +} + +// If compiled with the `async` feature, the `Receiver` can be awaited in an async context +#[cfg(feature = "async")] { + tokio::runtime::Runtime::new() + .unwrap() + .block_on(async move { + let (response_sender, response_receiver) = oneshot::channel(); + let request = Request::from("data from sync thread"); + + processor.send((request, response_sender)).expect("Processor down"); + match response_receiver.await { // <- Receive on the oneshot channel asynchronously + Ok(result) => println!("Processor returned {}", result), + Err(_e) => panic!("Processor exited"), + } + }); +} +``` + +## Sync vs async + +The main motivation for writing this library was that there were no (known to me) channel +implementations allowing you to seamlessly send messages between a normal thread and an async +task, or the other way around. If message passing is the way you are communicating, of course +that should work smoothly between the sync and async parts of the program! + +This library achieves that by having a fast and cheap send operation that can +be used in both sync threads and async tasks. The receiver has both thread blocking +receive methods for synchronous usage, and implements `Future` for asynchronous usage. + +The receiving endpoint of this channel implements Rust's `Future` trait and can be waited on +in an asynchronous task. This implementation is completely executor/runtime agnostic. It should +be possible to use this library with any executor. + + +License: MIT OR Apache-2.0 diff --git a/third_party/rust/oneshot/benches/benches.rs b/third_party/rust/oneshot/benches/benches.rs @@ -0,0 +1,138 @@ +#[cfg(not(criterion))] +pub fn main() { + eprintln!( + "!!!!!! WARNING +To run benches, you neet to run with RUSTFLAGS=\"--cfg criterion\" +!!!!!! 
WARNING + " + ); +} + +#[cfg(criterion)] +criterion::criterion_group!(benches, imp::bench); +#[cfg(criterion)] +criterion::criterion_main!(benches); + +#[cfg(criterion)] +mod imp { + macro_rules! bench_send_and_recv { + ($c:expr, $($type:ty => $value:expr);+) => { + // Sanity check that all $values are of $type. + $(let _: $type = $value;)* + { + let mut group = $c.benchmark_group("create_channel"); + $(group.bench_function(stringify!($type), |b| { + b.iter(oneshot::channel::<$type>) + });)* + group.finish(); + } + { + let mut group = $c.benchmark_group("create_and_send"); + $(group.bench_function(stringify!($type), |b| { + b.iter(|| { + let (sender, _receiver) = oneshot::channel(); + sender.send(criterion::black_box($value)).unwrap() + }); + });)* + group.finish(); + } + { + let mut group = $c.benchmark_group("create_and_send_on_closed"); + $(group.bench_function(stringify!($type), |b| { + b.iter(|| { + let (sender, _) = oneshot::channel(); + sender.send(criterion::black_box($value)).unwrap_err() + }); + });)* + group.finish(); + } + #[cfg(feature = "std")] + { + let mut group = $c.benchmark_group("create_send_and_recv"); + $(group.bench_function(stringify!($type), |b| { + b.iter(|| { + let (sender, receiver) = oneshot::channel(); + sender.send(criterion::black_box($value)).unwrap(); + receiver.recv().unwrap() + }); + });)* + group.finish(); + } + #[cfg(feature = "std")] + { + let mut group = $c.benchmark_group("create_send_and_recv_ref"); + $(group.bench_function(stringify!($type), |b| { + b.iter(|| { + let (sender, receiver) = oneshot::channel(); + sender.send(criterion::black_box($value)).unwrap(); + receiver.recv_ref().unwrap() + }); + });)* + group.finish(); + } + }; + } + + pub fn bench(c: &mut criterion::Criterion) { + bench_send_and_recv!(c, + () => (); + u8 => 7u8; + u128 => 1234567u128; + [u8; 64] => [0b10101010u8; 64]; + [u8; 4096] => [0b10101010u8; 4096] + ); + + bench_try_recv(c); + #[cfg(feature = "std")] + bench_recv_deadline_now(c); + #[cfg(feature 
= "std")] + bench_recv_timeout_zero(c); + } + + fn bench_try_recv(c: &mut criterion::Criterion) { + let (sender, receiver) = oneshot::channel::<u128>(); + c.bench_function("try_recv_empty", |b| { + b.iter(|| receiver.try_recv().unwrap_err()) + }); + drop(sender); + c.bench_function("try_recv_empty_closed", |b| { + b.iter(|| receiver.try_recv().unwrap_err()) + }); + } + + #[cfg(feature = "std")] + fn bench_recv_deadline_now(c: &mut criterion::Criterion) { + let now = std::time::Instant::now(); + { + let (_sender, receiver) = oneshot::channel::<u128>(); + c.bench_function("recv_deadline_now", |b| { + b.iter(|| receiver.recv_deadline(now).unwrap_err()) + }); + } + { + let (sender, receiver) = oneshot::channel::<u128>(); + drop(sender); + c.bench_function("recv_deadline_now_closed", |b| { + b.iter(|| receiver.recv_deadline(now).unwrap_err()) + }); + } + } + + #[cfg(feature = "std")] + fn bench_recv_timeout_zero(c: &mut criterion::Criterion) { + let zero = std::time::Duration::from_nanos(0); + { + let (_sender, receiver) = oneshot::channel::<u128>(); + c.bench_function("recv_timeout_zero", |b| { + b.iter(|| receiver.recv_timeout(zero).unwrap_err()) + }); + } + { + let (sender, receiver) = oneshot::channel::<u128>(); + drop(sender); + c.bench_function("recv_timeout_zero_closed", |b| { + b.iter(|| receiver.recv_timeout(zero).unwrap_err()) + }); + } + } +} diff --git a/third_party/rust/oneshot/check_mem_leaks.sh b/third_party/rust/oneshot/check_mem_leaks.sh @@ -0,0 +1,13 @@ +#!/usr/bin/env bash + +set -eu + +SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" +cd "$SCRIPT_DIR" + +for example_path in examples/*.rs; do + example_filename=$(basename -- $example_path) + example=${example_filename%.*} + echo $example + cargo valgrind run --example "$example" +done diff --git a/third_party/rust/oneshot/examples/recv_before_send.rs b/third_party/rust/oneshot/examples/recv_before_send.rs @@ -0,0 +1,18 @@ +#[cfg(feature = "std")] +fn main() { + use std::thread; + use 
std::time::Duration; + + let (sender, receiver) = oneshot::channel(); + let t = thread::spawn(move || { + thread::sleep(Duration::from_millis(2)); + sender.send(9u128).unwrap(); + }); + assert_eq!(receiver.recv(), Ok(9)); + t.join().unwrap(); +} + +#[cfg(not(feature = "std"))] +fn main() { + panic!("This example is only for when the \"sync\" feature is used"); +} diff --git a/third_party/rust/oneshot/examples/recv_before_send_then_drop_sender.rs b/third_party/rust/oneshot/examples/recv_before_send_then_drop_sender.rs @@ -0,0 +1,18 @@ +#[cfg(feature = "std")] +fn main() { + use std::thread; + use std::time::Duration; + + let (sender, receiver) = oneshot::channel::<u128>(); + let t = thread::spawn(move || { + thread::sleep(Duration::from_millis(2)); + std::mem::drop(sender); + }); + assert!(receiver.recv().is_err()); + t.join().unwrap(); +} + +#[cfg(not(feature = "std"))] +fn main() { + panic!("This example is only for when the \"sync\" feature is used"); +} diff --git a/third_party/rust/oneshot/examples/recv_ref_before_send.rs b/third_party/rust/oneshot/examples/recv_ref_before_send.rs @@ -0,0 +1,18 @@ +#[cfg(feature = "std")] +fn main() { + use std::thread; + use std::time::Duration; + + let (sender, receiver) = oneshot::channel(); + let t = thread::spawn(move || { + thread::sleep(Duration::from_millis(2)); + sender.send(9u128).unwrap(); + }); + assert_eq!(receiver.recv_ref(), Ok(9)); + t.join().unwrap(); +} + +#[cfg(not(feature = "std"))] +fn main() { + panic!("This example is only for when the \"sync\" feature is used"); +} diff --git a/third_party/rust/oneshot/examples/recv_ref_before_send_then_drop_sender.rs b/third_party/rust/oneshot/examples/recv_ref_before_send_then_drop_sender.rs @@ -0,0 +1,18 @@ +#[cfg(feature = "std")] +fn main() { + use std::thread; + use std::time::Duration; + + let (sender, receiver) = oneshot::channel::<u128>(); + let t = thread::spawn(move || { + thread::sleep(Duration::from_millis(2)); + std::mem::drop(sender); + }); + 
assert!(receiver.recv_ref().is_err()); + t.join().unwrap(); +} + +#[cfg(not(feature = "std"))] +fn main() { + panic!("This example is only for when the \"sync\" feature is used"); +} diff --git a/third_party/rust/oneshot/examples/recv_timeout_before_send.rs b/third_party/rust/oneshot/examples/recv_timeout_before_send.rs @@ -0,0 +1,18 @@ +#[cfg(feature = "std")] +fn main() { + use std::thread; + use std::time::Duration; + + let (sender, receiver) = oneshot::channel(); + let t = thread::spawn(move || { + thread::sleep(Duration::from_millis(2)); + sender.send(9u128).unwrap(); + }); + assert_eq!(receiver.recv_timeout(Duration::from_millis(100)), Ok(9)); + t.join().unwrap(); +} + +#[cfg(not(feature = "std"))] +fn main() { + panic!("This example is only for when the \"sync\" feature is used"); +} diff --git a/third_party/rust/oneshot/examples/recv_timeout_before_send_then_drop_sender.rs b/third_party/rust/oneshot/examples/recv_timeout_before_send_then_drop_sender.rs @@ -0,0 +1,18 @@ +#[cfg(feature = "std")] +fn main() { + use std::thread; + use std::time::Duration; + + let (sender, receiver) = oneshot::channel::<u128>(); + let t = thread::spawn(move || { + thread::sleep(Duration::from_millis(2)); + std::mem::drop(sender); + }); + assert!(receiver.recv_timeout(Duration::from_millis(100)).is_err()); + t.join().unwrap(); +} + +#[cfg(not(feature = "std"))] +fn main() { + panic!("This example is only for when the \"sync\" feature is used"); +} diff --git a/third_party/rust/oneshot/examples/recv_with_dropped_sender.rs b/third_party/rust/oneshot/examples/recv_with_dropped_sender.rs @@ -0,0 +1,11 @@ +#[cfg(feature = "std")] +fn main() { + let (sender, receiver) = oneshot::channel::<u128>(); + std::mem::drop(sender); + receiver.recv().unwrap_err(); +} + +#[cfg(not(feature = "std"))] +fn main() { + panic!("This example is only for when the \"sync\" feature is used"); +} diff --git a/third_party/rust/oneshot/examples/send_before_recv.rs 
b/third_party/rust/oneshot/examples/send_before_recv.rs @@ -0,0 +1,11 @@ +#[cfg(feature = "std")] +fn main() { + let (sender, receiver) = oneshot::channel(); + assert!(sender.send(19i128).is_ok()); + assert_eq!(receiver.recv(), Ok(19i128)); +} + +#[cfg(not(feature = "std"))] +fn main() { + panic!("This example is only for when the \"sync\" feature is used"); +} diff --git a/third_party/rust/oneshot/examples/send_then_drop_receiver.rs b/third_party/rust/oneshot/examples/send_then_drop_receiver.rs @@ -0,0 +1,7 @@ +use std::mem; + +fn main() { + let (sender, receiver) = oneshot::channel(); + assert!(sender.send(19i128).is_ok()); + mem::drop(receiver); +} diff --git a/third_party/rust/oneshot/examples/send_with_dropped_receiver.rs b/third_party/rust/oneshot/examples/send_with_dropped_receiver.rs @@ -0,0 +1,8 @@ +use std::mem; + +fn main() { + let (sender, receiver) = oneshot::channel(); + mem::drop(receiver); + let send_error = sender.send(5u128).unwrap_err(); + assert_eq!(send_error.into_inner(), 5); +} diff --git a/third_party/rust/oneshot/src/errors.rs b/third_party/rust/oneshot/src/errors.rs @@ -0,0 +1,151 @@ +use super::{dealloc, Channel}; +use core::fmt; +use core::mem; +use core::ptr::NonNull; + +/// An error returned when trying to send on a closed channel. Returned from +/// [`Sender::send`](crate::Sender::send) if the corresponding [`Receiver`](crate::Receiver) +/// has already been dropped. +/// +/// The message that could not be sent can be retreived again with [`SendError::into_inner`]. +pub struct SendError<T> { + channel_ptr: NonNull<Channel<T>>, +} + +unsafe impl<T: Send> Send for SendError<T> {} +unsafe impl<T: Sync> Sync for SendError<T> {} + +impl<T> SendError<T> { + /// # Safety + /// + /// By calling this function, the caller semantically transfers ownership of the + /// channel's resources to the created `SendError`. Thus the caller must ensure that the + /// pointer is not used in a way which would violate this ownership transfer. 
Moreover, + /// the caller must assert that the channel contains a valid, initialized message. + pub(crate) const unsafe fn new(channel_ptr: NonNull<Channel<T>>) -> Self { + Self { channel_ptr } + } + + /// Consumes the error and returns the message that failed to be sent. + #[inline] + pub fn into_inner(self) -> T { + let channel_ptr = self.channel_ptr; + + // Don't run destructor if we consumed ourselves. Freeing happens here. + mem::forget(self); + + // SAFETY: we have ownership of the channel + let channel: &Channel<T> = unsafe { channel_ptr.as_ref() }; + + // SAFETY: we know that the message is initialized according to the safety requirements of + // `new` + let message = unsafe { channel.take_message() }; + + // SAFETY: we own the channel + unsafe { dealloc(channel_ptr) }; + + message + } + + /// Get a reference to the message that failed to be sent. + #[inline] + pub fn as_inner(&self) -> &T { + unsafe { self.channel_ptr.as_ref().message().assume_init_ref() } + } +} + +impl<T> Drop for SendError<T> { + fn drop(&mut self) { + // SAFETY: we have ownership of the channel and require that the message is initialized + // upon construction + unsafe { + self.channel_ptr.as_ref().drop_message(); + dealloc(self.channel_ptr); + } + } +} + +impl<T> fmt::Display for SendError<T> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + "sending on a closed channel".fmt(f) + } +} + +impl<T> fmt::Debug for SendError<T> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "SendError<{}>(_)", stringify!(T)) + } +} + +#[cfg(feature = "std")] +impl<T> std::error::Error for SendError<T> {} + +/// An error returned from receiving methods that block/wait until a message is available. +/// +/// The receive operation can only fail if the corresponding [`Sender`](crate::Sender) was dropped +/// before sending any message, or if a message has already been received on the channel. 
+#[cfg(any(feature = "std", feature = "async"))] +#[derive(Debug, Copy, Clone, Eq, PartialEq, Hash)] +pub struct RecvError; + +#[cfg(any(feature = "std", feature = "async"))] +impl fmt::Display for RecvError { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + "receiving on a closed channel".fmt(f) + } +} + +#[cfg(feature = "std")] +impl std::error::Error for RecvError {} + +/// An error returned when failing to receive a message in the non-blocking +/// [`Receiver::try_recv`](crate::Receiver::try_recv). +#[derive(Debug, Copy, Clone, Eq, PartialEq, Hash)] +pub enum TryRecvError { + /// The channel is still open, but there was no message present in it. + Empty, + + /// The channel is closed. Either the sender was dropped before sending any message, or the + /// message has already been extracted from the receiver. + Disconnected, +} + +impl fmt::Display for TryRecvError { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + let msg = match self { + TryRecvError::Empty => "receiving on an empty channel", + TryRecvError::Disconnected => "receiving on a closed channel", + }; + msg.fmt(f) + } +} + +#[cfg(feature = "std")] +impl std::error::Error for TryRecvError {} + +/// An error returned when failing to receive a message in a method that block/wait for a message +/// for a while, but has a timeout after which it gives up. +#[cfg(feature = "std")] +#[derive(Debug, Copy, Clone, Eq, PartialEq, Hash)] +pub enum RecvTimeoutError { + /// No message arrived on the channel before the timeout was reached. The channel is still open. + Timeout, + + /// The channel is closed. Either the sender was dropped before sending any message, or the + /// message has already been extracted from the receiver. 
+ Disconnected, +} + +#[cfg(feature = "std")] +impl fmt::Display for RecvTimeoutError { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + let msg = match self { + RecvTimeoutError::Timeout => "timed out waiting on channel", + RecvTimeoutError::Disconnected => "channel is empty and sending half is closed", + }; + msg.fmt(f) + } +} + +#[cfg(feature = "std")] +impl std::error::Error for RecvTimeoutError {} diff --git a/third_party/rust/oneshot/src/lib.rs b/third_party/rust/oneshot/src/lib.rs @@ -0,0 +1,1343 @@ +//! Oneshot spsc (single producer, single consumer) channel. Meaning each channel instance +//! can only transport a single message. This has a few nice outcomes. One thing is that +//! the implementation can be very efficient, utilizing the knowledge that there will +//! only be one message. But more importantly, it allows the API to be expressed in such +//! a way that certain edge cases that you don't want to care about when only sending a +//! single message on a channel does not exist. For example: The sender can't be copied +//! or cloned, and the send method takes ownership and consumes the sender. +//! So you are guaranteed, at the type level, that there can only be one message sent. +//! +//! The sender's send method is non-blocking, and potentially lock- and wait-free. +//! See documentation on [Sender::send] for situations where it might not be fully wait-free. +//! The receiver supports both lock- and wait-free `try_recv` as well as indefinite and time +//! limited thread blocking receive operations. The receiver also implements `Future` and +//! supports asynchronously awaiting the message. +//! +//! +//! # Examples +//! +//! This example sets up a background worker that processes requests coming in on a standard +//! mpsc channel and replies on a oneshot channel provided with each request. The worker can +//! be interacted with both from sync and async contexts since the oneshot receiver +//! can receive both blocking and async. +//! 
+//! ```rust +//! # #[cfg(not(feature = "loom"))] { +//! use std::sync::mpsc; +//! use std::thread; +//! use std::time::Duration; +//! +//! type Request = String; +//! +//! // Starts a background thread performing some computation on requests sent to it. +//! // Delivers the response back over a oneshot channel. +//! fn spawn_processing_thread() -> mpsc::Sender<(Request, oneshot::Sender<usize>)> { +//! let (request_sender, request_receiver) = mpsc::channel::<(Request, oneshot::Sender<usize>)>(); +//! thread::spawn(move || { +//! for (request_data, response_sender) in request_receiver.iter() { +//! let compute_operation = || request_data.len(); +//! let _ = response_sender.send(compute_operation()); // <- Send on the oneshot channel +//! } +//! }); +//! request_sender +//! } +//! +//! let processor = spawn_processing_thread(); +//! +//! // If compiled with `std` the library can receive messages with timeout on regular threads +//! #[cfg(feature = "std")] { +//! let (response_sender, response_receiver) = oneshot::channel(); +//! let request = Request::from("data from sync thread"); +//! +//! processor.send((request, response_sender)).expect("Processor down"); +//! match response_receiver.recv_timeout(Duration::from_secs(1)) { // <- Receive on the oneshot channel +//! Ok(result) => println!("Processor returned {}", result), +//! Err(oneshot::RecvTimeoutError::Timeout) => eprintln!("Processor was too slow"), +//! Err(oneshot::RecvTimeoutError::Disconnected) => panic!("Processor exited"), +//! } +//! } +//! +//! // If compiled with the `async` feature, the `Receiver` can be awaited in an async context +//! #[cfg(feature = "async")] { +//! tokio::runtime::Runtime::new() +//! .unwrap() +//! .block_on(async move { +//! let (response_sender, response_receiver) = oneshot::channel(); +//! let request = Request::from("data from sync thread"); +//! +//! processor.send((request, response_sender)).expect("Processor down"); +//! 
match response_receiver.await { // <- Receive on the oneshot channel asynchronously +//! Ok(result) => println!("Processor returned {}", result), +//! Err(_e) => panic!("Processor exited"), +//! } +//! }); +//! } +//! # } +//! ``` +//! +//! # Sync vs async +//! +//! The main motivation for writing this library was that there were no (known to me) channel +//! implementations allowing you to seamlessly send messages between a normal thread and an async +//! task, or the other way around. If message passing is the way you are communicating, of course +//! that should work smoothly between the sync and async parts of the program! +//! +//! This library achieves that by having a fast and cheap send operation that can +//! be used in both sync threads and async tasks. The receiver has both thread blocking +//! receive methods for synchronous usage, and implements `Future` for asynchronous usage. +//! +//! The receiving endpoint of this channel implements Rust's `Future` trait and can be waited on +//! in an asynchronous task. This implementation is completely executor/runtime agnostic. It should +//! be possible to use this library with any executor, or even pass messages between tasks running +//! in different executors. +//! + +// # Implementation description +// +// When a channel is created via the `channel` function, it creates a single heap allocation +// containing: +// * A one byte atomic integer that represents the current channel state, +// * Uninitialized memory to fit the message, +// * Uninitialized memory to fit the waker that can wake the receiving task or thread up. +// +// The size of the waker depends on which features are activated, it ranges from 0 to 24 bytes[1]. +// So with all features enabled each channel allocates 25 bytes plus the size of the +// message, plus any padding needed to get correct memory alignment. +// +// The Sender and Receiver only holds a raw pointer to the heap channel object. 
The last endpoint +// to be consumed or dropped is responsible for freeing the heap memory. The first endpoint to +// be consumed or dropped signal via the state that it is gone. And the second one see this and +// frees the memory. +// +// ## Footnotes +// +// [1]: Mind that the waker only takes zero bytes when all features are disabled, making it +// impossible to *wait* for the message. `try_recv` is the only available method in this scenario. + +#![deny(rust_2018_idioms)] +#![cfg_attr(not(feature = "std"), no_std)] +// Enables this nightly only feature for the documentation build on docs.rs. +// To test this locally, build the docs with: +// `RUSTDOCFLAGS="--cfg docsrs" cargo +nightly doc --all-features` +#![cfg_attr(docsrs, feature(doc_cfg))] +#![cfg_attr(docsrs, feature(doc_auto_cfg))] + +#[cfg(not(oneshot_loom))] +extern crate alloc; + +use core::{ + marker::PhantomData, + mem::{self, MaybeUninit}, + ptr::{self, NonNull}, +}; + +#[cfg(not(oneshot_loom))] +use core::{ + cell::UnsafeCell, + sync::atomic::{fence, AtomicU8, Ordering::*}, +}; +#[cfg(oneshot_loom)] +use loom::{ + cell::UnsafeCell, + sync::atomic::{fence, AtomicU8, Ordering::*}, +}; + +#[cfg(all(any(feature = "std", feature = "async"), not(oneshot_loom)))] +use core::hint; +#[cfg(all(any(feature = "std", feature = "async"), oneshot_loom))] +use loom::hint; + +#[cfg(feature = "async")] +use core::{ + pin::Pin, + task::{self, Poll}, +}; +#[cfg(feature = "std")] +use std::time::{Duration, Instant}; + +#[cfg(feature = "std")] +mod thread { + #[cfg(not(oneshot_loom))] + pub use std::thread::{current, park, park_timeout, Thread}; + + #[cfg(oneshot_loom)] + pub use loom::thread::{current, park, Thread}; + + // loom does not support parking with a timeout. So we just + // yield. This means that the "park" will "spuriously" wake up + // way too early. But the code should properly handle this. 
+ // One thing to note is that very short timeouts are needed + // when using loom, since otherwise the looping will cause + // an overflow in loom. + #[cfg(oneshot_loom)] + pub fn park_timeout(_timeout: std::time::Duration) { + loom::thread::yield_now() + } +} + +#[cfg(oneshot_loom)] +mod loombox; +#[cfg(not(oneshot_loom))] +use alloc::boxed::Box; +#[cfg(oneshot_loom)] +use loombox::Box; + +mod errors; +// Wildcard imports are not nice. But since multiple errors have various conditional compilation, +// this is easier than doing three different imports. +pub use errors::*; + +/// Creates a new oneshot channel and returns the two endpoints, [`Sender`] and [`Receiver`]. +pub fn channel<T>() -> (Sender<T>, Receiver<T>) { + // Allocate the channel on the heap and get the pointer. + // The last endpoint of the channel to be alive is responsible for freeing the channel + // and dropping any object that might have been written to it. + let channel_ptr = NonNull::from(Box::leak(Box::new(Channel::new()))); + + ( + Sender { + channel_ptr, + _invariant: PhantomData, + }, + Receiver { channel_ptr }, + ) +} + +/// Sending end of a oneshot channel. +/// +/// Created and returned from the [`channel`] function. +/// +/// Can be used to send a message to the corresponding [`Receiver`]. +#[derive(Debug)] +pub struct Sender<T> { + channel_ptr: NonNull<Channel<T>>, + // In reality we want contravariance, however we can't obtain that. + // + // Consider the following scenario: + // ``` + // let (mut tx, rx) = channel::<&'short u8>(); + // let (tx2, rx2) = channel::<&'long u8>(); + // + // tx = tx2; + // + // // Pretend short_ref is some &'short u8 + // tx.send(short_ref).unwrap(); + // let long_ref = rx2.recv().unwrap(); + // ``` + // + // If this type were covariant then we could safely extend lifetimes, which is not okay. + // Hence, we enforce invariance. + _invariant: PhantomData<fn(T) -> T>, +} + +/// Receiving end of a oneshot channel. 
+/// +/// Created and returned from the [`channel`] function. +/// +/// Can be used to receive a message from the corresponding [`Sender`]. How the message +/// can be received depends on what features are enabled. +/// +/// This type implement [`IntoFuture`](core::future::IntoFuture) when the `async` feature is enabled. +/// This allows awaiting it directly in an async context. +#[derive(Debug)] +pub struct Receiver<T> { + // Covariance is the right choice here. Consider the example presented in Sender, and you'll + // see that if we replaced `rx` instead then we would get the expected behavior + channel_ptr: NonNull<Channel<T>>, +} + +unsafe impl<T: Send> Send for Sender<T> {} + +// SAFETY: The only methods that assumes there is only a single reference to the sender +// takes `self` by value, guaranteeing that there is only one reference to the sender at +// the time it is called. +unsafe impl<T: Sync> Sync for Sender<T> {} + +unsafe impl<T: Send> Send for Receiver<T> {} +impl<T> Unpin for Receiver<T> {} + +impl<T> Sender<T> { + /// Sends `message` over the channel to the corresponding [`Receiver`]. + /// + /// Returns an error if the receiver has already been dropped. The message can + /// be extracted from the error. + /// + /// This method is lock-free and wait-free when sending on a channel that the + /// receiver is currently not receiving on. If the receiver is receiving during the send + /// operation this method includes waking up the thread/task. Unparking a thread involves + /// a mutex in Rust's standard library at the time of writing this. + /// How lock-free waking up an async task is + /// depends on your executor. If this method returns a `SendError`, please mind that dropping + /// the error involves running any drop implementation on the message type, and freeing the + /// channel's heap allocation, which might or might not be lock-free. 
+ pub fn send(self, message: T) -> Result<(), SendError<T>> { + let channel_ptr = self.channel_ptr; + + // Don't run our Drop implementation if send was called, any cleanup now happens here + mem::forget(self); + + // SAFETY: The channel exists on the heap for the entire duration of this method and we + // only ever acquire shared references to it. Note that if the receiver disconnects it + // does not free the channel. + let channel = unsafe { channel_ptr.as_ref() }; + + // Write the message into the channel on the heap. + // SAFETY: The receiver only ever accesses this memory location if we are in the MESSAGE + // state, and since we're responsible for setting that state, we can guarantee that we have + // exclusive access to this memory location to perform this write. + unsafe { channel.write_message(message) }; + + // Set the state to signal there is a message on the channel. + // ORDERING: we use release ordering to ensure the write of the message is visible to the + // receiving thread. The EMPTY and DISCONNECTED branches do not observe any shared state, + // and thus we do not need acquire ordering. The RECEIVING branch manages synchronization + // independent of this operation. + // + // EMPTY + 1 = MESSAGE + // RECEIVING + 1 = UNPARKING + // DISCONNECTED + 1 = invalid, however this state is never observed + match channel.state.fetch_add(1, Release) { + // The receiver is alive and has not started waiting. Send done. + EMPTY => Ok(()), + // The receiver is waiting. Wake it up so it can return the message. + RECEIVING => { + // ORDERING: Synchronizes with the write of the waker to memory, and prevents the + // taking of the waker from being ordered before this operation. + fence(Acquire); + + // Take the waker, but critically do not unpark it. 
If we unparked now, then the + // receiving thread could still observe the UNPARKING state and re-park, meaning + // that after we change to the MESSAGE state, it would remain parked indefinitely + // or until a spurious wakeup. + // SAFETY: at this point we are in the UNPARKING state, and the receiving thread + // does not access the waker while in this state, nor does it free the channel + // allocation in this state. + let waker = unsafe { channel.take_waker() }; + + // ORDERING: this ordering serves two-fold: it synchronizes with the acquire load + // in the receiving thread, ensuring that both our read of the waker and write of + // the message happen-before the taking of the message and freeing of the channel. + // Furthermore, we need acquire ordering to ensure the unparking of the receiver + // happens after the channel state is updated. + channel.state.swap(MESSAGE, AcqRel); + + // Note: it is possible that between the store above and this statement that + // the receiving thread is spuriously unparked, takes the message, and frees + // the channel allocation. However, we took ownership of the channel out of + // that allocation, and freeing the channel does not drop the waker since the + // waker is wrapped in MaybeUninit. Therefore this data is valid regardless of + // whether or not the receive has completed by this point. + waker.unpark(); + + Ok(()) + } + // The receiver was already dropped. The error is responsible for freeing the channel. + // SAFETY: since the receiver disconnected it will no longer access `channel_ptr`, so + // we can transfer exclusive ownership of the channel's resources to the error. + // Moreover, since we just placed the message in the channel, the channel contains a + // valid message. + DISCONNECTED => Err(unsafe { SendError::new(channel_ptr) }), + _ => unreachable!(), + } + } + + /// Returns true if the associated [`Receiver`] has been dropped. 
+ /// + /// If true is returned, a future call to send is guaranteed to return an error. + pub fn is_closed(&self) -> bool { + // SAFETY: The channel exists on the heap for the entire duration of this method and we + // only ever acquire shared references to it. Note that if the receiver disconnects it + // does not free the channel. + let channel = unsafe { self.channel_ptr.as_ref() }; + + // ORDERING: We *chose* a Relaxed ordering here as it sufficient to + // enforce the method's contract: "if true is returned, a future + // call to send is guaranteed to return an error." + channel.state.load(Relaxed) == DISCONNECTED + } + + /// Consumes the Sender, returning a raw pointer to the channel on the heap. + /// + /// This is intended to simplify using oneshot channels with some FFI code. The only safe thing + /// to do with the returned pointer is to later reconstruct the Sender with [Sender::from_raw]. + /// Memory will leak if the Sender is never reconstructed. + pub fn into_raw(self) -> *mut () { + let raw = self.channel_ptr.as_ptr() as *mut (); + mem::forget(self); + raw + } + + /// Consumes a raw pointer from [Sender::into_raw], recreating the Sender. + /// + /// # Safety + /// + /// This pointer must have come from [`Sender<T>::into_raw`] with the same message type, `T`. + /// At most one Sender must exist for a channel at any point in time. + /// Constructing multiple Senders from the same raw pointer leads to undefined behavior. + pub unsafe fn from_raw(raw: *mut ()) -> Self { + Self { + channel_ptr: NonNull::new_unchecked(raw as *mut Channel<T>), + _invariant: PhantomData, + } + } +} + +impl<T> Drop for Sender<T> { + fn drop(&mut self) { + // SAFETY: The receiver only ever frees the channel if we are in the MESSAGE or + // DISCONNECTED states. If we are in the MESSAGE state, then we called + // mem::forget(self), so we should not be in this function call. 
If we are in the + // DISCONNECTED state, then the receiver either received a MESSAGE so this statement is + // unreachable, or was dropped and observed that our side was still alive, and thus didn't + // free the channel. + let channel = unsafe { self.channel_ptr.as_ref() }; + + // Set the channel state to disconnected and read what state the receiver was in + // ORDERING: we don't need release ordering here since there are no modifications we + // need to make visible to other thread, and the Err(RECEIVING) branch handles + // synchronization independent of this cmpxchg + // + // EMPTY ^ 001 = DISCONNECTED + // RECEIVING ^ 001 = UNPARKING + // DISCONNECTED ^ 001 = EMPTY (invalid), but this state is never observed + match channel.state.fetch_xor(0b001, Relaxed) { + // The receiver has not started waiting, nor is it dropped. + EMPTY => (), + // The receiver is waiting. Wake it up so it can detect that the channel disconnected. + RECEIVING => { + // See comments in Sender::send + + fence(Acquire); + + let waker = unsafe { channel.take_waker() }; + + // We still need release ordering here to make sure our read of the waker happens + // before this, and acquire ordering to ensure the unparking of the receiver + // happens after this. + channel.state.swap(DISCONNECTED, AcqRel); + + // The Acquire ordering above ensures that the write of the DISCONNECTED state + // happens-before unparking the receiver. + waker.unpark(); + } + // The receiver was already dropped. We are responsible for freeing the channel. + DISCONNECTED => { + // SAFETY: when the receiver switches the state to DISCONNECTED they have received + // the message or will no longer be trying to receive the message, and have + // observed that the sender is still alive, meaning that we're responsible for + // freeing the channel allocation. + unsafe { dealloc(self.channel_ptr) }; + } + _ => unreachable!(), + } + } +} + +impl<T> Receiver<T> { + /// Checks if there is a message in the channel without blocking. 
Returns: + /// * `Ok(message)` if there was a message in the channel. + /// * `Err(Empty)` if the [`Sender`] is alive, but has not yet sent a message. + /// * `Err(Disconnected)` if the [`Sender`] was dropped before sending anything or if the + /// message has already been extracted by a previous receive call. + /// + /// If a message is returned, the channel is disconnected and any subsequent receive operation + /// using this receiver will return an error. + /// + /// This method is completely lock-free and wait-free. The only thing it does is an atomic + /// integer load of the channel state. And if there is a message in the channel it additionally + /// performs one atomic integer store and copies the message from the heap to the stack for + /// returning it. + pub fn try_recv(&self) -> Result<T, TryRecvError> { + // SAFETY: The channel will not be freed while this method is still running. + let channel = unsafe { self.channel_ptr.as_ref() }; + + // ORDERING: we use acquire ordering to synchronize with the store of the message. + match channel.state.load(Acquire) { + MESSAGE => { + // It's okay to break up the load and store since once we're in the message state + // the sender no longer modifies the state + // ORDERING: at this point the sender has done its job and is no longer active, so + // we don't need to make any side effects visible to it + channel.state.store(DISCONNECTED, Relaxed); + + // SAFETY: we are in the MESSAGE state so the message is present + Ok(unsafe { channel.take_message() }) + } + EMPTY => Err(TryRecvError::Empty), + DISCONNECTED => Err(TryRecvError::Disconnected), + #[cfg(feature = "async")] + RECEIVING | UNPARKING => Err(TryRecvError::Empty), + _ => unreachable!(), + } + } + + /// Attempts to wait for a message from the [`Sender`], returning an error if the channel is + /// disconnected. + /// + /// This method will always block the current thread if there is no data available and it is + /// still possible for the message to be sent. 
Once the message is sent to the corresponding + /// [`Sender`], then this receiver will wake up and return that message. + /// + /// If the corresponding [`Sender`] has disconnected (been dropped), or it disconnects while + /// this call is blocking, this call will wake up and return `Err` to indicate that the message + /// can never be received on this channel. + /// + /// If a sent message has already been extracted from this channel this method will return an + /// error. + /// + /// # Panics + /// + /// Panics if called after this receiver has been polled asynchronously. + #[cfg(feature = "std")] + pub fn recv(self) -> Result<T, RecvError> { + // Note that we don't need to worry about changing the state to disconnected or setting the + // state to an invalid value at any point in this function because we take ownership of + // self, and this function does not exit until the message has been received or both side + // of the channel are inactive and cleaned up. + + let channel_ptr = self.channel_ptr; + + // Don't run our Drop implementation. This consuming recv method is responsible for freeing. + mem::forget(self); + + // SAFETY: the existence of the `self` parameter serves as a certificate that the receiver + // is still alive, meaning that even if the sender was dropped then it would have observed + // the fact that we're still alive and left the responsibility of deallocating the + // channel to us, so channel_ptr is valid + let channel = unsafe { channel_ptr.as_ref() }; + + // ORDERING: we use acquire ordering to synchronize with the write of the message in the + // case that it's available + match channel.state.load(Acquire) { + // The sender is alive but has not sent anything yet. We prepare to park. + EMPTY => { + // Conditionally add a delay here to help the tests trigger the edge cases where + // the sender manages to be dropped or send something before we are able to store + // our waker object in the channel. 
+ #[cfg(all(oneshot_test_delay, not(oneshot_loom)))] + std::thread::sleep(std::time::Duration::from_millis(10)); + + // Write our waker instance to the channel. + // SAFETY: we are not yet in the RECEIVING state, meaning that the sender will not + // try to access the waker until it sees the state set to RECEIVING below + unsafe { channel.write_waker(ReceiverWaker::current_thread()) }; + + // Switch the state to RECEIVING. We need to do this in one atomic step in case the + // sender disconnected or sent the message while we wrote the waker to memory. We + // don't need to do a compare exchange here however because if the original state + // was not EMPTY, then the sender has either finished sending the message or is + // being dropped, so the RECEIVING state will never be observed after we return. + // ORDERING: we use release ordering so the sender can synchronize with our writing + // of the waker to memory. The individual branches handle any additional + // synchronizaton + match channel.state.swap(RECEIVING, Release) { + // We stored our waker, now we park until the sender has changed the state + EMPTY => loop { + thread::park(); + + // ORDERING: synchronize with the write of the message + match channel.state.load(Acquire) { + // The sender sent the message while we were parked. + MESSAGE => { + // SAFETY: we are in the message state so the message is valid + let message = unsafe { channel.take_message() }; + + // SAFETY: the Sender delegates the responsibility of deallocating + // the channel to us upon sending the message + unsafe { dealloc(channel_ptr) }; + + break Ok(message); + } + // The sender was dropped while we were parked. + DISCONNECTED => { + // SAFETY: the Sender doesn't deallocate the channel allocation in + // its drop implementation if we're receiving + unsafe { dealloc(channel_ptr) }; + + break Err(RecvError); + } + // State did not change, spurious wakeup, park again. 
+ RECEIVING | UNPARKING => (), + _ => unreachable!(), + } + }, + // The sender sent the message while we prepared to park. + MESSAGE => { + // ORDERING: Synchronize with the write of the message. This branch is + // unlikely to be taken, so it's likely more efficient to use a fence here + // instead of AcqRel ordering on the RMW operation + fence(Acquire); + + // SAFETY: we started in the empty state and the sender switched us to the + // message state. This means that it did not take the waker, so we're + // responsible for dropping it. + unsafe { channel.drop_waker() }; + + // SAFETY: we are in the message state so the message is valid + let message = unsafe { channel.take_message() }; + + // SAFETY: the Sender delegates the responsibility of deallocating the + // channel to us upon sending the message + unsafe { dealloc(channel_ptr) }; + + Ok(message) + } + // The sender was dropped before sending anything while we prepared to park. + DISCONNECTED => { + // SAFETY: we started in the empty state and the sender switched us to the + // disconnected state. It does not take the waker when it does this so we + // need to drop it. + unsafe { channel.drop_waker() }; + + // SAFETY: the sender does not deallocate the channel if it switches from + // empty to disconnected so we need to free the allocation + unsafe { dealloc(channel_ptr) }; + + Err(RecvError) + } + _ => unreachable!(), + } + } + // The sender already sent the message. + MESSAGE => { + // SAFETY: we are in the message state so the message is valid + let message = unsafe { channel.take_message() }; + + // SAFETY: we are already in the message state so the sender has been forgotten + // and it's our job to clean up resources + unsafe { dealloc(channel_ptr) }; + + Ok(message) + } + // The sender was dropped before sending anything, or we already received the message. 
+ DISCONNECTED => { + // SAFETY: the sender does not deallocate the channel if it switches from empty to + // disconnected so we need to free the allocation + unsafe { dealloc(channel_ptr) }; + + Err(RecvError) + } + // The receiver must have been `Future::poll`ed prior to this call. + #[cfg(feature = "async")] + RECEIVING | UNPARKING => panic!("{}", RECEIVER_USED_SYNC_AND_ASYNC_ERROR), + _ => unreachable!(), + } + } + + /// Attempts to wait for a message from the [`Sender`], returning an error if the channel is + /// disconnected. This is a non consuming version of [`Receiver::recv`], but with a bit + /// worse performance. Prefer `[`Receiver::recv`]` if your code allows consuming the receiver. + /// + /// If a message is returned, the channel is disconnected and any subsequent receive operation + /// using this receiver will return an error. + /// + /// # Panics + /// + /// Panics if called after this receiver has been polled asynchronously. + #[cfg(feature = "std")] + pub fn recv_ref(&self) -> Result<T, RecvError> { + self.start_recv_ref(RecvError, |channel| { + loop { + thread::park(); + + // ORDERING: we use acquire ordering to synchronize with the write of the message + match channel.state.load(Acquire) { + // The sender sent the message while we were parked. + // We take the message and mark the channel disconnected. + MESSAGE => { + // ORDERING: the sender is inactive at this point so we don't need to make + // any reads or writes visible to the sending thread + channel.state.store(DISCONNECTED, Relaxed); + + // SAFETY: we were just in the message state so the message is valid + break Ok(unsafe { channel.take_message() }); + } + // The sender was dropped while we were parked. + DISCONNECTED => break Err(RecvError), + // State did not change, spurious wakeup, park again. + RECEIVING | UNPARKING => (), + _ => unreachable!(), + } + } + }) + } + + /// Like [`Receiver::recv`], but will not block longer than `timeout`. 
Returns: + /// * `Ok(message)` if there was a message in the channel before the timeout was reached. + /// * `Err(Timeout)` if no message arrived on the channel before the timeout was reached. + /// * `Err(Disconnected)` if the sender was dropped before sending anything or if the message + /// has already been extracted by a previous receive call. + /// + /// If a message is returned, the channel is disconnected and any subsequent receive operation + /// using this receiver will return an error. + /// + /// If the supplied `timeout` is so large that Rust's `Instant` type can't represent this point + /// in the future this falls back to an indefinitely blocking receive operation. + /// + /// # Panics + /// + /// Panics if called after this receiver has been polled asynchronously. + #[cfg(feature = "std")] + pub fn recv_timeout(&self, timeout: Duration) -> Result<T, RecvTimeoutError> { + match Instant::now().checked_add(timeout) { + Some(deadline) => self.recv_deadline(deadline), + None => self.recv_ref().map_err(|_| RecvTimeoutError::Disconnected), + } + } + + /// Like [`Receiver::recv`], but will not block longer than until `deadline`. Returns: + /// * `Ok(message)` if there was a message in the channel before the deadline was reached. + /// * `Err(Timeout)` if no message arrived on the channel before the deadline was reached. + /// * `Err(Disconnected)` if the sender was dropped before sending anything or if the message + /// has already been extracted by a previous receive call. + /// + /// If a message is returned, the channel is disconnected and any subsequent receive operation + /// using this receiver will return an error. + /// + /// # Panics + /// + /// Panics if called after this receiver has been polled asynchronously. 
+ #[cfg(feature = "std")] + pub fn recv_deadline(&self, deadline: Instant) -> Result<T, RecvTimeoutError> { + /// # Safety + /// + /// If the sender is unparking us after a message send, the message must already have been + /// written to the channel and an acquire memory barrier issued before calling this function + #[cold] + unsafe fn wait_for_unpark<T>(channel: &Channel<T>) -> Result<T, RecvTimeoutError> { + loop { + thread::park(); + + // ORDERING: The callee has already synchronized with any message write + match channel.state.load(Relaxed) { + MESSAGE => { + // ORDERING: the sender has been dropped, so this update only + // needs to be visible to us + channel.state.store(DISCONNECTED, Relaxed); + break Ok(channel.take_message()); + } + DISCONNECTED => break Err(RecvTimeoutError::Disconnected), + // The sender is still unparking us. We continue on the empty state here since + // the current implementation eagerly sets the state to EMPTY upon timeout. + EMPTY => (), + _ => unreachable!(), + } + } + } + + self.start_recv_ref(RecvTimeoutError::Disconnected, |channel| { + loop { + match deadline.checked_duration_since(Instant::now()) { + Some(timeout) => { + thread::park_timeout(timeout); + + // ORDERING: synchronize with the write of the message + match channel.state.load(Acquire) { + // The sender sent the message while we were parked. + MESSAGE => { + // ORDERING: the sender has been `mem::forget`-ed so this update + // only needs to be visible to us. + channel.state.store(DISCONNECTED, Relaxed); + + // SAFETY: we either are in the message state or were just in the + // message state + break Ok(unsafe { channel.take_message() }); + } + // The sender was dropped while we were parked. + DISCONNECTED => break Err(RecvTimeoutError::Disconnected), + // State did not change, spurious wakeup, park again. 
+                            RECEIVING | UNPARKING => (),
+                            _ => unreachable!(),
+                        }
+                    }
+                    None => {
+                        // ORDERING: synchronize with the write of the message
+                        match channel.state.swap(EMPTY, Acquire) {
+                            // We reached the end of the timeout without receiving a message
+                            RECEIVING => {
+                                // SAFETY: we were in the receiving state and are now in the empty
+                                // state, so the sender has not and will not try to read the waker,
+                                // so we have exclusive access to drop it.
+                                unsafe { channel.drop_waker() };
+
+                                break Err(RecvTimeoutError::Timeout);
+                            }
+                            // The sender sent the message while we were parked.
+                            MESSAGE => {
+                                // Same safety and ordering as the Some branch
+
+                                channel.state.store(DISCONNECTED, Relaxed);
+                                break Ok(unsafe { channel.take_message() });
+                            }
+                            // The sender was dropped while we were parked.
+                            DISCONNECTED => {
+                                // ORDERING: we were originally in the disconnected state meaning
+                                // that the sender is inactive and no longer observing the state,
+                                // so we only need to change it back to DISCONNECTED for if the
+                                // receiver is dropped or a recv* method is called again
+                                channel.state.store(DISCONNECTED, Relaxed);
+
+                                break Err(RecvTimeoutError::Disconnected);
+                            }
+                            // The sender sent the message and started unparking us
+                            UNPARKING => {
+                                // We were in the UNPARKING state and are now in the EMPTY state.
+                                // We wait to be properly unparked and to observe if the sender
+                                // sets MESSAGE or DISCONNECTED state.
+                                // SAFETY: The load above has synchronized with any message write.
+                                break unsafe { wait_for_unpark(channel) };
+                            }
+                            _ => unreachable!(),
+                        }
+                    }
+                }
+            }
+        })
+    }
+
+    /// Returns true if the associated [`Sender`] was dropped before sending a message. Or if
+    /// the message has already been received.
+    ///
+    /// If `true` is returned, all future calls to receive methods are guaranteed to return
+    /// a disconnected error. And future calls to this method are guaranteed to also return `true`.
+    pub fn is_closed(&self) -> bool {
+        // SAFETY: the existence of the `self` parameter serves as a certificate that the receiver
+        // is still alive, meaning that even if the sender was dropped then it would have observed
+        // the fact that we're still alive and left the responsibility of deallocating the
+        // channel to us, so `self.channel` is valid
+        let channel = unsafe { self.channel_ptr.as_ref() };
+
+        // ORDERING: We *chose* a Relaxed ordering here as it is sufficient to
+        // enforce the method's contract. Once true has been observed, it will remain true.
+        // However, if false is observed, the sender might have just disconnected but this thread
+        // has not observed it yet.
+        channel.state.load(Relaxed) == DISCONNECTED
+    }
+
+    /// Returns true if there is a message in the channel, ready to be received.
+    ///
+    /// If `true` is returned, the next call to a receive method is guaranteed to return
+    /// a message.
+    pub fn has_message(&self) -> bool {
+        // SAFETY: the existence of the `self` parameter serves as a certificate that the receiver
+        // is still alive, meaning that even if the sender was dropped then it would have observed
+        // the fact that we're still alive and left the responsibility of deallocating the
+        // channel to us, so `self.channel` is valid
+        let channel = unsafe { self.channel_ptr.as_ref() };
+
+        // ORDERING: An acquire ordering is used to guarantee no subsequent loads are reordered
+        // before this one. This upholds the contract that if true is returned, the next call to
+        // a receive method is guaranteed to also observe the `MESSAGE` state and return a message.
+        channel.state.load(Acquire) == MESSAGE
+    }
+
+    /// Begins the process of receiving on the channel by reference. If the message is already
+    /// ready, or the sender has disconnected, then this function will return the appropriate
+    /// Result immediately.
Otherwise, it will write the waker to memory, check to see if the + /// sender has finished or disconnected again, and then will call `finish`. `finish` is + /// thus responsible for cleaning up the channel's resources appropriately before it returns, + /// such as destroying the waker, for instance. + #[cfg(feature = "std")] + #[inline] + fn start_recv_ref<E>( + &self, + disconnected_error: E, + finish: impl FnOnce(&Channel<T>) -> Result<T, E>, + ) -> Result<T, E> { + // SAFETY: the existence of the `self` parameter serves as a certificate that the receiver + // is still alive, meaning that even if the sender was dropped then it would have observed + // the fact that we're still alive and left the responsibility of deallocating the + // channel to us, so `self.channel` is valid + let channel = unsafe { self.channel_ptr.as_ref() }; + + // ORDERING: synchronize with the write of the message + match channel.state.load(Acquire) { + // The sender is alive but has not sent anything yet. We prepare to park. + EMPTY => { + // Conditionally add a delay here to help the tests trigger the edge cases where + // the sender manages to be dropped or send something before we are able to store + // our waker object in the channel. + #[cfg(all(oneshot_test_delay, not(oneshot_loom)))] + std::thread::sleep(std::time::Duration::from_millis(10)); + + // Write our waker instance to the channel. + // SAFETY: we are not yet in the RECEIVING state, meaning that the sender will not + // try to access the waker until it sees the state set to RECEIVING below + unsafe { channel.write_waker(ReceiverWaker::current_thread()) }; + + // ORDERING: we use release ordering on success so the sender can synchronize with + // our write of the waker. 
We use relaxed ordering on failure since the sender does + // not need to synchronize with our write and the individual match arms handle any + // additional synchronization + match channel + .state + .compare_exchange(EMPTY, RECEIVING, Release, Relaxed) + { + // We stored our waker, now we delegate to the callback to finish the receive + // operation + Ok(_) => finish(channel), + // The sender sent the message while we prepared to finish + Err(MESSAGE) => { + // See comments in `recv` for ordering and safety + + fence(Acquire); + + unsafe { channel.drop_waker() }; + + // ORDERING: the sender has been `mem::forget`-ed so this update only + // needs to be visible to us + channel.state.store(DISCONNECTED, Relaxed); + + // SAFETY: The MESSAGE state tells us there is a correctly initialized + // message + Ok(unsafe { channel.take_message() }) + } + // The sender was dropped before sending anything while we prepared to park. + Err(DISCONNECTED) => { + // See comments in `recv` for safety + unsafe { channel.drop_waker() }; + Err(disconnected_error) + } + _ => unreachable!(), + } + } + // The sender sent the message. We take the message and mark the channel disconnected. + MESSAGE => { + // ORDERING: the sender has been `mem::forget`-ed so this update only needs to be + // visible to us + channel.state.store(DISCONNECTED, Relaxed); + + // SAFETY: we are in the message state so the message is valid + Ok(unsafe { channel.take_message() }) + } + // The sender was dropped before sending anything, or we already received the message. + DISCONNECTED => Err(disconnected_error), + // The receiver must have been `Future::poll`ed prior to this call. + #[cfg(feature = "async")] + RECEIVING | UNPARKING => panic!("{}", RECEIVER_USED_SYNC_AND_ASYNC_ERROR), + _ => unreachable!(), + } + } + + /// Consumes the Receiver, returning a raw pointer to the channel on the heap. + /// + /// This is intended to simplify using oneshot channels with some FFI code. 
The only safe thing + /// to do with the returned pointer is to later reconstruct the Receiver with + /// [Receiver::from_raw]. Memory will leak if the Receiver is never reconstructed. + pub fn into_raw(self) -> *mut () { + let raw = self.channel_ptr.as_ptr() as *mut (); + mem::forget(self); + raw + } + + /// Consumes a raw pointer from [Receiver::into_raw], recreating the Receiver. + /// + /// # Safety + /// + /// This pointer must have come from [`Receiver<T>::into_raw`] with the same message type, `T`. + /// At most one Receiver must exist for a channel at any point in time. + /// Constructing multiple Receivers from the same raw pointer leads to undefined behavior. + pub unsafe fn from_raw(raw: *mut ()) -> Self { + Self { + channel_ptr: NonNull::new_unchecked(raw as *mut Channel<T>), + } + } +} + +#[cfg(feature = "async")] +impl<T> core::future::Future for Receiver<T> { + type Output = Result<T, RecvError>; + + fn poll(self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll<Self::Output> { + // SAFETY: the existence of the `self` parameter serves as a certificate that the receiver + // is still alive, meaning that even if the sender was dropped then it would have observed + // the fact that we're still alive and left the responsibility of deallocating the + // channel to us, so `self.channel` is valid + let channel = unsafe { self.channel_ptr.as_ref() }; + + // ORDERING: we use acquire ordering to synchronize with the store of the message. + match channel.state.load(Acquire) { + // The sender is alive but has not sent anything yet. + EMPTY => { + // SAFETY: We can't be in the forbidden states, and no waker in the channel. + unsafe { channel.write_async_waker(cx) } + } + // We were polled again while waiting for the sender. Replace the waker with the new one. 
+ RECEIVING => { + // ORDERING: We use relaxed ordering on both success and failure since we have not + // written anything above that must be released, and the individual match arms + // handle any additional synchronization. + match channel + .state + .compare_exchange(RECEIVING, EMPTY, Relaxed, Relaxed) + { + // We successfully changed the state back to EMPTY. Replace the waker. + // This is the most likely branch to be taken, which is why we don't use any + // memory barriers in the compare_exchange above. + Ok(_) => { + // SAFETY: We wrote the waker in a previous call to poll. We do not need + // a memory barrier since the previous write here was by ourselves. + unsafe { channel.drop_waker() }; + // SAFETY: We can't be in the forbidden states, and no waker in the channel. + unsafe { channel.write_async_waker(cx) } + } + // The sender sent the message while we prepared to replace the waker. + // We take the message and mark the channel disconnected. + // The sender has already taken the waker. + Err(MESSAGE) => { + // ORDERING: Synchronize with the write of the message. This branch is + // unlikely to be taken. + channel.state.swap(DISCONNECTED, Acquire); + // SAFETY: The state tells us the sender has initialized the message. + Poll::Ready(Ok(unsafe { channel.take_message() })) + } + // The sender was dropped before sending anything while we prepared to park. + // The sender has taken the waker already. + Err(DISCONNECTED) => Poll::Ready(Err(RecvError)), + // The sender is currently waking us up. + Err(UNPARKING) => { + // We can't trust that the old waker that the sender has access to + // is honored by the async runtime at this point. So we wake ourselves + // up to get polled instantly again. + cx.waker().wake_by_ref(); + Poll::Pending + } + _ => unreachable!(), + } + } + // The sender sent the message. 
+ MESSAGE => { + // ORDERING: the sender has been dropped so this update only needs to be + // visible to us + channel.state.store(DISCONNECTED, Relaxed); + Poll::Ready(Ok(unsafe { channel.take_message() })) + } + // The sender was dropped before sending anything, or we already received the message. + DISCONNECTED => Poll::Ready(Err(RecvError)), + // The sender has observed the RECEIVING state and is currently reading the waker from + // a previous poll. We need to loop here until we observe the MESSAGE or DISCONNECTED + // state. We busy loop here since we know the sender is done very soon. + UNPARKING => loop { + hint::spin_loop(); + // ORDERING: The load above has already synchronized with the write of the message. + match channel.state.load(Relaxed) { + MESSAGE => { + // ORDERING: the sender has been dropped, so this update only + // needs to be visible to us + channel.state.store(DISCONNECTED, Relaxed); + // SAFETY: We observed the MESSAGE state + break Poll::Ready(Ok(unsafe { channel.take_message() })); + } + DISCONNECTED => break Poll::Ready(Err(RecvError)), + UNPARKING => (), + _ => unreachable!(), + } + }, + _ => unreachable!(), + } + } +} + +impl<T> Drop for Receiver<T> { + fn drop(&mut self) { + // SAFETY: since the receiving side is still alive the sender would have observed that and + // left deallocating the channel allocation to us. + let channel = unsafe { self.channel_ptr.as_ref() }; + + // Set the channel state to disconnected and read what state the receiver was in + match channel.state.swap(DISCONNECTED, Acquire) { + // The sender has not sent anything, nor is it dropped. + EMPTY => (), + // The sender already sent something. We must drop it, and free the channel. + MESSAGE => { + // SAFETY: we are in the message state so the message is initialized + unsafe { channel.drop_message() }; + + // SAFETY: see safety comment at top of function + unsafe { dealloc(self.channel_ptr) }; + } + // The receiver has been polled. 
+ #[cfg(feature = "async")] + RECEIVING => { + // TODO: figure this out when async is fixed + unsafe { channel.drop_waker() }; + } + // The sender was already dropped. We are responsible for freeing the channel. + DISCONNECTED => { + // SAFETY: see safety comment at top of function + unsafe { dealloc(self.channel_ptr) }; + } + // This receiver was previously polled, so the channel was in the RECEIVING state. + // But the sender has observed the RECEIVING state and is currently reading the waker + // to wake us up. We need to loop here until we observe the MESSAGE or DISCONNECTED state. + // We busy loop here since we know the sender is done very soon. + #[cfg(any(feature = "std", feature = "async"))] + UNPARKING => { + loop { + hint::spin_loop(); + // ORDERING: The swap above has already synchronized with the write of the message. + match channel.state.load(Relaxed) { + MESSAGE => { + // SAFETY: we are in the message state so the message is initialized + unsafe { channel.drop_message() }; + break; + } + DISCONNECTED => break, + UNPARKING => (), + _ => unreachable!(), + } + } + // SAFETY: see safety comment at top of function + unsafe { dealloc(self.channel_ptr) }; + } + _ => unreachable!(), + } + } +} + +/// All the values that the `Channel::state` field can have during the lifetime of a channel. +mod states { + // These values are very explicitly chosen so that we can replace some cmpxchg calls with + // fetch_* calls. + + /// The initial channel state. Active while both endpoints are still alive, no message has been + /// sent, and the receiver is not receiving. + pub const EMPTY: u8 = 0b011; + /// A message has been sent to the channel, but the receiver has not yet read it. + pub const MESSAGE: u8 = 0b100; + /// No message has yet been sent on the channel, but the receiver is currently receiving. + pub const RECEIVING: u8 = 0b000; + #[cfg(any(feature = "std", feature = "async"))] + pub const UNPARKING: u8 = 0b001; + /// The channel has been closed. 
This means that either the sender or receiver has been dropped,
+    /// or the message sent to the channel has already been received. Since this is a oneshot
+    /// channel, it is disconnected after the one message it is supposed to hold has been
+    /// transmitted.
+    pub const DISCONNECTED: u8 = 0b010;
+}
+use states::*;
+
+/// Internal channel data structure. The `channel` method allocates and puts one instance
+/// of this struct on the heap for each oneshot channel instance. The struct holds:
+/// * The current state of the channel.
+/// * The message in the channel. This memory is uninitialized until the message is sent.
+/// * The waker instance for the thread or task that is currently receiving on this channel.
+///   This memory is uninitialized until the receiver starts receiving.
+struct Channel<T> {
+    state: AtomicU8,
+    message: UnsafeCell<MaybeUninit<T>>,
+    waker: UnsafeCell<MaybeUninit<ReceiverWaker>>,
+}
+
+impl<T> Channel<T> {
+    pub fn new() -> Self {
+        Self {
+            state: AtomicU8::new(EMPTY),
+            message: UnsafeCell::new(MaybeUninit::uninit()),
+            waker: UnsafeCell::new(MaybeUninit::uninit()),
+        }
+    }
+
+    #[inline(always)]
+    unsafe fn message(&self) -> &MaybeUninit<T> {
+        #[cfg(oneshot_loom)]
+        {
+            self.message.with(|ptr| &*ptr)
+        }
+
+        #[cfg(not(oneshot_loom))]
+        {
+            &*self.message.get()
+        }
+    }
+
+    #[inline(always)]
+    unsafe fn with_message_mut<F>(&self, op: F)
+    where
+        F: FnOnce(&mut MaybeUninit<T>),
+    {
+        #[cfg(oneshot_loom)]
+        {
+            self.message.with_mut(|ptr| op(&mut *ptr))
+        }
+
+        #[cfg(not(oneshot_loom))]
+        {
+            op(&mut *self.message.get())
+        }
+    }
+
+    #[inline(always)]
+    #[cfg(any(feature = "std", feature = "async"))]
+    unsafe fn with_waker_mut<F>(&self, op: F)
+    where
+        F: FnOnce(&mut MaybeUninit<ReceiverWaker>),
+    {
+        #[cfg(oneshot_loom)]
+        {
+            self.waker.with_mut(|ptr| op(&mut *ptr))
+        }
+
+        #[cfg(not(oneshot_loom))]
+        {
+            op(&mut *self.waker.get())
+        }
+    }
+
+    #[inline(always)]
+    unsafe fn write_message(&self, message: T) {
+        
self.with_message_mut(|slot| slot.as_mut_ptr().write(message)); + } + + #[inline(always)] + unsafe fn take_message(&self) -> T { + #[cfg(oneshot_loom)] + { + self.message.with(|ptr| ptr::read(ptr)).assume_init() + } + + #[cfg(not(oneshot_loom))] + { + ptr::read(self.message.get()).assume_init() + } + } + + #[inline(always)] + unsafe fn drop_message(&self) { + self.with_message_mut(|slot| slot.assume_init_drop()); + } + + #[cfg(any(feature = "std", feature = "async"))] + #[inline(always)] + unsafe fn write_waker(&self, waker: ReceiverWaker) { + self.with_waker_mut(|slot| slot.as_mut_ptr().write(waker)); + } + + #[inline(always)] + unsafe fn take_waker(&self) -> ReceiverWaker { + #[cfg(oneshot_loom)] + { + self.waker.with(|ptr| ptr::read(ptr)).assume_init() + } + + #[cfg(not(oneshot_loom))] + { + ptr::read(self.waker.get()).assume_init() + } + } + + #[cfg(any(feature = "std", feature = "async"))] + #[inline(always)] + unsafe fn drop_waker(&self) { + self.with_waker_mut(|slot| slot.assume_init_drop()); + } + + /// # Safety + /// + /// * `Channel::waker` must not have a waker stored in it when calling this method. + /// * Channel state must not be RECEIVING or UNPARKING when calling this method. + #[cfg(feature = "async")] + unsafe fn write_async_waker(&self, cx: &mut task::Context<'_>) -> Poll<Result<T, RecvError>> { + // Write our thread instance to the channel. + // SAFETY: we are not yet in the RECEIVING state, meaning that the sender will not + // try to access the waker until it sees the state set to RECEIVING below + self.write_waker(ReceiverWaker::task_waker(cx)); + + // ORDERING: we use release ordering on success so the sender can synchronize with + // our write of the waker. 
We use relaxed ordering on failure since the sender does
+        // not need to synchronize with our write and the individual match arms handle any
+        // additional synchronization
+        match self
+            .state
+            .compare_exchange(EMPTY, RECEIVING, Release, Relaxed)
+        {
+            // We stored our waker, now we return and let the sender wake us up
+            Ok(_) => Poll::Pending,
+            // The sender sent the message while we prepared to park.
+            // We take the message and mark the channel disconnected.
+            Err(MESSAGE) => {
+                // ORDERING: Synchronize with the write of the message. This branch is
+                // unlikely to be taken, so it's likely more efficient to use a fence here
+                // instead of AcqRel ordering on the compare_exchange operation
+                fence(Acquire);
+
+                // SAFETY: we started in the EMPTY state and the sender switched us to the
+                // MESSAGE state. This means that it did not take the waker, so we're
+                // responsible for dropping it.
+                self.drop_waker();
+
+                // ORDERING: sender does not exist, so this update only needs to be visible to us
+                self.state.store(DISCONNECTED, Relaxed);
+
+                // SAFETY: The MESSAGE state tells us there is a correctly initialized message
+                Poll::Ready(Ok(self.take_message()))
+            }
+            // The sender was dropped before sending anything while we prepared to park.
+            Err(DISCONNECTED) => {
+                // SAFETY: we started in the EMPTY state and the sender switched us to the
+                // DISCONNECTED state. This means that it did not take the waker, so we're
+                // responsible for dropping it.
+                self.drop_waker();
+                Poll::Ready(Err(RecvError))
+            }
+            _ => unreachable!(),
+        }
+    }
+}
+
+enum ReceiverWaker {
+    /// The receiver is waiting synchronously. Its thread is parked.
+    #[cfg(feature = "std")]
+    Thread(thread::Thread),
+    /// The receiver is waiting asynchronously. Its task can be woken up with this `Waker`.
+    #[cfg(feature = "async")]
+    Task(task::Waker),
+    /// A little hack to not make this enum an uninhabited type when no features are enabled.
+ #[cfg(not(any(feature = "async", feature = "std")))] + _Uninhabited, +} + +impl ReceiverWaker { + #[cfg(feature = "std")] + pub fn current_thread() -> Self { + Self::Thread(thread::current()) + } + + #[cfg(feature = "async")] + pub fn task_waker(cx: &task::Context<'_>) -> Self { + Self::Task(cx.waker().clone()) + } + + pub fn unpark(self) { + match self { + #[cfg(feature = "std")] + ReceiverWaker::Thread(thread) => thread.unpark(), + #[cfg(feature = "async")] + ReceiverWaker::Task(waker) => waker.wake(), + #[cfg(not(any(feature = "async", feature = "std")))] + ReceiverWaker::_Uninhabited => unreachable!(), + } + } +} + +#[cfg(not(oneshot_loom))] +#[test] +#[ignore = "Unstable test. Different Rust versions have different sizes for Thread"] +fn receiver_waker_size() { + let expected: usize = match (cfg!(feature = "std"), cfg!(feature = "async")) { + (false, false) => 0, + (false, true) => 16, + (true, false) => 16, + (true, true) => 24, + }; + assert_eq!(mem::size_of::<ReceiverWaker>(), expected); +} + +#[cfg(all(feature = "std", feature = "async"))] +const RECEIVER_USED_SYNC_AND_ASYNC_ERROR: &str = + "Invalid to call a blocking receive method on oneshot::Receiver after it has been polled"; + +#[inline] +pub(crate) unsafe fn dealloc<T>(channel: NonNull<Channel<T>>) { + drop(Box::from_raw(channel.as_ptr())) +} diff --git a/third_party/rust/oneshot/src/loombox.rs b/third_party/rust/oneshot/src/loombox.rs @@ -0,0 +1,158 @@ +use core::{borrow, fmt, hash, mem, ptr}; +use loom::alloc; + +pub struct Box<T: ?Sized> { + ptr: *mut T, +} + +impl<T> Box<T> { + pub fn new(value: T) -> Self { + let layout = alloc::Layout::new::<T>(); + let ptr = unsafe { alloc::alloc(layout) } as *mut T; + unsafe { ptr::write(ptr, value) }; + Self { ptr } + } +} + +impl<T: ?Sized> Box<T> { + pub fn leak<'a>(b: Self) -> &'a mut T + where + T: 'a, + { + unsafe { &mut *Box::into_raw(b) } + } + + #[inline] + pub fn into_raw(b: Box<T>) -> *mut T { + let ptr = b.ptr; + mem::forget(b); + ptr + } + + 
pub const unsafe fn from_raw(ptr: *mut T) -> Box<T> { + Self { ptr } + } +} + +impl<T: ?Sized> Drop for Box<T> { + fn drop(&mut self) { + unsafe { + let size = mem::size_of_val(&*self.ptr); + let align = mem::align_of_val(&*self.ptr); + let layout = alloc::Layout::from_size_align(size, align).unwrap(); + ptr::drop_in_place(self.ptr); + alloc::dealloc(self.ptr as *mut u8, layout); + } + } +} + +unsafe impl<T: Send> Send for Box<T> {} +unsafe impl<T: Sync> Sync for Box<T> {} + +impl<T: ?Sized> core::ops::Deref for Box<T> { + type Target = T; + + fn deref(&self) -> &T { + unsafe { &*self.ptr } + } +} + +impl<T: ?Sized> core::ops::DerefMut for Box<T> { + fn deref_mut(&mut self) -> &mut T { + unsafe { &mut *self.ptr } + } +} + +impl<T: ?Sized> borrow::Borrow<T> for Box<T> { + fn borrow(&self) -> &T { + self + } +} + +impl<T: ?Sized> borrow::BorrowMut<T> for Box<T> { + fn borrow_mut(&mut self) -> &mut T { + self + } +} + +impl<T: ?Sized> AsRef<T> for Box<T> { + fn as_ref(&self) -> &T { + self + } +} + +impl<T: ?Sized> AsMut<T> for Box<T> { + fn as_mut(&mut self) -> &mut T { + self + } +} + +impl<T: fmt::Display + ?Sized> fmt::Display for Box<T> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + fmt::Display::fmt(&**self, f) + } +} + +impl<T: fmt::Debug + ?Sized> fmt::Debug for Box<T> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + fmt::Debug::fmt(&**self, f) + } +} + +impl<T: Clone> Clone for Box<T> { + #[inline] + fn clone(&self) -> Box<T> { + Self::new(self.as_ref().clone()) + } +} + +impl<T: ?Sized + PartialEq> PartialEq for Box<T> { + #[inline] + fn eq(&self, other: &Box<T>) -> bool { + PartialEq::eq(&**self, &**other) + } + + #[allow(clippy::partialeq_ne_impl)] + #[inline] + fn ne(&self, other: &Box<T>) -> bool { + PartialEq::ne(&**self, &**other) + } +} + +impl<T: ?Sized + Eq> Eq for Box<T> {} + +impl<T: ?Sized + PartialOrd> PartialOrd for Box<T> { + #[inline] + fn partial_cmp(&self, other: &Box<T>) -> Option<core::cmp::Ordering> { + 
PartialOrd::partial_cmp(&**self, &**other) + } + #[inline] + fn lt(&self, other: &Box<T>) -> bool { + PartialOrd::lt(&**self, &**other) + } + #[inline] + fn le(&self, other: &Box<T>) -> bool { + PartialOrd::le(&**self, &**other) + } + #[inline] + fn ge(&self, other: &Box<T>) -> bool { + PartialOrd::ge(&**self, &**other) + } + #[inline] + fn gt(&self, other: &Box<T>) -> bool { + PartialOrd::gt(&**self, &**other) + } +} + +impl<T: ?Sized + Ord> Ord for Box<T> { + #[inline] + fn cmp(&self, other: &Box<T>) -> core::cmp::Ordering { + Ord::cmp(&**self, &**other) + } +} + +impl<T: ?Sized + hash::Hash> hash::Hash for Box<T> { + fn hash<H: hash::Hasher>(&self, state: &mut H) { + (**self).hash(state); + } +} diff --git a/third_party/rust/oneshot/tests/assert_mem.rs b/third_party/rust/oneshot/tests/assert_mem.rs @@ -0,0 +1,37 @@ +use oneshot::{Receiver, Sender}; +use std::mem; + +/// Just sanity check that both channel endpoints stay the size of a single pointer. +#[test] +fn channel_endpoints_single_pointer() { + const PTR_SIZE: usize = mem::size_of::<*const ()>(); + + assert_eq!(mem::size_of::<Sender<()>>(), PTR_SIZE); + assert_eq!(mem::size_of::<Receiver<()>>(), PTR_SIZE); + + assert_eq!(mem::size_of::<Sender<u8>>(), PTR_SIZE); + assert_eq!(mem::size_of::<Receiver<u8>>(), PTR_SIZE); + + assert_eq!(mem::size_of::<Sender<[u8; 1024]>>(), PTR_SIZE); + assert_eq!(mem::size_of::<Receiver<[u8; 1024]>>(), PTR_SIZE); + + assert_eq!(mem::size_of::<Option<Sender<[u8; 1024]>>>(), PTR_SIZE); + assert_eq!(mem::size_of::<Option<Receiver<[u8; 1024]>>>(), PTR_SIZE); +} + +/// Check that the `SendError` stays small. Useful to automatically detect if it is refactored +/// to become large. We do not want the stack requirement for calling `Sender::send` to grow. 
+#[test] +fn error_sizes() { + const PTR_SIZE: usize = mem::size_of::<usize>(); + + assert_eq!(mem::size_of::<oneshot::SendError<()>>(), PTR_SIZE); + assert_eq!(mem::size_of::<oneshot::SendError<u8>>(), PTR_SIZE); + assert_eq!(mem::size_of::<oneshot::SendError<[u8; 1024]>>(), PTR_SIZE); + + // The type returned from `Sender::send` is also just pointer sized + assert_eq!( + mem::size_of::<Result<(), oneshot::SendError<[u8; 1024]>>>(), + PTR_SIZE + ); +} diff --git a/third_party/rust/oneshot/tests/async.rs b/third_party/rust/oneshot/tests/async.rs @@ -0,0 +1,128 @@ +#![cfg(all(feature = "async", not(oneshot_loom)))] + +use core::mem; +use core::time::Duration; + +mod helpers; +use helpers::DropCounter; + +#[tokio::test] +async fn send_before_await_tokio() { + let (sender, receiver) = oneshot::channel(); + assert!(sender.send(19i128).is_ok()); + assert_eq!(receiver.await, Ok(19i128)); +} + +#[async_std::test] +async fn send_before_await_async_std() { + let (sender, receiver) = oneshot::channel(); + assert!(sender.send(19i128).is_ok()); + assert_eq!(receiver.await, Ok(19i128)); +} + +#[tokio::test] +async fn await_with_dropped_sender_tokio() { + let (sender, receiver) = oneshot::channel::<u128>(); + mem::drop(sender); + receiver.await.unwrap_err(); +} + +#[async_std::test] +async fn await_with_dropped_sender_async_std() { + let (sender, receiver) = oneshot::channel::<u128>(); + mem::drop(sender); + receiver.await.unwrap_err(); +} + +#[tokio::test] +async fn await_before_send_tokio() { + let (sender, receiver) = oneshot::channel(); + let (message, counter) = DropCounter::new(79u128); + let t = tokio::spawn(async move { + tokio::time::sleep(Duration::from_millis(10)).await; + sender.send(message) + }); + let returned_message = receiver.await.unwrap(); + assert_eq!(counter.count(), 0); + assert_eq!(*returned_message.value(), 79u128); + mem::drop(returned_message); + assert_eq!(counter.count(), 1); + t.await.unwrap().unwrap(); +} + +#[async_std::test] +async fn 
await_before_send_async_std() { + let (sender, receiver) = oneshot::channel(); + let (message, counter) = DropCounter::new(79u128); + let t = async_std::task::spawn(async move { + async_std::task::sleep(Duration::from_millis(10)).await; + sender.send(message) + }); + let returned_message = receiver.await.unwrap(); + assert_eq!(counter.count(), 0); + assert_eq!(*returned_message.value(), 79u128); + mem::drop(returned_message); + assert_eq!(counter.count(), 1); + t.await.unwrap(); +} + +#[tokio::test] +async fn await_before_send_then_drop_sender_tokio() { + let (sender, receiver) = oneshot::channel::<u128>(); + let t = tokio::spawn(async { + tokio::time::sleep(Duration::from_millis(10)).await; + mem::drop(sender); + }); + assert!(receiver.await.is_err()); + t.await.unwrap(); +} + +#[async_std::test] +async fn await_before_send_then_drop_sender_async_std() { + let (sender, receiver) = oneshot::channel::<u128>(); + let t = async_std::task::spawn(async { + async_std::task::sleep(Duration::from_millis(10)).await; + mem::drop(sender); + }); + assert!(receiver.await.is_err()); + t.await; +} + +// Tests that the Receiver handles being used synchronously even after being polled +#[tokio::test] +async fn poll_future_and_then_try_recv() { + use core::future::Future; + use core::pin::Pin; + use core::task::{self, Poll}; + + struct StupidReceiverFuture(oneshot::Receiver<()>); + + impl Future for StupidReceiverFuture { + type Output = Result<(), oneshot::RecvError>; + + fn poll(mut self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll<Self::Output> { + let poll_result = Future::poll(Pin::new(&mut self.0), cx); + self.0.try_recv().expect_err("Should never be a message"); + poll_result + } + } + + let (sender, receiver) = oneshot::channel(); + let t = tokio::spawn(async { + tokio::time::sleep(Duration::from_millis(20)).await; + mem::drop(sender); + }); + StupidReceiverFuture(receiver).await.unwrap_err(); + t.await.unwrap(); +} + +#[tokio::test] +async fn 
poll_receiver_then_drop_it() { + let (sender, receiver) = oneshot::channel::<()>(); + // This will poll the receiver and then give up after 100 ms. + tokio::time::timeout(Duration::from_millis(100), receiver) + .await + .unwrap_err(); + // Make sure the receiver has been dropped by the runtime. + assert!(sender.send(()).is_err()); +} diff --git a/third_party/rust/oneshot/tests/future.rs b/third_party/rust/oneshot/tests/future.rs @@ -0,0 +1,65 @@ +#![cfg(feature = "async")] + +use core::{future, mem, pin, task}; + +#[cfg(oneshot_loom)] +pub use loom::sync::{Arc, Mutex}; +#[cfg(not(oneshot_loom))] +pub use std::sync::{Arc, Mutex}; + +mod helpers; +use helpers::maybe_loom_model; + +#[test] +fn multiple_receiver_polls_keeps_only_latest_waker() { + #[derive(Default)] + struct MockWaker { + cloned: usize, + dropped: usize, + } + + fn clone_mock_waker(waker: *const ()) -> task::RawWaker { + let mock_waker = unsafe { Arc::from_raw(waker as *const Mutex<MockWaker>) }; + mock_waker.lock().unwrap().cloned += 1; + let new_waker = + task::RawWaker::new(Arc::into_raw(mock_waker.clone()) as *const (), &VTABLE); + mem::forget(mock_waker); + new_waker + } + + fn drop_mock_waker(waker: *const ()) { + let mock_waker = unsafe { Arc::from_raw(waker as *const Mutex<MockWaker>) }; + mock_waker.lock().unwrap().dropped += 1; + } + + const VTABLE: task::RawWakerVTable = + task::RawWakerVTable::new(clone_mock_waker, |_| (), |_| (), drop_mock_waker); + + maybe_loom_model(|| { + let mock_waker1 = Arc::new(Mutex::new(MockWaker::default())); + let raw_waker1 = + task::RawWaker::new(Arc::into_raw(mock_waker1.clone()) as *const (), &VTABLE); + let waker1 = unsafe { task::Waker::from_raw(raw_waker1) }; + let mut context1 = task::Context::from_waker(&waker1); + + let (_sender, mut receiver) = oneshot::channel::<()>(); + + let poll_result = future::Future::poll(pin::Pin::new(&mut receiver), &mut context1); + assert_eq!(poll_result, task::Poll::Pending); + 
// tests/helpers/mod.rs -- shared utilities for the oneshot test suite.
//
// Builds in two flavours: a plain std build, and a loom model-checking build
// selected with `--cfg oneshot_loom`, which swaps in loom's Arc/atomic shims
// so the model checker can explore thread interleavings.

#[cfg(not(oneshot_loom))]
use std::sync::{
    atomic::{AtomicUsize, Ordering::SeqCst},
    Arc,
};
#[cfg(oneshot_loom)]
use loom::sync::{
    atomic::{AtomicUsize, Ordering::SeqCst},
    Arc,
};

/// Runs `test` under `loom::model` when model checking, or directly otherwise.
#[allow(dead_code)]
pub fn maybe_loom_model(test: impl Fn() + Sync + Send + 'static) {
    #[cfg(oneshot_loom)]
    loom::model(test);
    #[cfg(not(oneshot_loom))]
    test();
}

/// Wraps a value and counts how many times the wrapper itself is dropped.
///
/// NOTE: the counter tracks drops of the `DropCounter` shell, not of the inner
/// value — `into_value` consumes the shell and therefore bumps the count too.
#[allow(dead_code)]
pub struct DropCounter<T> {
    drops: Arc<AtomicUsize>,
    inner: Option<T>,
}

/// Observer handle paired with a [`DropCounter`]; reads the drop count.
#[allow(dead_code)]
pub struct DropCounterHandle(Arc<AtomicUsize>);

#[allow(dead_code)]
impl<T> DropCounter<T> {
    /// Wraps `value` and returns the wrapper plus a handle observing its drops.
    pub fn new(value: T) -> (Self, DropCounterHandle) {
        let drops = Arc::new(AtomicUsize::new(0));
        let counter = Self {
            drops: Arc::clone(&drops),
            inner: Some(value),
        };
        (counter, DropCounterHandle(drops))
    }

    /// Borrows the wrapped value.
    pub fn value(&self) -> &T {
        self.inner.as_ref().unwrap()
    }

    /// Consumes the wrapper and returns the value. The shell is dropped on the
    /// way out, so the paired handle's count increases by one.
    pub fn into_value(mut self) -> T {
        self.inner.take().unwrap()
    }
}

#[allow(dead_code)]
impl DropCounterHandle {
    /// Number of times the paired `DropCounter` has been dropped so far.
    pub fn count(&self) -> usize {
        self.0.load(SeqCst)
    }
}

impl<T> Drop for DropCounter<T> {
    fn drop(&mut self) {
        self.drops.fetch_add(1, SeqCst);
    }
}

// tests/helpers/waker.rs -- a Waker whose clone/wake/drop calls are observable
// from tests. Only needed by the loom model tests.
#[cfg(oneshot_loom)]
pub mod waker {
    use std::mem::forget;
    use std::sync::atomic::{AtomicU32, Ordering};
    use std::sync::Arc;
    use std::task::{RawWaker, RawWakerVTable, Waker};

    /// Counters recording how the instrumented waker has been used.
    #[derive(Default)]
    pub struct WakerHandle {
        clones: AtomicU32,
        drops: AtomicU32,
        wakes: AtomicU32,
    }

    impl WakerHandle {
        /// How many times the waker has been cloned.
        pub fn clone_count(&self) -> u32 {
            self.clones.load(Ordering::Relaxed)
        }

        /// How many times a waker reference has been dropped (waking by value
        /// counts as a drop as well).
        pub fn drop_count(&self) -> u32 {
            self.drops.load(Ordering::Relaxed)
        }

        /// How many times the waker has been woken (by value or by ref).
        pub fn wake_count(&self) -> u32 {
            self.wakes.load(Ordering::Relaxed)
        }
    }

    /// Builds an instrumented `Waker` plus the handle observing it.
    pub fn waker() -> (Waker, Arc<WakerHandle>) {
        let handle = Arc::new(WakerHandle::default());
        let data = Arc::into_raw(Arc::clone(&handle));
        let raw = RawWaker::new(data as *const _, waker_vtable());
        // SAFETY: `data` comes from Arc::into_raw and the vtable functions
        // below uphold the RawWaker reference-counting contract for it.
        (unsafe { Waker::from_raw(raw) }, handle)
    }

    pub(super) fn waker_vtable() -> &'static RawWakerVTable {
        &RawWakerVTable::new(clone_raw, wake_raw, wake_by_ref_raw, drop_raw)
    }

    /// VTable `clone`: record the clone and add one strong reference.
    unsafe fn clone_raw(data: *const ()) -> RawWaker {
        let handle: Arc<WakerHandle> = Arc::from_raw(data as *const _);
        handle.clones.fetch_add(1, Ordering::Relaxed);
        // Keep both references alive: the caller still owns `data`, and the
        // returned RawWaker owns the freshly cloned reference.
        forget(handle.clone());
        forget(handle);
        RawWaker::new(data, waker_vtable())
    }

    /// VTable `wake` (by value): record the wake and consume the reference.
    unsafe fn wake_raw(data: *const ()) {
        let handle: Arc<WakerHandle> = Arc::from_raw(data as *const _);
        handle.wakes.fetch_add(1, Ordering::Relaxed);
        // Waking by value also destroys the waker, so it counts as a drop;
        // the Arc reference is released when `handle` goes out of scope.
        handle.drops.fetch_add(1, Ordering::Relaxed);
    }

    /// VTable `wake_by_ref`: record the wake but keep the reference alive.
    unsafe fn wake_by_ref_raw(data: *const ()) {
        let handle: Arc<WakerHandle> = Arc::from_raw(data as *const _);
        handle.wakes.fetch_add(1, Ordering::Relaxed);
        forget(handle)
    }

    /// VTable `drop`: record the drop and release the reference.
    unsafe fn drop_raw(data: *const ()) {
        let handle: Arc<WakerHandle> = Arc::from_raw(data as *const _);
        handle.drops.fetch_add(1, Ordering::Relaxed);
        drop(handle)
    }
}
*const _); + handle.drop_count.fetch_add(1, Ordering::Relaxed); + drop(handle) +} diff --git a/third_party/rust/oneshot/tests/loom.rs b/third_party/rust/oneshot/tests/loom.rs @@ -0,0 +1,249 @@ +#![cfg(oneshot_loom)] + +use oneshot::TryRecvError; + +use loom::hint; +use loom::thread; +#[cfg(feature = "async")] +use std::future::Future; +#[cfg(feature = "async")] +use std::pin::Pin; +#[cfg(feature = "async")] +use std::task::{self, Poll}; +#[cfg(feature = "std")] +use std::time::Duration; + +mod helpers; + +#[test] +fn try_recv() { + loom::model(|| { + let (sender, receiver) = oneshot::channel::<u128>(); + + let t = thread::spawn(move || loop { + match receiver.try_recv() { + Ok(msg) => break msg, + Err(TryRecvError::Empty) => hint::spin_loop(), + Err(TryRecvError::Disconnected) => panic!("Should not be disconnected"), + } + }); + + assert!(sender.send(19).is_ok()); + assert_eq!(t.join().unwrap(), 19); + }) +} + +#[cfg(feature = "std")] +#[test] +fn send_recv_different_threads() { + loom::model(|| { + let (sender, receiver) = oneshot::channel(); + let t2 = thread::spawn(move || { + assert_eq!(receiver.recv_timeout(Duration::from_millis(1)), Ok(9)); + }); + let t1 = thread::spawn(move || { + sender.send(9u128).unwrap(); + }); + t1.join().unwrap(); + t2.join().unwrap(); + }) +} + +#[cfg(feature = "std")] +#[test] +fn recv_drop_sender_different_threads() { + loom::model(|| { + let (sender, receiver) = oneshot::channel::<u128>(); + let t2 = thread::spawn(move || { + assert!(receiver.recv_timeout(Duration::from_millis(0)).is_err()); + }); + let t1 = thread::spawn(move || { + drop(sender); + }); + t1.join().unwrap(); + t2.join().unwrap(); + }) +} + +#[cfg(feature = "async")] +#[test] +fn async_recv() { + loom::model(|| { + let (sender, receiver) = oneshot::channel::<u128>(); + let t1 = thread::spawn(move || { + sender.send(987).unwrap(); + }); + assert_eq!(loom::future::block_on(receiver), Ok(987)); + t1.join().unwrap(); + }) +} + +#[cfg(feature = "async")] +#[test] +fn 
send_then_poll() { + loom::model(|| { + let (sender, mut receiver) = oneshot::channel::<u128>(); + sender.send(1234).unwrap(); + + let (waker, waker_handle) = helpers::waker::waker(); + let mut context = task::Context::from_waker(&waker); + + assert_eq!( + Pin::new(&mut receiver).poll(&mut context), + Poll::Ready(Ok(1234)) + ); + assert_eq!(waker_handle.clone_count(), 0); + assert_eq!(waker_handle.drop_count(), 0); + assert_eq!(waker_handle.wake_count(), 0); + }) +} + +// Make sure the receiver can be dropped while a send is happening in parallel +#[cfg(feature = "async")] +#[test] +fn poll_then_drop_receiver_during_send() { + loom::model(|| { + let (sender, mut receiver) = oneshot::channel::<u128>(); + + let (waker, _waker_handle) = helpers::waker::waker(); + let mut context = task::Context::from_waker(&waker); + + // Put the channel into the receiving state + assert_eq!(Pin::new(&mut receiver).poll(&mut context), Poll::Pending); + + // Spawn a separate thread that sends in parallel + let t = thread::spawn(move || { + let _ = sender.send(1234); + }); + + // Drop the receiver. 
Loom will make sure all thread interleavings with the send are tested + drop(receiver); + + // The send operation should also not have panicked + t.join().unwrap(); + }) +} + +#[cfg(feature = "async")] +#[test] +fn poll_then_send() { + loom::model(|| { + let (sender, mut receiver) = oneshot::channel::<u128>(); + + let (waker, waker_handle) = helpers::waker::waker(); + let mut context = task::Context::from_waker(&waker); + + assert_eq!(Pin::new(&mut receiver).poll(&mut context), Poll::Pending); + assert_eq!(waker_handle.clone_count(), 1); + assert_eq!(waker_handle.drop_count(), 0); + assert_eq!(waker_handle.wake_count(), 0); + + sender.send(1234).unwrap(); + assert_eq!(waker_handle.clone_count(), 1); + assert_eq!(waker_handle.drop_count(), 1); + assert_eq!(waker_handle.wake_count(), 1); + + assert_eq!( + Pin::new(&mut receiver).poll(&mut context), + Poll::Ready(Ok(1234)) + ); + assert_eq!(waker_handle.clone_count(), 1); + assert_eq!(waker_handle.drop_count(), 1); + assert_eq!(waker_handle.wake_count(), 1); + }) +} + +#[cfg(feature = "async")] +#[test] +fn poll_with_different_wakers() { + loom::model(|| { + let (sender, mut receiver) = oneshot::channel::<u128>(); + + let (waker1, waker_handle1) = helpers::waker::waker(); + let mut context1 = task::Context::from_waker(&waker1); + + assert_eq!(Pin::new(&mut receiver).poll(&mut context1), Poll::Pending); + assert_eq!(waker_handle1.clone_count(), 1); + assert_eq!(waker_handle1.drop_count(), 0); + assert_eq!(waker_handle1.wake_count(), 0); + + let (waker2, waker_handle2) = helpers::waker::waker(); + let mut context2 = task::Context::from_waker(&waker2); + + assert_eq!(Pin::new(&mut receiver).poll(&mut context2), Poll::Pending); + assert_eq!(waker_handle1.clone_count(), 1); + assert_eq!(waker_handle1.drop_count(), 1); + assert_eq!(waker_handle1.wake_count(), 0); + + assert_eq!(waker_handle2.clone_count(), 1); + assert_eq!(waker_handle2.drop_count(), 0); + assert_eq!(waker_handle2.wake_count(), 0); + + // Sending should 
cause the waker from the latest poll to be woken up + sender.send(1234).unwrap(); + assert_eq!(waker_handle1.clone_count(), 1); + assert_eq!(waker_handle1.drop_count(), 1); + assert_eq!(waker_handle1.wake_count(), 0); + + assert_eq!(waker_handle2.clone_count(), 1); + assert_eq!(waker_handle2.drop_count(), 1); + assert_eq!(waker_handle2.wake_count(), 1); + }) +} + +#[cfg(feature = "async")] +#[test] +fn poll_then_try_recv() { + loom::model(|| { + let (_sender, mut receiver) = oneshot::channel::<u128>(); + + let (waker, waker_handle) = helpers::waker::waker(); + let mut context = task::Context::from_waker(&waker); + + assert_eq!(Pin::new(&mut receiver).poll(&mut context), Poll::Pending); + assert_eq!(waker_handle.clone_count(), 1); + assert_eq!(waker_handle.drop_count(), 0); + assert_eq!(waker_handle.wake_count(), 0); + + assert_eq!(receiver.try_recv(), Err(TryRecvError::Empty)); + + assert_eq!(Pin::new(&mut receiver).poll(&mut context), Poll::Pending); + assert_eq!(waker_handle.clone_count(), 2); + assert_eq!(waker_handle.drop_count(), 1); + assert_eq!(waker_handle.wake_count(), 0); + }) +} + +#[cfg(feature = "async")] +#[test] +fn poll_then_try_recv_while_sending() { + loom::model(|| { + let (sender, mut receiver) = oneshot::channel::<u128>(); + + let (waker, waker_handle) = helpers::waker::waker(); + let mut context = task::Context::from_waker(&waker); + + assert_eq!(Pin::new(&mut receiver).poll(&mut context), Poll::Pending); + assert_eq!(waker_handle.clone_count(), 1); + assert_eq!(waker_handle.drop_count(), 0); + assert_eq!(waker_handle.wake_count(), 0); + + let t = thread::spawn(move || { + sender.send(1234).unwrap(); + }); + + let msg = loop { + match receiver.try_recv() { + Ok(msg) => break msg, + Err(TryRecvError::Empty) => hint::spin_loop(), + Err(TryRecvError::Disconnected) => panic!("Should not be disconnected"), + } + }; + assert_eq!(msg, 1234); + assert_eq!(waker_handle.clone_count(), 1); + assert_eq!(waker_handle.drop_count(), 1); + 
assert_eq!(waker_handle.wake_count(), 1); + + t.join().unwrap(); + }) +} diff --git a/third_party/rust/oneshot/tests/raw.rs b/third_party/rust/oneshot/tests/raw.rs @@ -0,0 +1,46 @@ +#![cfg(not(oneshot_loom))] + +use oneshot::{channel, Receiver, Sender}; + +#[test] +fn test_raw_sender() { + let (sender, receiver) = channel::<u32>(); + let raw = sender.into_raw(); + let recreated = unsafe { Sender::<u32>::from_raw(raw) }; + recreated + .send(100) + .unwrap_or_else(|e| panic!("error sending after into_raw/from_raw roundtrip: {e}")); + assert_eq!(receiver.try_recv(), Ok(100)) +} + +#[test] +fn test_raw_receiver() { + let (sender, receiver) = channel::<u32>(); + let raw = receiver.into_raw(); + sender.send(100).unwrap(); + let recreated = unsafe { Receiver::<u32>::from_raw(raw) }; + assert_eq!( + recreated + .try_recv() + .unwrap_or_else(|e| panic!("error receiving after into_raw/from_raw roundtrip: {e}")), + 100 + ) +} + +#[test] +fn test_raw_sender_and_receiver() { + let (sender, receiver) = channel::<u32>(); + let raw_receiver = receiver.into_raw(); + let raw_sender = sender.into_raw(); + + let recreated_sender = unsafe { Sender::<u32>::from_raw(raw_sender) }; + recreated_sender.send(100).unwrap(); + + let recreated_receiver = unsafe { Receiver::<u32>::from_raw(raw_receiver) }; + assert_eq!( + recreated_receiver + .try_recv() + .unwrap_or_else(|e| panic!("error receiving after into_raw/from_raw roundtrip: {e}")), + 100 + ) +} diff --git a/third_party/rust/oneshot/tests/sync.rs b/third_party/rust/oneshot/tests/sync.rs @@ -0,0 +1,369 @@ +use core::mem; +use oneshot::TryRecvError; + +#[cfg(feature = "std")] +use oneshot::{RecvError, RecvTimeoutError}; +#[cfg(feature = "std")] +use std::time::{Duration, Instant}; + +#[cfg(feature = "std")] +mod thread { + #[cfg(oneshot_loom)] + pub use loom::thread::spawn; + #[cfg(not(oneshot_loom))] + pub use std::thread::{sleep, spawn}; + + #[cfg(oneshot_loom)] + pub fn sleep(_timeout: core::time::Duration) { + 
loom::thread::yield_now() + } +} + +mod helpers; +use helpers::{maybe_loom_model, DropCounter}; + +#[test] +fn send_before_try_recv() { + maybe_loom_model(|| { + let (sender, receiver) = oneshot::channel(); + assert!(!receiver.has_message()); + assert!(sender.send(19i128).is_ok()); + + assert!(receiver.has_message()); + assert_eq!(receiver.try_recv(), Ok(19i128)); + assert!(!receiver.has_message()); + assert_eq!(receiver.try_recv(), Err(TryRecvError::Disconnected)); + #[cfg(feature = "std")] + { + assert_eq!(receiver.recv_ref(), Err(RecvError)); + assert!(receiver.recv_timeout(Duration::from_secs(1)).is_err()); + } + }) +} + +#[cfg(feature = "std")] +#[test] +fn send_before_recv() { + maybe_loom_model(|| { + let (sender, receiver) = oneshot::channel::<()>(); + assert!(sender.send(()).is_ok()); + assert_eq!(receiver.recv(), Ok(())); + }); + maybe_loom_model(|| { + let (sender, receiver) = oneshot::channel::<u8>(); + assert!(sender.send(19).is_ok()); + assert_eq!(receiver.recv(), Ok(19)); + }); + maybe_loom_model(|| { + let (sender, receiver) = oneshot::channel::<u64>(); + assert!(sender.send(21).is_ok()); + assert_eq!(receiver.recv(), Ok(21)); + }); + // FIXME: This test does not work with loom. There is something that happens after the + // channel object becomes larger than ~500 bytes and that makes an atomic read from the state + // result in "signal: 10, SIGBUS: access to undefined memory" + #[cfg(not(oneshot_loom))] + maybe_loom_model(|| { + let (sender, receiver) = oneshot::channel::<[u8; 4096]>(); + assert!(sender.send([0b10101010; 4096]).is_ok()); + assert!(receiver.recv().unwrap()[..] 
== [0b10101010; 4096][..]); + }); +} + +#[cfg(feature = "std")] +#[test] +fn send_before_recv_ref() { + maybe_loom_model(|| { + let (sender, receiver) = oneshot::channel(); + assert!(sender.send(19i128).is_ok()); + + assert_eq!(receiver.recv_ref(), Ok(19i128)); + assert_eq!(receiver.recv_ref(), Err(RecvError)); + assert_eq!(receiver.try_recv(), Err(TryRecvError::Disconnected)); + assert!(receiver.recv_timeout(Duration::from_secs(1)).is_err()); + }) +} + +#[cfg(feature = "std")] +#[test] +fn send_before_recv_timeout() { + maybe_loom_model(|| { + let (sender, receiver) = oneshot::channel(); + assert!(sender.send(19i128).is_ok()); + + let start = Instant::now(); + let timeout = Duration::from_secs(1); + assert_eq!(receiver.recv_timeout(timeout), Ok(19i128)); + assert!(start.elapsed() < Duration::from_millis(100)); + + assert!(receiver.recv_timeout(timeout).is_err()); + assert!(receiver.try_recv().is_err()); + assert!(receiver.recv().is_err()); + }) +} + +#[test] +fn send_then_drop_receiver() { + maybe_loom_model(|| { + let (sender, receiver) = oneshot::channel(); + assert!(sender.send(19i128).is_ok()); + mem::drop(receiver); + }) +} + +#[test] +fn send_with_dropped_receiver() { + maybe_loom_model(|| { + let (sender, receiver) = oneshot::channel(); + mem::drop(receiver); + let send_error = sender.send(5u128).unwrap_err(); + assert_eq!(*send_error.as_inner(), 5); + assert_eq!(send_error.into_inner(), 5); + }) +} + +#[test] +fn try_recv_with_dropped_sender() { + maybe_loom_model(|| { + let (sender, receiver) = oneshot::channel::<u128>(); + mem::drop(sender); + assert!(!receiver.has_message()); + receiver.try_recv().unwrap_err(); + }) +} + +#[cfg(feature = "std")] +#[test] +fn recv_with_dropped_sender() { + maybe_loom_model(|| { + let (sender, receiver) = oneshot::channel::<u128>(); + mem::drop(sender); + receiver.recv().unwrap_err(); + }) +} + +#[cfg(feature = "std")] +#[test] +fn recv_before_send() { + maybe_loom_model(|| { + let (sender, receiver) = oneshot::channel(); 
+ let t = thread::spawn(move || { + thread::sleep(Duration::from_millis(2)); + sender.send(9u128).unwrap(); + }); + assert_eq!(receiver.recv(), Ok(9)); + t.join().unwrap(); + }) +} + +#[cfg(feature = "std")] +#[test] +fn recv_timeout_before_send() { + maybe_loom_model(|| { + let (sender, receiver) = oneshot::channel(); + let t = thread::spawn(move || { + thread::sleep(Duration::from_millis(2)); + sender.send(9u128).unwrap(); + }); + assert_eq!(receiver.recv_timeout(Duration::from_secs(1)), Ok(9)); + t.join().unwrap(); + }) +} + +#[cfg(feature = "std")] +#[test] +fn recv_before_send_then_drop_sender() { + maybe_loom_model(|| { + let (sender, receiver) = oneshot::channel::<u128>(); + let t = thread::spawn(move || { + thread::sleep(Duration::from_millis(10)); + mem::drop(sender); + }); + assert!(receiver.recv().is_err()); + t.join().unwrap(); + }) +} + +#[cfg(feature = "std")] +#[test] +fn recv_timeout_before_send_then_drop_sender() { + maybe_loom_model(|| { + let (sender, receiver) = oneshot::channel::<u128>(); + let t = thread::spawn(move || { + thread::sleep(Duration::from_millis(10)); + mem::drop(sender); + }); + assert!(receiver.recv_timeout(Duration::from_secs(1)).is_err()); + t.join().unwrap(); + }) +} + +#[test] +fn try_recv() { + maybe_loom_model(|| { + let (sender, receiver) = oneshot::channel::<u128>(); + assert_eq!(receiver.try_recv(), Err(TryRecvError::Empty)); + mem::drop(sender) + }) +} + +#[cfg(feature = "std")] +#[test] +fn try_recv_then_drop_receiver() { + maybe_loom_model(|| { + let (sender, receiver) = oneshot::channel::<u128>(); + let t1 = thread::spawn(move || { + let _ = sender.send(42); + }); + let t2 = thread::spawn(move || { + assert!(matches!( + receiver.try_recv(), + Ok(42) | Err(TryRecvError::Empty) + )); + mem::drop(receiver); + }); + t1.join().unwrap(); + t2.join().unwrap(); + }) +} + +#[cfg(feature = "std")] +#[test] +fn recv_deadline_and_timeout_no_time() { + maybe_loom_model(|| { + let (_sender, receiver) = oneshot::channel::<u128>(); 
+ + let start = Instant::now(); + assert_eq!( + receiver.recv_deadline(start), + Err(RecvTimeoutError::Timeout) + ); + assert!(start.elapsed() < Duration::from_millis(200)); + + let start = Instant::now(); + assert_eq!( + receiver.recv_timeout(Duration::from_millis(0)), + Err(RecvTimeoutError::Timeout) + ); + assert!(start.elapsed() < Duration::from_millis(200)); + }) +} + +// This test doesn't give meaningful results when run with oneshot_test_delay and loom +#[cfg(all(feature = "std", not(all(oneshot_test_delay, oneshot_loom))))] +#[test] +fn recv_deadline_time_should_elapse() { + maybe_loom_model(|| { + let (_sender, receiver) = oneshot::channel::<u128>(); + + let start = Instant::now(); + #[cfg(not(oneshot_loom))] + let timeout = Duration::from_millis(100); + #[cfg(oneshot_loom)] + let timeout = Duration::from_millis(1); + assert_eq!( + receiver.recv_deadline(start + timeout), + Err(RecvTimeoutError::Timeout) + ); + assert!(start.elapsed() > timeout); + assert!(start.elapsed() < timeout * 3); + }) +} + +#[cfg(all(feature = "std", not(all(oneshot_test_delay, oneshot_loom))))] +#[test] +fn recv_timeout_time_should_elapse() { + maybe_loom_model(|| { + let (_sender, receiver) = oneshot::channel::<u128>(); + + let start = Instant::now(); + #[cfg(not(oneshot_loom))] + let timeout = Duration::from_millis(100); + #[cfg(oneshot_loom)] + let timeout = Duration::from_millis(1); + + assert_eq!( + receiver.recv_timeout(timeout), + Err(RecvTimeoutError::Timeout) + ); + assert!(start.elapsed() > timeout); + assert!(start.elapsed() < timeout * 3); + }) +} + +#[cfg(not(oneshot_loom))] +#[test] +fn non_send_type_can_be_used_on_same_thread() { + use std::ptr; + + #[derive(Debug, Eq, PartialEq)] + struct NotSend(*mut ()); + + let (sender, receiver) = oneshot::channel(); + sender.send(NotSend(ptr::null_mut())).unwrap(); + let reply = receiver.try_recv().unwrap(); + assert_eq!(reply, NotSend(ptr::null_mut())); +} + +#[test] +fn message_in_channel_dropped_on_receiver_drop() { + 
maybe_loom_model(|| { + let (sender, receiver) = oneshot::channel(); + let (message, counter) = DropCounter::new(()); + assert_eq!(counter.count(), 0); + sender.send(message).unwrap(); + assert_eq!(counter.count(), 0); + mem::drop(receiver); + assert_eq!(counter.count(), 1); + }) +} + +#[test] +fn send_error_drops_message_correctly() { + maybe_loom_model(|| { + let (sender, _) = oneshot::channel(); + let (message, counter) = DropCounter::new(()); + + let send_error = sender.send(message).unwrap_err(); + assert_eq!(counter.count(), 0); + mem::drop(send_error); + assert_eq!(counter.count(), 1); + }); +} + +#[test] +fn send_error_drops_message_correctly_on_into_inner() { + maybe_loom_model(|| { + let (sender, _) = oneshot::channel(); + let (message, counter) = DropCounter::new(()); + + let send_error = sender.send(message).unwrap_err(); + assert_eq!(counter.count(), 0); + let message = send_error.into_inner(); + assert_eq!(counter.count(), 0); + mem::drop(message); + assert_eq!(counter.count(), 1); + }); +} + +#[test] +fn dropping_receiver_disconnects_sender() { + maybe_loom_model(|| { + let (sender, receiver) = oneshot::channel::<()>(); + assert!(!sender.is_closed()); + assert!(!receiver.is_closed()); + drop(receiver); + assert!(sender.is_closed()); + }); +} + +#[test] +fn dropping_sender_disconnects_receiver() { + maybe_loom_model(|| { + let (sender, receiver) = oneshot::channel::<()>(); + assert!(!sender.is_closed()); + assert!(!receiver.is_closed()); + drop(sender); + assert!(receiver.is_closed()); + }); +} diff --git a/toolkit/library/rust/shared/Cargo.toml b/toolkit/library/rust/shared/Cargo.toml @@ -125,6 +125,7 @@ osclientcerts = { path = "../../../../security/manager/ssl/osclientcerts" } gkrust-uniffi-components = { path = "../../../components/uniffi-bindgen-gecko-js/components/", features = ["xpcom"] } uniffi-bindgen-gecko-js-test-fixtures = { path = "../../../components/uniffi-bindgen-gecko-js/test-fixtures/", optional = true } viaduct = "0.1" 
+viaduct-necko = { path = "../../../../services/application-services/components/viaduct-necko" } webext-storage = "0.1" [target.'cfg(target_os = "windows")'.dependencies] diff --git a/toolkit/library/rust/shared/lib.rs b/toolkit/library/rust/shared/lib.rs @@ -107,6 +107,8 @@ extern crate uniffi_bindgen_gecko_js_test_fixtures; #[cfg(not(target_os = "android"))] extern crate viaduct; +#[cfg(not(target_os = "android"))] +extern crate viaduct_necko; extern crate gecko_logger; extern crate gecko_tracing; @@ -162,6 +164,10 @@ pub extern "C" fn GkRust_Init() { let _ = GeckoLogger::init(); // Initialize tracing. gecko_tracing::initialize_tracing(); + #[cfg(not(target_os = "android"))] + if let Err(e) = viaduct_necko::init_necko_backend() { + log::warn!("Failed to initialize viaduct-necko backend: {:?}", e); + } } #[no_mangle]