PannerNode.cpp (26994B)
/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim:set ts=2 sw=2 sts=2 et cindent: */
/* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#include "PannerNode.h"

#include "AlignmentUtils.h"
#include "AudioBufferSourceNode.h"
#include "AudioDestinationNode.h"
#include "AudioListener.h"
#include "AudioNodeEngine.h"
#include "AudioNodeTrack.h"
#include "PanningUtils.h"
#include "PlayingRefChangeHandler.h"
#include "Tracing.h"
#include "blink/HRTFDatabaseLoader.h"
#include "blink/HRTFPanner.h"

using WebCore::HRTFDatabaseLoader;
using WebCore::HRTFPanner;

namespace mozilla::dom {

// Cycle collection: the six AudioParam members can participate in JS-visible
// reference cycles, so they are unlinked/traversed in addition to what
// AudioNode already handles.
NS_IMPL_CYCLE_COLLECTION_CLASS(PannerNode)
NS_IMPL_CYCLE_COLLECTION_UNLINK_BEGIN_INHERITED(PannerNode, AudioNode)
NS_IMPL_CYCLE_COLLECTION_UNLINK(mPositionX, mPositionY, mPositionZ,
                                mOrientationX, mOrientationY, mOrientationZ)
NS_IMPL_CYCLE_COLLECTION_UNLINK_END
NS_IMPL_CYCLE_COLLECTION_TRAVERSE_BEGIN_INHERITED(PannerNode, AudioNode)
NS_IMPL_CYCLE_COLLECTION_TRAVERSE(mPositionX, mPositionY, mPositionZ,
                                  mOrientationX, mOrientationY, mOrientationZ)
NS_IMPL_CYCLE_COLLECTION_TRAVERSE_END

NS_INTERFACE_MAP_BEGIN_CYCLE_COLLECTION(PannerNode)
NS_INTERFACE_MAP_END_INHERITING(AudioNode)

NS_IMPL_ADDREF_INHERITED(PannerNode, AudioNode)
NS_IMPL_RELEASE_INHERITED(PannerNode, AudioNode)

// Rendering-thread engine for PannerNode.  It is constructed on the main
// thread, but ProcessBlock() runs on the MediaTrackGraph rendering thread;
// parameter changes reach it through the Set*Parameter/RecvTimelineEvent
// message overrides below.
class PannerNodeEngine final : public AudioNodeEngine {
 public:
  explicit PannerNodeEngine(AudioNode* aNode,
                            AudioDestinationNode* aDestination,
                            AudioListenerEngine* aListenerEngine)
      : AudioNodeEngine(aNode),
        mDestination(aDestination->Track()),
        mListenerEngine(aListenerEngine)
        // Please keep these default values consistent with
        // PannerNode::PannerNode below.
        ,
        mPanningModelFunction(&PannerNodeEngine::EqualPowerPanningFunction),
        mDistanceModelFunction(&PannerNodeEngine::InverseGainFunction),
        mPositionX(0.),
        mPositionY(0.),
        mPositionZ(0.),
        mOrientationX(1.),
        mOrientationY(0.),
        mOrientationZ(0.),
        mRefDistance(1.),
        mMaxDistance(10000.),
        mRolloffFactor(1.),
        mConeInnerAngle(360.),
        mConeOuterAngle(360.),
        mConeOuterGain(0.),
        // INT_MIN is the sentinel for "no HRTF tail-time reference held";
        // see ProcessBlock and IsActive below.
        mLeftOverData(INT_MIN) {}

  // Routes an automation event for one of the six a-rate parameters to the
  // matching timeline, converting its times from seconds to graph ticks
  // first.
  void RecvTimelineEvent(uint32_t aIndex, AudioParamEvent& aEvent) override {
    MOZ_ASSERT(mDestination);
    aEvent.ConvertToTicks(mDestination);
    switch (aIndex) {
      case PannerNode::POSITIONX:
        mPositionX.InsertEvent<int64_t>(aEvent);
        break;
      case PannerNode::POSITIONY:
        mPositionY.InsertEvent<int64_t>(aEvent);
        break;
      case PannerNode::POSITIONZ:
        mPositionZ.InsertEvent<int64_t>(aEvent);
        break;
      case PannerNode::ORIENTATIONX:
        mOrientationX.InsertEvent<int64_t>(aEvent);
        break;
      case PannerNode::ORIENTATIONY:
        mOrientationY.InsertEvent<int64_t>(aEvent);
        break;
      case PannerNode::ORIENTATIONZ:
        mOrientationZ.InsertEvent<int64_t>(aEvent);
        break;
      default:
        NS_ERROR("Bad PannerNode TimelineParameter");
    }
  }

  // Lazily creates the HRTF panner.  Main-thread only: see the comment on
  // the mHRTFPanner member for why this is safe despite the member being
  // read on the rendering thread.
  void CreateHRTFPanner() {
    MOZ_ASSERT(NS_IsMainThread());
    if (mHRTFPanner) {
      return;
    }
    // HRTFDatabaseLoader needs to be fetched on the main thread.
    RefPtr<HRTFDatabaseLoader> loader =
        HRTFDatabaseLoader::createAndLoadAsynchronouslyIfNecessary(
            NodeMainThread()->Context()->SampleRate());
    mHRTFPanner = MakeUnique<HRTFPanner>(
        NodeMainThread()->Context()->SampleRate(), loader.forget());
  }

  // Selects the panning-model or distance-model member-function pointer used
  // by ProcessBlock, in response to a control message from the main thread.
  void SetInt32Parameter(uint32_t aIndex, int32_t aParam) override {
    switch (aIndex) {
      case PannerNode::PANNING_MODEL:
        switch (PanningModelType(aParam)) {
          case PanningModelType::Equalpower:
            mPanningModelFunction =
                &PannerNodeEngine::EqualPowerPanningFunction;
            break;
          case PanningModelType::HRTF:
            mPanningModelFunction = &PannerNodeEngine::HRTFPanningFunction;
            break;
          default:
            MOZ_ASSERT_UNREACHABLE("We should never see alternate names here");
            break;
        }
        break;
      case PannerNode::DISTANCE_MODEL:
        switch (DistanceModelType(aParam)) {
          case DistanceModelType::Inverse:
            mDistanceModelFunction = &PannerNodeEngine::InverseGainFunction;
            break;
          case DistanceModelType::Linear:
            mDistanceModelFunction = &PannerNodeEngine::LinearGainFunction;
            break;
          case DistanceModelType::Exponential:
            mDistanceModelFunction = &PannerNodeEngine::ExponentialGainFunction;
            break;
          default:
            MOZ_ASSERT_UNREACHABLE("We should never see alternate names here");
            break;
        }
        break;
      default:
        NS_ERROR("Bad PannerNodeEngine Int32Parameter");
    }
  }

  // Updates one of the scalar (k-rate) attributes mirrored from PannerNode.
  void SetDoubleParameter(uint32_t aIndex, double aParam) override {
    switch (aIndex) {
      case PannerNode::REF_DISTANCE:
        mRefDistance = aParam;
        break;
      case PannerNode::MAX_DISTANCE:
        mMaxDistance = aParam;
        break;
      case PannerNode::ROLLOFF_FACTOR:
        mRolloffFactor = aParam;
        break;
      case PannerNode::CONE_INNER_ANGLE:
        mConeInnerAngle = aParam;
        break;
      case PannerNode::CONE_OUTER_ANGLE:
        mConeOuterAngle = aParam;
        break;
      case PannerNode::CONE_OUTER_GAIN:
        mConeOuterGain = aParam;
        break;
      default:
        NS_ERROR("Bad PannerNodeEngine DoubleParameter");
    }
  }

  // Renders one block.  Also manages the HRTF "tail time": while the HRTF
  // model has unflushed tail data, the track holds a self-reference
  // (PlayingRefChangeHandler) so it is not considered inactive.
  void ProcessBlock(AudioNodeTrack* aTrack, GraphTime aFrom,
                    const AudioBlock& aInput, AudioBlock* aOutput,
                    bool* aFinished) override {
    TRACE("PannerNodeEngine::ProcessBlock");

    if (aInput.IsNull()) {
      // mLeftOverData != INT_MIN means that the panning model was HRTF and a
      // tail-time reference was added.  Even if the model is now equalpower,
      // the reference will need to be removed.
      if (mLeftOverData > 0 &&
          mPanningModelFunction == &PannerNodeEngine::HRTFPanningFunction) {
        mLeftOverData -= WEBAUDIO_BLOCK_SIZE;
      } else {
        if (mLeftOverData != INT_MIN) {
          // Tail fully drained (or model switched away from HRTF): drop the
          // self-reference on the main thread and reset the panner state.
          mLeftOverData = INT_MIN;
          aTrack->ScheduleCheckForInactive();
          mHRTFPanner->reset();

          RefPtr<PlayingRefChangeHandler> refchanged =
              new PlayingRefChangeHandler(aTrack,
                                          PlayingRefChangeHandler::RELEASE);
          aTrack->Graph()->DispatchToMainThreadStableState(refchanged.forget());
        }
        aOutput->SetNull(WEBAUDIO_BLOCK_SIZE);
        return;
      }
    } else if (mPanningModelFunction ==
               &PannerNodeEngine::HRTFPanningFunction) {
      if (mLeftOverData == INT_MIN) {
        // First non-null block under HRTF: take the tail-time reference.
        RefPtr<PlayingRefChangeHandler> refchanged =
            new PlayingRefChangeHandler(aTrack,
                                        PlayingRefChangeHandler::ADDREF);
        aTrack->Graph()->DispatchToMainThreadStableState(refchanged.forget());
      }
      mLeftOverData = mHRTFPanner->maxTailFrames();
    }

    TrackTime tick = mDestination->GraphTimeToTrackTime(aFrom);
    // Dispatch to the currently-selected panning model.
    (this->*mPanningModelFunction)(aInput, aOutput, tick);
  }

  // The engine stays active while an HRTF tail-time reference is held.
  bool IsActive() const override { return mLeftOverData != INT_MIN; }

  // Computes the spec-defined azimuth/elevation of aPosition relative to the
  // listener (degrees).
  void ComputeAzimuthAndElevation(const ThreeDPoint& position, float& aAzimuth,
                                  float& aElevation);
  // Computes the sound-cone attenuation for a source at `position` facing
  // `orientation`.
  float ComputeConeGain(const ThreeDPoint& position,
                        const ThreeDPoint& orientation);
  // Compute how much the distance contributes to the gain reduction.
  double ComputeDistanceGain(const ThreeDPoint& position);

  void EqualPowerPanningFunction(const AudioBlock& aInput, AudioBlock* aOutput,
                                 TrackTime tick);
  void HRTFPanningFunction(const AudioBlock& aInput, AudioBlock* aOutput,
                           TrackTime tick);

  // The three distance models from the spec.
  float LinearGainFunction(double aDistance);
  float InverseGainFunction(double aDistance);
  float ExponentialGainFunction(double aDistance);

  // Samples the three timelines at `tick` and packs them into a ThreeDPoint.
  ThreeDPoint ConvertAudioParamTimelineTo3DP(AudioParamTimeline& aX,
                                             AudioParamTimeline& aY,
                                             AudioParamTimeline& aZ,
                                             TrackTime& tick);

  size_t SizeOfExcludingThis(MallocSizeOf aMallocSizeOf) const override {
    size_t amount = AudioNodeEngine::SizeOfExcludingThis(aMallocSizeOf);
    if (mHRTFPanner) {
      amount += mHRTFPanner->sizeOfIncludingThis(aMallocSizeOf);
    }

    return amount;
  }

  size_t SizeOfIncludingThis(MallocSizeOf aMallocSizeOf) const override {
    return aMallocSizeOf(this) + SizeOfExcludingThis(aMallocSizeOf);
  }

  RefPtr<AudioNodeTrack> mDestination;
  // This member is set on the main thread, but is not accessed on the rendering
  // thread until mPanningModelFunction has changed, and this happens strictly
  // later, via a MediaTrackGraph ControlMessage.
  UniquePtr<HRTFPanner> mHRTFPanner;
  RefPtr<AudioListenerEngine> mListenerEngine;
  using PanningModelFunction = void (PannerNodeEngine::*)(const AudioBlock&,
                                                          AudioBlock*,
                                                          TrackTime);
  PanningModelFunction mPanningModelFunction;
  using DistanceModelFunction = float (PannerNodeEngine::*)(double);
  DistanceModelFunction mDistanceModelFunction;
  // A-rate parameters, sampled per tick (or per block when simple).
  AudioParamTimeline mPositionX;
  AudioParamTimeline mPositionY;
  AudioParamTimeline mPositionZ;
  AudioParamTimeline mOrientationX;
  AudioParamTimeline mOrientationY;
  AudioParamTimeline mOrientationZ;
  // Scalar attributes mirrored from the DOM node via SetDoubleParameter.
  double mRefDistance;
  double mMaxDistance;
  double mRolloffFactor;
  double mConeInnerAngle;
  double mConeOuterAngle;
  double mConeOuterGain;
  // Remaining HRTF tail frames; INT_MIN when no tail reference is held.
  int mLeftOverData;
};

PannerNode::PannerNode(AudioContext* aContext)
    : AudioNode(aContext, 2, ChannelCountMode::Clamped_max,
                ChannelInterpretation::Speakers)
      // Please keep these default values consistent with
      // PannerNodeEngine::PannerNodeEngine above.
      ,
      mPanningModel(PanningModelType::Equalpower),
      mDistanceModel(DistanceModelType::Inverse),
      mRefDistance(1.),
      mMaxDistance(10000.),
      mRolloffFactor(1.),
      mConeInnerAngle(360.),
      mConeOuterAngle(360.),
      mConeOuterGain(0.) {
  mPositionX = CreateAudioParam(PannerNode::POSITIONX, u"PositionX"_ns, 0.f);
  mPositionY = CreateAudioParam(PannerNode::POSITIONY, u"PositionY"_ns, 0.f);
  mPositionZ = CreateAudioParam(PannerNode::POSITIONZ, u"PositionZ"_ns, 0.f);
  mOrientationX =
      CreateAudioParam(PannerNode::ORIENTATIONX, u"OrientationX"_ns, 1.0f);
  mOrientationY =
      CreateAudioParam(PannerNode::ORIENTATIONY, u"OrientationY"_ns, 0.f);
  mOrientationZ =
      CreateAudioParam(PannerNode::ORIENTATIONZ, u"OrientationZ"_ns, 0.f);
  mTrack = AudioNodeTrack::Create(
      aContext,
      new PannerNodeEngine(this, aContext->Destination(),
                           aContext->Listener()->Engine()),
      AudioNodeTrack::NO_TRACK_FLAGS, aContext->Graph());
}

// Constructor-dictionary entry point (new PannerNode(ctx, options)).
// Validates each option through the same setters the DOM attributes use, so
// out-of-range values raise the same errors.
/* static */
already_AddRefed<PannerNode> PannerNode::Create(AudioContext& aAudioContext,
                                                const PannerOptions& aOptions,
                                                ErrorResult& aRv) {
  RefPtr<PannerNode> audioNode = new PannerNode(&aAudioContext);

  audioNode->Initialize(aOptions, aRv);
  if (NS_WARN_IF(aRv.Failed())) {
    return nullptr;
  }

  audioNode->SetPanningModel(aOptions.mPanningModel);
  audioNode->SetDistanceModel(aOptions.mDistanceModel);
  audioNode->mPositionX->SetInitialValue(aOptions.mPositionX);
  audioNode->mPositionY->SetInitialValue(aOptions.mPositionY);
  audioNode->mPositionZ->SetInitialValue(aOptions.mPositionZ);
  audioNode->mOrientationX->SetInitialValue(aOptions.mOrientationX);
  audioNode->mOrientationY->SetInitialValue(aOptions.mOrientationY);
  audioNode->mOrientationZ->SetInitialValue(aOptions.mOrientationZ);
  audioNode->SetRefDistance(aOptions.mRefDistance, aRv);
  if (NS_WARN_IF(aRv.Failed())) {
    return nullptr;
  }
  audioNode->SetMaxDistance(aOptions.mMaxDistance, aRv);
  if (NS_WARN_IF(aRv.Failed())) {
    return nullptr;
  }
  audioNode->SetRolloffFactor(aOptions.mRolloffFactor, aRv);
  if (NS_WARN_IF(aRv.Failed())) {
    return nullptr;
  }
  audioNode->SetConeInnerAngle(aOptions.mConeInnerAngle);
  audioNode->SetConeOuterAngle(aOptions.mConeOuterAngle);
  audioNode->SetConeOuterGain(aOptions.mConeOuterGain, aRv);
  if (NS_WARN_IF(aRv.Failed())) {
    return nullptr;
  }

  return audioNode.forget();
}

void PannerNode::SetPanningModel(PanningModelType aPanningModel) {
  mPanningModel = aPanningModel;
  if (mPanningModel == PanningModelType::HRTF) {
    // We can set the engine's `mHRTFPanner` member here from the main thread,
    // because the engine will not touch it from the MediaTrackGraph
    // thread until the PANNING_MODEL message sent below is received.
    static_cast<PannerNodeEngine*>(mTrack->Engine())->CreateHRTFPanner();
  }
  SendInt32ParameterToTrack(PANNING_MODEL, int32_t(mPanningModel));
}

// Assigns aValue to aParam after a finiteness check; on a non-finite value
// throws a TypeError naming the coordinate ("x"/"y"/"z") and returns false.
static bool SetParamFromDouble(AudioParam* aParam, double aValue,
                               const char (&aParamName)[2], ErrorResult& aRv) {
  float value = static_cast<float>(aValue);
  if (!std::isfinite(value)) {
    aRv.ThrowTypeError<MSG_NOT_FINITE>(aParamName);
    return false;
  }
  aParam->SetValue(value, aRv);
  return !aRv.Failed();
}

// Legacy setPosition(x, y, z): stops at the first coordinate that fails.
void PannerNode::SetPosition(double aX, double aY, double aZ,
                             ErrorResult& aRv) {
  if (!SetParamFromDouble(mPositionX, aX, "x", aRv)) {
    return;
  }
  if (!SetParamFromDouble(mPositionY, aY, "y", aRv)) {
    return;
  }
  SetParamFromDouble(mPositionZ, aZ, "z", aRv);
}

// Legacy setOrientation(x, y, z): stops at the first coordinate that fails.
void PannerNode::SetOrientation(double aX, double aY, double aZ,
                                ErrorResult& aRv) {
  if (!SetParamFromDouble(mOrientationX, aX, "x", aRv)) {
    return;
  }
  if (!SetParamFromDouble(mOrientationY, aY, "y", aRv)) {
    return;
  }
  SetParamFromDouble(mOrientationZ, aZ, "z", aRv);
}

size_t PannerNode::SizeOfExcludingThis(MallocSizeOf aMallocSizeOf) const {
  return AudioNode::SizeOfExcludingThis(aMallocSizeOf);
}

size_t PannerNode::SizeOfIncludingThis(MallocSizeOf aMallocSizeOf) const {
  return aMallocSizeOf(this) + SizeOfExcludingThis(aMallocSizeOf);
}

JSObject* PannerNode::WrapObject(JSContext* aCx,
                                 JS::Handle<JSObject*> aGivenProto) {
  return PannerNode_Binding::Wrap(aCx, this, aGivenProto);
}

// Those three functions are described in the spec.
float PannerNodeEngine::LinearGainFunction(double aDistance) {
  // The linear model clamps rolloff to [0, 1] per the spec.
  double clampedRollof = std::clamp(mRolloffFactor, 0.0, 1.0);
  return AssertedCast<float>(
      1.0 - clampedRollof *
                (std::max(std::min(aDistance, mMaxDistance), mRefDistance) -
                 mRefDistance) /
                (mMaxDistance - mRefDistance));
}

float PannerNodeEngine::InverseGainFunction(double aDistance) {
  return mRefDistance /
         (mRefDistance +
          mRolloffFactor * (std::max(aDistance, mRefDistance) - mRefDistance));
}

float PannerNodeEngine::ExponentialGainFunction(double aDistance) {
  return fdlibm_pow(std::max(aDistance, mRefDistance) / mRefDistance,
                    -mRolloffFactor);
}

// HRTF model: convolves the input with head-related impulse responses chosen
// from the block-start azimuth/elevation.
void PannerNodeEngine::HRTFPanningFunction(const AudioBlock& aInput,
                                           AudioBlock* aOutput,
                                           TrackTime tick) {
  // The output of this node is always stereo, no matter what the inputs are.
  aOutput->AllocateChannels(2);

  float azimuth, elevation;

  ThreeDPoint position =
      ConvertAudioParamTimelineTo3DP(mPositionX, mPositionY, mPositionZ, tick);
  ThreeDPoint orientation = ConvertAudioParamTimelineTo3DP(
      mOrientationX, mOrientationY, mOrientationZ, tick);
  if (!orientation.IsZero()) {
    orientation.Normalize();
  }
  ComputeAzimuthAndElevation(position, azimuth, elevation);

  AudioBlock input = aInput;
  // Gain is applied before the delay and convolution of the HRTF.
  input.mVolume *=
      ComputeConeGain(position, orientation) * ComputeDistanceGain(position);

  mHRTFPanner->pan(azimuth, elevation, &input, aOutput);
}

ThreeDPoint PannerNodeEngine::ConvertAudioParamTimelineTo3DP(
    AudioParamTimeline& aX, AudioParamTimeline& aY, AudioParamTimeline& aZ,
    TrackTime& tick) {
  return ThreeDPoint(aX.GetValueAtTime(tick), aY.GetValueAtTime(tick),
                     aZ.GetValueAtTime(tick));
}

// Equal-power model: per the spec, maps azimuth to a left/right gain pair
// (cos/sin of the normalized azimuth) and scales by distance and cone gains.
void PannerNodeEngine::EqualPowerPanningFunction(const AudioBlock& aInput,
                                                 AudioBlock* aOutput,
                                                 TrackTime tick) {
  float azimuth, elevation, gainL, gainR, normalizedAzimuth, distanceGain,
      coneGain;
  int inputChannels = aInput.ChannelCount();

  // Optimize the case where the position and orientation is constant for this
  // processing block: we can just apply a constant gain on the left and right
  // channel
  if (mPositionX.HasSimpleValue() && mPositionY.HasSimpleValue() &&
      mPositionZ.HasSimpleValue() && mOrientationX.HasSimpleValue() &&
      mOrientationY.HasSimpleValue() && mOrientationZ.HasSimpleValue()) {
    ThreeDPoint position(mPositionX.GetValue(), mPositionY.GetValue(),
                         mPositionZ.GetValue());
    ThreeDPoint orientation(mOrientationX.GetValue(), mOrientationY.GetValue(),
                            mOrientationZ.GetValue());
    if (!orientation.IsZero()) {
      orientation.Normalize();
    }

    // For a stereo source, when both the listener and the panner are in
    // the same spot, and no cone gain is specified, this node is noop.
    if (inputChannels == 2 && mListenerEngine->Position() == position &&
        mConeInnerAngle == 360 && mConeOuterAngle == 360) {
      *aOutput = aInput;
      return;
    }

    ComputeAzimuthAndElevation(position, azimuth, elevation);
    coneGain = ComputeConeGain(position, orientation);

    // The following algorithm is described in the spec.
    // Clamp azimuth in the [-90, 90] range.
    azimuth = std::min(180.f, std::max(-180.f, azimuth));

    // Wrap around
    if (azimuth < -90.f) {
      azimuth = -180.f - azimuth;
    } else if (azimuth > 90) {
      azimuth = 180.f - azimuth;
    }

    // Normalize the value in the [0, 1] range.
    if (inputChannels == 1) {
      normalizedAzimuth = (azimuth + 90.f) / 180.f;
    } else {
      if (azimuth <= 0) {
        normalizedAzimuth = (azimuth + 90.f) / 90.f;
      } else {
        normalizedAzimuth = azimuth / 90.f;
      }
    }

    distanceGain = ComputeDistanceGain(position);

    // Actually compute the left and right gain.
    gainL = fdlibm_cos(0.5 * M_PI * normalizedAzimuth);
    gainR = fdlibm_sin(0.5 * M_PI * normalizedAzimuth);

    // Compute the output.
    ApplyStereoPanning(aInput, aOutput, gainL, gainR, azimuth <= 0);

    aOutput->mVolume *= distanceGain * coneGain;
  } else {
    // A-rate path: at least one parameter has automation, so gains must be
    // computed per sample frame.
    float positionX[WEBAUDIO_BLOCK_SIZE];
    float positionY[WEBAUDIO_BLOCK_SIZE];
    float positionZ[WEBAUDIO_BLOCK_SIZE];
    float orientationX[WEBAUDIO_BLOCK_SIZE];
    float orientationY[WEBAUDIO_BLOCK_SIZE];
    float orientationZ[WEBAUDIO_BLOCK_SIZE];

    // For each coordinate, fill the whole buffer when automated, otherwise
    // only slot [0] is written (and read below via HasSimpleValue()).
    if (!mPositionX.HasSimpleValue()) {
      mPositionX.GetValuesAtTime(tick, positionX, WEBAUDIO_BLOCK_SIZE);
    } else {
      positionX[0] = mPositionX.GetValue();
    }
    if (!mPositionY.HasSimpleValue()) {
      mPositionY.GetValuesAtTime(tick, positionY, WEBAUDIO_BLOCK_SIZE);
    } else {
      positionY[0] = mPositionY.GetValue();
    }
    if (!mPositionZ.HasSimpleValue()) {
      mPositionZ.GetValuesAtTime(tick, positionZ, WEBAUDIO_BLOCK_SIZE);
    } else {
      positionZ[0] = mPositionZ.GetValue();
    }
    if (!mOrientationX.HasSimpleValue()) {
      mOrientationX.GetValuesAtTime(tick, orientationX, WEBAUDIO_BLOCK_SIZE);
    } else {
      orientationX[0] = mOrientationX.GetValue();
    }
    if (!mOrientationY.HasSimpleValue()) {
      mOrientationY.GetValuesAtTime(tick, orientationY, WEBAUDIO_BLOCK_SIZE);
    } else {
      orientationY[0] = mOrientationY.GetValue();
    }
    if (!mOrientationZ.HasSimpleValue()) {
      mOrientationZ.GetValuesAtTime(tick, orientationZ, WEBAUDIO_BLOCK_SIZE);
    } else {
      orientationZ[0] = mOrientationZ.GetValue();
    }

    // Three 16-byte-aligned scratch buffers carved out of one stack array
    // (+4 floats of slack for the alignment adjustment).
    float buffer[3 * WEBAUDIO_BLOCK_SIZE + 4];
    alignas(16) bool onLeft[WEBAUDIO_BLOCK_SIZE];

    float* alignedPanningL = ALIGNED16(buffer);
    float* alignedPanningR = alignedPanningL + WEBAUDIO_BLOCK_SIZE;
    float* alignedGain = alignedPanningR + WEBAUDIO_BLOCK_SIZE;
    ASSERT_ALIGNED16(alignedPanningL);
    ASSERT_ALIGNED16(alignedPanningR);
    ASSERT_ALIGNED16(alignedGain);

    for (size_t counter = 0; counter < WEBAUDIO_BLOCK_SIZE; ++counter) {
      ThreeDPoint position(
          mPositionX.HasSimpleValue() ? positionX[0] : positionX[counter],
          mPositionY.HasSimpleValue() ? positionY[0] : positionY[counter],
          mPositionZ.HasSimpleValue() ? positionZ[0] : positionZ[counter]);
      ThreeDPoint orientation(
          mOrientationX.HasSimpleValue() ? orientationX[0]
                                         : orientationX[counter],
          mOrientationY.HasSimpleValue() ? orientationY[0]
                                         : orientationY[counter],
          mOrientationZ.HasSimpleValue() ? orientationZ[0]
                                         : orientationZ[counter]);
      if (!orientation.IsZero()) {
        orientation.Normalize();
      }

      ComputeAzimuthAndElevation(position, azimuth, elevation);
      coneGain = ComputeConeGain(position, orientation);

      // The following algorithm is described in the spec.
      // Clamp azimuth in the [-90, 90] range.
      azimuth = std::min(180.f, std::max(-180.f, azimuth));

      // Wrap around
      if (azimuth < -90.f) {
        azimuth = -180.f - azimuth;
      } else if (azimuth > 90) {
        azimuth = 180.f - azimuth;
      }

      // Normalize the value in the [0, 1] range.
      if (inputChannels == 1) {
        normalizedAzimuth = (azimuth + 90.f) / 180.f;
      } else {
        if (azimuth <= 0) {
          normalizedAzimuth = (azimuth + 90.f) / 90.f;
        } else {
          normalizedAzimuth = azimuth / 90.f;
        }
      }

      distanceGain = ComputeDistanceGain(position);

      // Actually compute the left and right gain.
      float gainL = fdlibm_cos(0.5 * M_PI * normalizedAzimuth);
      float gainR = fdlibm_sin(0.5 * M_PI * normalizedAzimuth);

      alignedPanningL[counter] = gainL;
      alignedPanningR[counter] = gainR;
      alignedGain[counter] = distanceGain * coneGain;
      onLeft[counter] = azimuth <= 0;
    }

    // Apply the panning to the output buffer
    ApplyStereoPanning(aInput, aOutput, alignedPanningL, alignedPanningR,
                       onLeft);

    // Apply the input volume, cone and distance gain to the output buffer.
    float* outputL = aOutput->ChannelFloatsForWrite(0);
    float* outputR = aOutput->ChannelFloatsForWrite(1);
    AudioBlockInPlaceScale(outputL, alignedGain);
    AudioBlockInPlaceScale(outputR, alignedGain);
  }
}

// This algorithm is specified in the webaudio spec.
void PannerNodeEngine::ComputeAzimuthAndElevation(const ThreeDPoint& position,
                                                  float& aAzimuth,
                                                  float& aElevation) {
  ThreeDPoint sourceListener = position - mListenerEngine->Position();
  if (sourceListener.IsZero()) {
    // Source and listener coincide: direction is undefined, use 0/0.
    aAzimuth = 0.0;
    aElevation = 0.0;
    return;
  }

  sourceListener.Normalize();

  // Project the source-listener vector on the x-z plane.
  const ThreeDPoint& listenerFront = mListenerEngine->FrontVector();
  const ThreeDPoint& listenerRight = mListenerEngine->RightVector();
  ThreeDPoint up = listenerRight.CrossProduct(listenerFront);

  double upProjection = sourceListener.DotProduct(up);
  aElevation = 90 - 180 * fdlibm_acos(upProjection) / M_PI;

  if (aElevation > 90) {
    aElevation = 180 - aElevation;
  } else if (aElevation < -90) {
    aElevation = -180 - aElevation;
  }

  ThreeDPoint projectedSource = sourceListener - up * upProjection;
  if (projectedSource.IsZero()) {
    // source - listener direction is up or down.
    aAzimuth = 0.0;
    return;
  }
  projectedSource.Normalize();

  // Actually compute the angle, and convert to degrees
  double projection = projectedSource.DotProduct(listenerRight);
  aAzimuth = 180 * fdlibm_acos(projection) / M_PI;

  // Compute whether the source is in front or behind the listener.
  double frontBack = projectedSource.DotProduct(listenerFront);
  if (frontBack < 0) {
    aAzimuth = 360 - aAzimuth;
  }
  // Rotate the azimuth so it is relative to the listener front vector instead
  // of the right vector.
  if ((aAzimuth >= 0) && (aAzimuth <= 270)) {
    aAzimuth = 90 - aAzimuth;
  } else {
    aAzimuth = 450 - aAzimuth;
  }
}

// This algorithm is described in the WebAudio spec.
687 float PannerNodeEngine::ComputeConeGain(const ThreeDPoint& position, 688 const ThreeDPoint& orientation) { 689 // Omnidirectional source 690 if (orientation.IsZero() || 691 ((mConeInnerAngle == 360) && (mConeOuterAngle == 360))) { 692 return 1; 693 } 694 695 // Normalized source-listener vector 696 ThreeDPoint sourceToListener = mListenerEngine->Position() - position; 697 sourceToListener.Normalize(); 698 699 // Angle between the source orientation vector and the source-listener vector 700 double dotProduct = sourceToListener.DotProduct(orientation); 701 double angle = 180 * fdlibm_acos(dotProduct) / M_PI; 702 double absAngle = fabs(angle); 703 704 // Divide by 2 here since API is entire angle (not half-angle) 705 double absInnerAngle = fabs(mConeInnerAngle) / 2; 706 double absOuterAngle = fabs(mConeOuterAngle) / 2; 707 double gain = 1; 708 709 if (absAngle <= absInnerAngle) { 710 // No attenuation 711 gain = 1; 712 } else if (absAngle >= absOuterAngle) { 713 // Max attenuation 714 gain = mConeOuterGain; 715 } else { 716 // Between inner and outer cones 717 // inner -> outer, x goes from 0 -> 1 718 double x = (absAngle - absInnerAngle) / (absOuterAngle - absInnerAngle); 719 gain = (1 - x) + mConeOuterGain * x; 720 } 721 722 return gain; 723 } 724 725 double PannerNodeEngine::ComputeDistanceGain(const ThreeDPoint& position) { 726 ThreeDPoint distanceVec = position - mListenerEngine->Position(); 727 float distance = sqrt(distanceVec.DotProduct(distanceVec)); 728 return std::max(0.0f, (this->*mDistanceModelFunction)(distance)); 729 } 730 731 } // namespace mozilla::dom