// AudioNodeExternalInputTrack.cpp (7970B)
1 /* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*-*/ 2 /* This Source Code Form is subject to the terms of the Mozilla Public 3 * License, v. 2.0. If a copy of the MPL was not distributed with this file, 4 * You can obtain one at http://mozilla.org/MPL/2.0/. */ 5 6 #include "AudioNodeExternalInputTrack.h" 7 8 #include "AlignedTArray.h" 9 #include "AlignmentUtils.h" 10 #include "AudioChannelFormat.h" 11 #include "AudioNodeEngine.h" 12 #include "mozilla/dom/MediaStreamAudioSourceNode.h" 13 14 using namespace mozilla::dom; 15 16 namespace mozilla { 17 18 AudioNodeExternalInputTrack::AudioNodeExternalInputTrack( 19 AudioNodeEngine* aEngine, TrackRate aSampleRate) 20 : AudioNodeTrack(aEngine, NO_TRACK_FLAGS, aSampleRate) { 21 MOZ_COUNT_CTOR(AudioNodeExternalInputTrack); 22 } 23 24 AudioNodeExternalInputTrack::~AudioNodeExternalInputTrack() { 25 MOZ_COUNT_DTOR(AudioNodeExternalInputTrack); 26 } 27 28 /* static */ 29 already_AddRefed<AudioNodeExternalInputTrack> 30 AudioNodeExternalInputTrack::Create(MediaTrackGraph* aGraph, 31 AudioNodeEngine* aEngine) { 32 AudioContext* ctx = aEngine->NodeMainThread()->Context(); 33 MOZ_ASSERT(NS_IsMainThread()); 34 MOZ_ASSERT(aGraph == ctx->Graph()); 35 36 RefPtr<AudioNodeExternalInputTrack> track = 37 new AudioNodeExternalInputTrack(aEngine, aGraph->GraphRate()); 38 track->mSuspendedCount += ctx->ShouldSuspendNewTrack(); 39 aGraph->AddTrack(track); 40 return track.forget(); 41 } 42 43 /** 44 * Copies the data in aInput to aOffsetInBlock within aBlock. 45 * aBlock must have been allocated with AllocateInputBlock and have a channel 46 * count that's a superset of the channels in aInput. 
47 */ 48 template <typename T> 49 static void CopyChunkToBlock(AudioChunk& aInput, AudioBlock* aBlock, 50 uint32_t aOffsetInBlock) { 51 uint32_t blockChannels = aBlock->ChannelCount(); 52 AutoTArray<const T*, 2> channels; 53 if (aInput.IsNull()) { 54 channels.SetLength(blockChannels); 55 PodZero(channels.Elements(), blockChannels); 56 } else { 57 Span inputChannels = aInput.ChannelData<T>(); 58 channels.SetLength(inputChannels.Length()); 59 PodCopy(channels.Elements(), inputChannels.Elements(), channels.Length()); 60 if (channels.Length() != blockChannels) { 61 // We only need to upmix here because aBlock's channel count has been 62 // chosen to be a superset of the channel count of every chunk. 63 AudioChannelsUpMix(&channels, blockChannels, static_cast<T*>(nullptr)); 64 } 65 } 66 67 for (uint32_t c = 0; c < blockChannels; ++c) { 68 float* outputData = aBlock->ChannelFloatsForWrite(c) + aOffsetInBlock; 69 if (channels[c]) { 70 ConvertAudioSamplesWithScale(channels[c], outputData, 71 aInput.GetDuration(), aInput.mVolume); 72 } else { 73 PodZero(outputData, aInput.GetDuration()); 74 } 75 } 76 } 77 78 /** 79 * Converts the data in aSegment to a single chunk aBlock. aSegment must have 80 * duration WEBAUDIO_BLOCK_SIZE. aFallbackChannelCount is a superset of the 81 * channels in every chunk of aSegment. aBlock must be float format or null. 
82 */ 83 static void ConvertSegmentToAudioBlock(AudioSegment* aSegment, 84 AudioBlock* aBlock, 85 int32_t aFallbackChannelCount) { 86 NS_ASSERTION(aSegment->GetDuration() == WEBAUDIO_BLOCK_SIZE, 87 "Bad segment duration"); 88 89 { 90 AudioSegment::ChunkIterator ci(*aSegment); 91 NS_ASSERTION(!ci.IsEnded(), "Should be at least one chunk!"); 92 if (ci->GetDuration() == WEBAUDIO_BLOCK_SIZE && 93 (ci->IsNull() || ci->mBufferFormat == AUDIO_FORMAT_FLOAT32)) { 94 bool aligned = true; 95 for (size_t i = 0; i < ci->mChannelData.Length(); ++i) { 96 if (!IS_ALIGNED16(ci->mChannelData[i])) { 97 aligned = false; 98 break; 99 } 100 } 101 102 // Return this chunk directly to avoid copying data. 103 if (aligned) { 104 *aBlock = *ci; 105 return; 106 } 107 } 108 } 109 110 aBlock->AllocateChannels(aFallbackChannelCount); 111 112 uint32_t duration = 0; 113 for (AudioSegment::ChunkIterator ci(*aSegment); !ci.IsEnded(); ci.Next()) { 114 switch (ci->mBufferFormat) { 115 case AUDIO_FORMAT_S16: { 116 CopyChunkToBlock<int16_t>(*ci, aBlock, duration); 117 break; 118 } 119 case AUDIO_FORMAT_FLOAT32: { 120 CopyChunkToBlock<float>(*ci, aBlock, duration); 121 break; 122 } 123 case AUDIO_FORMAT_SILENCE: { 124 // The actual type of the sample does not matter here, but we still need 125 // to send some audio to the graph. 126 CopyChunkToBlock<float>(*ci, aBlock, duration); 127 break; 128 } 129 } 130 duration += ci->GetDuration(); 131 } 132 } 133 134 void AudioNodeExternalInputTrack::ProcessInput(GraphTime aFrom, GraphTime aTo, 135 uint32_t aFlags) { 136 // According to spec, number of outputs is always 1. 137 MOZ_ASSERT(mLastChunks.Length() == 1); 138 139 // GC stuff can result in our input track being destroyed before this track. 140 // Handle that. 
141 if (!IsEnabled() || mInputs.IsEmpty() || mPassThrough) { 142 mLastChunks[0].SetNull(WEBAUDIO_BLOCK_SIZE); 143 return; 144 } 145 146 MOZ_ASSERT(mInputs.Length() == 1); 147 148 MediaTrack* source = mInputs[0]->GetSource(); 149 AutoTArray<AudioSegment, 1> audioSegments; 150 uint32_t inputChannels = 0; 151 152 MOZ_ASSERT(source->GetData()->GetType() == MediaSegment::AUDIO, 153 "AudioNodeExternalInputTrack shouldn't have a video input"); 154 155 const AudioSegment& inputSegment = 156 *mInputs[0]->GetSource()->GetData<AudioSegment>(); 157 if (!inputSegment.IsNull()) { 158 AudioSegment& segment = *audioSegments.AppendElement(); 159 GraphTime next; 160 for (GraphTime t = aFrom; t < aTo; t = next) { 161 MediaInputPort::InputInterval interval = 162 MediaInputPort::GetNextInputInterval(mInputs[0], t); 163 interval.mEnd = std::min(interval.mEnd, aTo); 164 if (interval.mStart >= interval.mEnd) { 165 break; 166 } 167 next = interval.mEnd; 168 169 // We know this track does not block during the processing interval --- 170 // we're not finished, we don't underrun, and we're not suspended. 171 TrackTime outputStart = GraphTimeToTrackTime(interval.mStart); 172 TrackTime outputEnd = GraphTimeToTrackTime(interval.mEnd); 173 TrackTime ticks = outputEnd - outputStart; 174 175 if (interval.mInputIsBlocked) { 176 segment.AppendNullData(ticks); 177 } else { 178 // The input track is not blocked in this interval, so no need to call 179 // GraphTimeToTrackTimeWithBlocking. 
180 TrackTime inputStart = 181 std::min(inputSegment.GetDuration(), 182 source->GraphTimeToTrackTime(interval.mStart)); 183 TrackTime inputEnd = 184 std::min(inputSegment.GetDuration(), 185 source->GraphTimeToTrackTime(interval.mEnd)); 186 187 segment.AppendSlice(inputSegment, inputStart, inputEnd); 188 // Pad if we're looking past the end of the track 189 segment.AppendNullData(ticks - (inputEnd - inputStart)); 190 } 191 } 192 193 for (AudioSegment::ChunkIterator iter(segment); !iter.IsEnded(); 194 iter.Next()) { 195 inputChannels = 196 GetAudioChannelsSuperset(inputChannels, iter->ChannelCount()); 197 } 198 } 199 200 uint32_t accumulateIndex = 0; 201 if (inputChannels) { 202 DownmixBufferType downmixBuffer; 203 ASSERT_ALIGNED16(downmixBuffer.Elements()); 204 for (auto& audioSegment : audioSegments) { 205 AudioBlock tmpChunk; 206 ConvertSegmentToAudioBlock(&audioSegment, &tmpChunk, inputChannels); 207 if (!tmpChunk.IsNull()) { 208 if (accumulateIndex == 0) { 209 mLastChunks[0].AllocateChannels(inputChannels); 210 } 211 AccumulateInputChunk(accumulateIndex, tmpChunk, &mLastChunks[0], 212 &downmixBuffer); 213 accumulateIndex++; 214 } 215 } 216 } 217 if (accumulateIndex == 0) { 218 mLastChunks[0].SetNull(WEBAUDIO_BLOCK_SIZE); 219 } 220 } 221 222 bool AudioNodeExternalInputTrack::IsEnabled() { 223 return ((MediaStreamAudioSourceNodeEngine*)Engine())->IsEnabled(); 224 } 225 226 } // namespace mozilla