tor-browser

The Tor Browser
git clone https://git.dasho.dev/tor-browser.git
Log | Files | Refs | README | LICENSE

FFmpegAudioDecoder.cpp (20118B)


      1 /* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
      2 /* vim:set ts=2 sw=2 sts=2 et cindent: */
      3 /* This Source Code Form is subject to the terms of the Mozilla Public
      4 * License, v. 2.0. If a copy of the MPL was not distributed with this
      5 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
      6 
      7 #include "FFmpegAudioDecoder.h"
      8 
      9 #include "AudioSampleFormat.h"
     10 #include "BufferReader.h"
     11 #include "FFmpegLog.h"
     12 #include "FFmpegUtils.h"
     13 #include "TimeUnits.h"
     14 #include "VideoUtils.h"
     15 #include "libavutil/dict.h"
     16 #include "libavutil/samplefmt.h"
     17 #if defined(FFVPX_VERSION)
     18 #  include "libavutil/channel_layout.h"
     19 #endif
     20 #include "mozilla/StaticPrefs_media.h"
     21 
     22 #ifdef MOZ_WIDGET_ANDROID
     23 #  include "ffvpx/hwcontext_mediacodec.h"
     24 #  include "ffvpx/mediacodec.h"
     25 #endif
     26 
     27 namespace mozilla {
     28 
     29 using TimeUnit = media::TimeUnit;
     30 
/* Audio decoder backed by FFmpeg/libav.
 *
 * The constructor resolves the AVCodecID from the MIME type (via GetCodecId)
 * and stashes the codec-specific "extradata" blob FFmpeg needs for each
 * codec; the codec itself is opened later, in Init().
 */
FFmpegAudioDecoder<LIBAV_VER>::FFmpegAudioDecoder(
    const FFmpegLibWrapper* aLib, const CreateDecoderParams& aDecoderParams)
    : FFmpegDataDecoder(aLib,
                        GetCodecId(aDecoderParams.AudioConfig().mMimeType,
                                   aDecoderParams.AudioConfig()),
                        aDecoderParams.mCDM),
      mAudioInfo(aDecoderParams.AudioConfig()) {
  MOZ_COUNT_CTOR(FFmpegAudioDecoder);

  if (mCodecID == AV_CODEC_ID_AAC &&
      mAudioInfo.mCodecSpecificConfig.is<AacCodecSpecificData>()) {
    const AacCodecSpecificData& aacCodecSpecificData =
        mAudioInfo.mCodecSpecificConfig.as<AacCodecSpecificData>();
    mExtraData = new MediaByteBuffer;
    // FFmpeg expects the DecoderConfigDescriptor blob as AAC extradata.
    mExtraData->AppendElements(
        *aacCodecSpecificData.mDecoderConfigDescriptorBinaryBlob);
    FFMPEG_LOG("FFmpegAudioDecoder ctor (aac)");
    return;
  }

  if (mCodecID == AV_CODEC_ID_MP3) {
    // MP3 carries everything in-band; no extradata is needed.
    return;
  }

  if (mCodecID == AV_CODEC_ID_FLAC) {
    // Gracefully handle bad data: only forward the stream-info blob when the
    // demuxer actually provided FLAC codec-specific data.
    if (mAudioInfo.mCodecSpecificConfig.is<FlacCodecSpecificData>()) {
      const FlacCodecSpecificData& flacCodecSpecificData =
          mAudioInfo.mCodecSpecificConfig.as<FlacCodecSpecificData>();
      if (flacCodecSpecificData.mStreamInfoBinaryBlob->IsEmpty()) {
        // Flac files without headers will be missing stream info. In this
        // case we don't want to feed ffmpeg empty extra data as it will
        // fail, just early return.
        return;
      }
      // Use a new MediaByteBuffer as the object will be modified during
      // initialization.
      mExtraData = new MediaByteBuffer;
      mExtraData->AppendElements(*flacCodecSpecificData.mStreamInfoBinaryBlob);
      return;
    }
  }

  // Vorbis, Opus are handled by this case, as well as any codec that has a
  // non-tagged variant, because the data comes from Web Codecs.
  RefPtr<MediaByteBuffer> audioCodecSpecificBinaryBlob =
      GetAudioCodecSpecificBlob(mAudioInfo.mCodecSpecificConfig);
  if (audioCodecSpecificBinaryBlob && audioCodecSpecificBinaryBlob->Length()) {
    // Use a new MediaByteBuffer as the object will be modified during
    // initialization.
    mExtraData = new MediaByteBuffer;
    mExtraData->AppendElements(*audioCodecSpecificBinaryBlob);
  }

  if (mCodecID == AV_CODEC_ID_OPUS) {
    // Remember whether playback targets a mono device; Init() uses this to
    // disable Opus stereo phase inversion.
    mDefaultPlaybackDeviceMono = aDecoderParams.mOptions.contains(
        CreateDecoderParams::Option::DefaultPlaybackDeviceMono);
  }
}
     94 
/* Opens the FFmpeg codec chosen in the constructor.
 *
 * Builds codec options (Opus phase-inversion handling), validates the Opus
 * extradata requirement, then tries a hardware decoder (Android RDD/utility
 * process only) before falling back to software. Resolves the promise with
 * kAudioTrack on success, rejects with the failure code otherwise. */
RefPtr<MediaDataDecoder::InitPromise> FFmpegAudioDecoder<LIBAV_VER>::Init() {
  AUTO_PROFILER_LABEL("FFmpegAudioDecoder::Init", MEDIA_PLAYBACK);
  AVDictionary* options = nullptr;
  if (mCodecID == AV_CODEC_ID_OPUS) {
    // Opus has a special feature for stereo coding where it represents wide
    // stereo channels by 180-degree out of phase samples. This improves
    // quality, but needs to be disabled when the output is downmixed to
    // mono. The playback channel count is decided in AudioSink with the same
    // method, `DecideAudioPlaybackChannels()`, which triggers the downmix
    // when needed.
    if (mDefaultPlaybackDeviceMono ||
        DecideAudioPlaybackChannels(mAudioInfo) == 1) {
      mLib->av_dict_set(&options, "apply_phase_inv", "false", 0);
    }
    // extradata is required for Opus when the number of channels is > 2.
    // FFmpeg will happily (but incorrectly) initialize a decoder without a
    // description, but it will have only two channels.
    if (mAudioInfo.mChannels > 2 &&
        (!mExtraData || mExtraData->Length() < 10)) {
      FFMPEG_LOG(
          "Cannot initialize decoder with %d channels without extradata of at "
          "least 10 bytes",
          mAudioInfo.mChannels);
      return InitPromise::CreateAndReject(NS_ERROR_FAILURE, __func__);
    }
  }

  MediaResult rv(NS_ERROR_NOT_AVAILABLE);
#if defined(MOZ_WIDGET_ANDROID) && defined(USING_MOZFFVPX)
  // Prefer a MediaCodec-backed hardware decoder in the RDD/utility process.
  if (XRE_IsRDDProcess() || XRE_IsUtilityProcess()) {
    AVCodec* codec = FindHardwareAVCodec(mLib, mCodecID, AV_HWDEVICE_TYPE_NONE);
    if (codec) {
      rv = InitDecoder(codec, &options);
    }
  }

  // NOTE: the braced block below is shared with the non-Android build; when
  // the hardware path is compiled out it runs unconditionally.
  if (NS_FAILED(rv))
#endif
  {
    rv = InitSWDecoder(&options);
  }

  mLib->av_dict_free(&options);

  return NS_SUCCEEDED(rv)
             ? InitPromise::CreateAndResolve(TrackInfo::kAudioTrack, __func__)
             : InitPromise::CreateAndReject(rv, __func__);
}
    142 
/* Configures mCodecContext before the codec is opened: single-threaded
 * decode, preferred output sample format, and (ffvpx builds) the channel
 * layout and sample rate taken from the container's AudioInfo. */
void FFmpegAudioDecoder<LIBAV_VER>::InitCodecContext() {
  MOZ_ASSERT(mCodecContext);
  // We do not want to set this value to 0 as FFmpeg by default will
  // use the number of cores, which with our mozlibavutil get_cpu_count
  // isn't implemented.
  mCodecContext->thread_count = 1;
  // FFmpeg takes this as a suggestion for what format to use for audio
  // samples. LibAV 0.8 (lavc version 53) produces rubbish float interleaved
  // samples, so request 16-bit audio there and float everywhere else.
  mCodecContext->request_sample_fmt =
      (mLib->mVersion == 53) ? AV_SAMPLE_FMT_S16 : AV_SAMPLE_FMT_FLT;
#ifdef FFVPX_VERSION
  // AudioInfo's layout first 32-bits are bit-per-bit compatible with
  // WAVEFORMATEXTENSIBLE and FFmpeg's AVChannel enum, so the channel map
  // can be handed to FFmpeg directly as a mask.
  mCodecContext->ch_layout.nb_channels =
      AssertedCast<int>(mAudioInfo.mChannels);
  if (mAudioInfo.mChannelMap != AudioConfig::ChannelLayout::UNKNOWN_MAP) {
    mLib->av_channel_layout_from_mask(
        &mCodecContext->ch_layout,
        AssertedCast<uint64_t>(mAudioInfo.mChannelMap));
  } else {
    // Unknown map: let FFmpeg pick its default layout for this count.
    mLib->av_channel_layout_default(&mCodecContext->ch_layout,
                                    AssertedCast<int>(mAudioInfo.mChannels));
  }
  mCodecContext->sample_rate = AssertedCast<int>(mAudioInfo.mRate);
#endif
}
    170 
    171 static AlignedAudioBuffer CopyAndPackAudio(AVFrame* aFrame,
    172                                           uint32_t aNumChannels,
    173                                           uint32_t aNumAFrames) {
    174  AlignedAudioBuffer audio(aNumChannels * aNumAFrames);
    175  if (!audio) {
    176    return audio;
    177  }
    178 
    179  if (aFrame->format == AV_SAMPLE_FMT_FLT) {
    180    // Audio data already packed. No need to do anything other than copy it
    181    // into a buffer we own.
    182    memcpy(audio.get(), aFrame->data[0],
    183           aNumChannels * aNumAFrames * sizeof(AudioDataValue));
    184  } else if (aFrame->format == AV_SAMPLE_FMT_FLTP) {
    185    // Planar audio data. Pack it into something we can understand.
    186    AudioDataValue* tmp = audio.get();
    187    AudioDataValue** data = reinterpret_cast<AudioDataValue**>(aFrame->data);
    188    for (uint32_t frame = 0; frame < aNumAFrames; frame++) {
    189      for (uint32_t channel = 0; channel < aNumChannels; channel++) {
    190        *tmp++ = data[channel][frame];
    191      }
    192    }
    193  } else if (aFrame->format == AV_SAMPLE_FMT_S16) {
    194    // Audio data already packed. Need to convert from S16 to 32 bits Float
    195    AudioDataValue* tmp = audio.get();
    196    int16_t* data = reinterpret_cast<int16_t**>(aFrame->data)[0];
    197    for (uint32_t frame = 0; frame < aNumAFrames; frame++) {
    198      for (uint32_t channel = 0; channel < aNumChannels; channel++) {
    199        *tmp++ = ConvertAudioSample<float>(*data++);
    200      }
    201    }
    202  } else if (aFrame->format == AV_SAMPLE_FMT_S16P) {
    203    // Planar audio data. Convert it from S16 to 32 bits float
    204    // and pack it into something we can understand.
    205    AudioDataValue* tmp = audio.get();
    206    int16_t** data = reinterpret_cast<int16_t**>(aFrame->data);
    207    for (uint32_t frame = 0; frame < aNumAFrames; frame++) {
    208      for (uint32_t channel = 0; channel < aNumChannels; channel++) {
    209        *tmp++ = ConvertAudioSample<float>(data[channel][frame]);
    210      }
    211    }
    212  } else if (aFrame->format == AV_SAMPLE_FMT_S32) {
    213    // Audio data already packed. Need to convert from S16 to 32 bits Float
    214    AudioDataValue* tmp = audio.get();
    215    int32_t* data = reinterpret_cast<int32_t**>(aFrame->data)[0];
    216    for (uint32_t frame = 0; frame < aNumAFrames; frame++) {
    217      for (uint32_t channel = 0; channel < aNumChannels; channel++) {
    218        *tmp++ = ConvertAudioSample<float>(*data++);
    219      }
    220    }
    221  } else if (aFrame->format == AV_SAMPLE_FMT_S32P) {
    222    // Planar audio data. Convert it from S32 to 32 bits float
    223    // and pack it into something we can understand.
    224    AudioDataValue* tmp = audio.get();
    225    int32_t** data = reinterpret_cast<int32_t**>(aFrame->data);
    226    for (uint32_t frame = 0; frame < aNumAFrames; frame++) {
    227      for (uint32_t channel = 0; channel < aNumChannels; channel++) {
    228        *tmp++ = ConvertAudioSample<float>(data[channel][frame]);
    229      }
    230    }
    231  } else if (aFrame->format == AV_SAMPLE_FMT_U8) {
    232    // Interleaved audio data. Convert it from u8 to the expected sample-format
    233    AudioDataValue* tmp = audio.get();
    234    uint8_t* data = reinterpret_cast<uint8_t**>(aFrame->data)[0];
    235    for (uint32_t frame = 0; frame < aNumAFrames; frame++) {
    236      for (uint32_t channel = 0; channel < aNumChannels; channel++) {
    237        *tmp++ = ConvertAudioSample<float>(*data++);
    238      }
    239    }
    240  } else if (aFrame->format == AV_SAMPLE_FMT_U8P) {
    241    // Planar audio data. Convert it from u8 to the expected sample-format
    242    // and pack it into something we can understand.
    243    AudioDataValue* tmp = audio.get();
    244    uint8_t** data = reinterpret_cast<uint8_t**>(aFrame->data);
    245    for (uint32_t frame = 0; frame < aNumAFrames; frame++) {
    246      for (uint32_t channel = 0; channel < aNumChannels; channel++) {
    247        *tmp++ = ConvertAudioSample<float>(data[channel][frame]);
    248      }
    249    }
    250  }
    251 
    252  return audio;
    253 }
    254 
    255 using ChannelLayout = AudioConfig::ChannelLayout;
    256 
    257 MediaResult FFmpegAudioDecoder<LIBAV_VER>::PostProcessOutput(
    258    bool aDecoded, MediaRawData* aSample, DecodedData& aResults,
    259    bool* aGotFrame, int32_t aSubmitted) {
    260  media::TimeUnit pts = aSample->mTime;
    261 
    262  if (mFrame->format != AV_SAMPLE_FMT_FLT &&
    263      mFrame->format != AV_SAMPLE_FMT_FLTP &&
    264      mFrame->format != AV_SAMPLE_FMT_S16 &&
    265      mFrame->format != AV_SAMPLE_FMT_S16P &&
    266      mFrame->format != AV_SAMPLE_FMT_S32 &&
    267      mFrame->format != AV_SAMPLE_FMT_S32P &&
    268      mFrame->format != AV_SAMPLE_FMT_U8 &&
    269      mFrame->format != AV_SAMPLE_FMT_U8P) {
    270    return MediaResult(
    271        NS_ERROR_DOM_MEDIA_DECODE_ERR,
    272        RESULT_DETAIL("FFmpeg audio decoder outputs unsupported audio format"));
    273  }
    274 
    275  if (aSubmitted < 0) {
    276    FFMPEG_LOG("Got %d more frame from packet", mFrame->nb_samples);
    277  }
    278 
    279  FFMPEG_LOG("FFmpegAudioDecoder decoded: [%s,%s] (Duration: %s) [%s]",
    280             aSample->mTime.ToString().get(),
    281             aSample->GetEndTime().ToString().get(),
    282             aSample->mDuration.ToString().get(),
    283             mLib->av_get_sample_fmt_name(mFrame->format));
    284 
    285  uint32_t numChannels = ChannelCount(mCodecContext);
    286  uint32_t samplingRate = mCodecContext->sample_rate;
    287  if (!numChannels) {
    288    numChannels = mAudioInfo.mChannels;
    289  }
    290  if (!samplingRate) {
    291    samplingRate = mAudioInfo.mRate;
    292  }
    293  AlignedAudioBuffer audio =
    294      CopyAndPackAudio(mFrame, numChannels, mFrame->nb_samples);
    295  if (!audio) {
    296    FFMPEG_LOG("CopyAndPackAudio error (OOM)");
    297    return MediaResult(NS_ERROR_OUT_OF_MEMORY, __func__);
    298  }
    299 
    300  media::TimeUnit duration = TimeUnit(mFrame->nb_samples, samplingRate);
    301  if (!duration.IsValid()) {
    302    FFMPEG_LOG("Duration isn't valid (%d + %d)", mFrame->nb_samples,
    303               samplingRate);
    304    return MediaResult(NS_ERROR_DOM_MEDIA_OVERFLOW_ERR,
    305                       RESULT_DETAIL("Invalid sample duration"));
    306  }
    307 
    308  media::TimeUnit newpts = pts + duration;
    309  if (!newpts.IsValid()) {
    310    FFMPEG_LOG("New pts isn't valid (%lf + %lf)", pts.ToSeconds(),
    311               duration.ToSeconds());
    312    return MediaResult(
    313        NS_ERROR_DOM_MEDIA_OVERFLOW_ERR,
    314        RESULT_DETAIL("Invalid count of accumulated audio samples"));
    315  }
    316 
    317  RefPtr<AudioData> data =
    318      new AudioData(aSample->mOffset, pts, std::move(audio), numChannels,
    319                    samplingRate, mAudioInfo.mChannelMap);
    320  MOZ_ASSERT(duration == data->mDuration, "must be equal");
    321  aResults.AppendElement(std::move(data));
    322 
    323  pts = newpts;
    324 
    325  if (aGotFrame) {
    326    *aGotFrame = true;
    327  }
    328  return NS_OK;
    329 }
    330 
    331 #if LIBAVCODEC_VERSION_MAJOR < 59
    332 MediaResult FFmpegAudioDecoder<LIBAV_VER>::DecodeUsingFFmpeg(
    333    AVPacket* aPacket, bool& aDecoded, MediaRawData* aSample,
    334    DecodedData& aResults, bool* aGotFrame) {
    335  int decoded = 0;
    336  int rv =
    337      mLib->avcodec_decode_audio4(mCodecContext, mFrame, &decoded, aPacket);
    338  aDecoded = decoded == 1;
    339  if (rv < 0) {
    340    NS_WARNING("FFmpeg audio decoder error.");
    341    return MediaResult(NS_ERROR_DOM_MEDIA_DECODE_ERR,
    342                       RESULT_DETAIL("FFmpeg audio error"));
    343  }
    344  PostProcessOutput(decoded, aSample, aResults, aGotFrame, 0);
    345  return NS_OK;
    346 }
    347 #else
    348 #  define AVRESULT_OK 0
    349 
    350 MediaResult FFmpegAudioDecoder<LIBAV_VER>::DecodeUsingFFmpeg(
    351    AVPacket* aPacket, bool& aDecoded, MediaRawData* aSample,
    352    DecodedData& aResults, bool* aGotFrame) {
    353  // This in increment whenever avcodec_send_packet succeeds, and decremented
    354  // whenever avcodec_receive_frame succeeds. Because it is possible to have
    355  // multiple AVFrames from a single AVPacket, this number can be negative.
    356  // This is used to ensure that pts and duration are correctly set on the
    357  // resulting audio buffers.
    358  int32_t submitted = 0;
    359  int ret = mLib->avcodec_send_packet(mCodecContext, aPacket);
    360  switch (ret) {
    361    case AVRESULT_OK:
    362      submitted++;
    363      break;
    364    case AVERROR(EAGAIN):
    365      FFMPEG_LOG("  av_codec_send_packet: EAGAIN.");
    366      MOZ_ASSERT(false, "EAGAIN");
    367      break;
    368    case AVERROR_EOF:
    369      FFMPEG_LOG("  End of stream.");
    370      return MediaResult(NS_ERROR_DOM_MEDIA_END_OF_STREAM,
    371                         RESULT_DETAIL("End of stream"));
    372    default:
    373      NS_WARNING("FFmpeg audio decoder error (avcodec_send_packet).");
    374      return MediaResult(NS_ERROR_DOM_MEDIA_DECODE_ERR,
    375                         RESULT_DETAIL("FFmpeg audio error"));
    376  }
    377 
    378  MediaResult rv;
    379 
    380  while (ret == 0) {
    381    aDecoded = false;
    382    ret = mLib->avcodec_receive_frame(mCodecContext, mFrame);
    383    switch (ret) {
    384      case AVRESULT_OK:
    385        aDecoded = true;
    386        submitted--;
    387        if (submitted < 0) {
    388          FFMPEG_LOG("Multiple AVFrame from a single AVPacket");
    389        }
    390        break;
    391      case AVERROR(EAGAIN): {
    392        // Quirk of the vorbis decoder -- the first packet doesn't return audio.
    393        if (submitted == 1 && mCodecID == AV_CODEC_ID_VORBIS) {
    394          AlignedAudioBuffer buf;
    395          aResults.AppendElement(
    396              new AudioData(0, TimeUnit::Zero(), std::move(buf),
    397                            mAudioInfo.mChannels, mAudioInfo.mRate));
    398        }
    399        FFMPEG_LOG("  EAGAIN (packets submitted: %" PRIu32 ").", submitted);
    400        rv = NS_OK;
    401        break;
    402      }
    403      case AVERROR_EOF: {
    404        FFMPEG_LOG("  End of stream.");
    405        rv = MediaResult(NS_ERROR_DOM_MEDIA_END_OF_STREAM,
    406                         RESULT_DETAIL("End of stream"));
    407        break;
    408      }
    409      default:
    410        FFMPEG_LOG("  avcodec_receive_packet error.");
    411        NS_WARNING("FFmpeg audio decoder error (avcodec_receive_packet).");
    412        rv = MediaResult(NS_ERROR_DOM_MEDIA_DECODE_ERR,
    413                         RESULT_DETAIL("FFmpeg audio error"));
    414    }
    415    if (aDecoded) {
    416      PostProcessOutput(aDecoded, aSample, aResults, aGotFrame, submitted);
    417    }
    418  }
    419 
    420  return NS_OK;
    421 }
    422 #endif
    423 
    424 MediaResult FFmpegAudioDecoder<LIBAV_VER>::DoDecode(MediaRawData* aSample,
    425                                                    uint8_t* aData, int aSize,
    426                                                    bool* aGotFrame,
    427                                                    DecodedData& aResults) {
    428  MOZ_ASSERT(mTaskQueue->IsOnCurrentThread());
    429  PROCESS_DECODE_LOG(aSample);
    430  AVPacket* packet;
    431 #if LIBAVCODEC_VERSION_MAJOR >= 61
    432  packet = mLib->av_packet_alloc();
    433  auto freePacket = MakeScopeExit([&] { mLib->av_packet_free(&packet); });
    434 #else
    435  AVPacket packet_mem;
    436  packet = &packet_mem;
    437  mLib->av_init_packet(packet);
    438 #endif
    439 
    440 #if defined(MOZ_WIDGET_ANDROID) && defined(USING_MOZFFVPX)
    441  MediaResult ret = MaybeAttachCryptoInfo(aSample, packet);
    442  if (NS_FAILED(ret)) {
    443    return ret;
    444  }
    445 #endif
    446 
    447  FFMPEG_LOG("FFmpegAudioDecoder::DoDecode: %d bytes, [%s,%s] (Duration: %s)",
    448             aSize, aSample->mTime.ToString().get(),
    449             aSample->GetEndTime().ToString().get(),
    450             aSample->mDuration.ToString().get());
    451 
    452  packet->data = const_cast<uint8_t*>(aData);
    453  packet->size = aSize;
    454  packet->pts = aSample->mTime.ToMicroseconds();
    455 
    456  if (aGotFrame) {
    457    *aGotFrame = false;
    458  }
    459 
    460  if (!PrepareFrame()) {
    461    FFMPEG_LOG("FFmpegAudioDecoder: OOM in PrepareFrame");
    462    return MediaResult(
    463        NS_ERROR_OUT_OF_MEMORY,
    464        RESULT_DETAIL("FFmpeg audio decoder failed to allocate frame"));
    465  }
    466 
    467  bool decoded = false;
    468  auto rv = DecodeUsingFFmpeg(packet, decoded, aSample, aResults, aGotFrame);
    469  NS_ENSURE_SUCCESS(rv, rv);
    470 
    471  return NS_OK;
    472 }
    473 
    474 AVCodecID FFmpegAudioDecoder<LIBAV_VER>::GetCodecId(const nsACString& aMimeType,
    475                                                    const AudioInfo& aInfo) {
    476  if (aMimeType.EqualsLiteral("audio/mp4a-latm")) {
    477    return AV_CODEC_ID_AAC;
    478  }
    479 #ifdef FFVPX_VERSION
    480  if (aMimeType.EqualsLiteral("audio/mpeg")) {
    481    return AV_CODEC_ID_MP3;
    482  }
    483  if (aMimeType.EqualsLiteral("audio/flac")) {
    484    return AV_CODEC_ID_FLAC;
    485  }
    486  if (aMimeType.EqualsLiteral("audio/vorbis")) {
    487    return AV_CODEC_ID_VORBIS;
    488  }
    489  if (aMimeType.EqualsLiteral("audio/opus")) {
    490    return AV_CODEC_ID_OPUS;
    491  }
    492  if (aMimeType.Find("wav") != kNotFound) {
    493    if (aMimeType.EqualsLiteral("audio/x-wav") ||
    494        aMimeType.EqualsLiteral("audio/wave; codecs=1") ||
    495        aMimeType.EqualsLiteral("audio/wave; codecs=65534")) {
    496      // find the pcm format
    497      switch (aInfo.mBitDepth) {
    498        case 8:
    499          return AV_CODEC_ID_PCM_U8;
    500        case 16:
    501          return AV_CODEC_ID_PCM_S16LE;
    502        case 24:
    503          return AV_CODEC_ID_PCM_S24LE;
    504        case 32:
    505          return AV_CODEC_ID_PCM_S32LE;
    506        case 0:
    507          // ::Init will find and use the right type here, this is just
    508          // returning something that means that this media type can be decoded.
    509          // This happens when attempting to find what decoder to use for a
    510          // media type, without actually having looked at the actual
    511          // bytestream. This decoder can decode all usual PCM bytestream
    512          // anyway.
    513          return AV_CODEC_ID_PCM_S16LE;
    514        default:
    515          return AV_CODEC_ID_NONE;
    516      };
    517    }
    518    if (aMimeType.EqualsLiteral("audio/wave; codecs=3")) {
    519      return AV_CODEC_ID_PCM_F32LE;
    520    }
    521    // A-law
    522    if (aMimeType.EqualsLiteral("audio/wave; codecs=6")) {
    523      return AV_CODEC_ID_PCM_ALAW;
    524    }
    525    // Mu-law
    526    if (aMimeType.EqualsLiteral("audio/wave; codecs=7")) {
    527      return AV_CODEC_ID_PCM_MULAW;
    528    }
    529  }
    530 #endif
    531 
    532  return AV_CODEC_ID_NONE;
    533 }
    534 
    535 nsCString FFmpegAudioDecoder<LIBAV_VER>::GetCodecName() const {
    536 #if LIBAVCODEC_VERSION_MAJOR > 53
    537  return nsCString(mLib->avcodec_descriptor_get(mCodecID)->name);
    538 #else
    539  return "unknown"_ns;
    540 #endif
    541 }
    542 
// Balances the MOZ_COUNT_CTOR in the constructor for leak-checking builds.
FFmpegAudioDecoder<LIBAV_VER>::~FFmpegAudioDecoder() {
  MOZ_COUNT_DTOR(FFmpegAudioDecoder);
}
    546 
    547 }  // namespace mozilla