video_rtp_depacketizer_vp9.cc (7958B)
1 /* 2 * Copyright (c) 2019 The WebRTC project authors. All Rights Reserved. 3 * 4 * Use of this source code is governed by a BSD-style license 5 * that can be found in the LICENSE file in the root of the source 6 * tree. An additional intellectual property rights grant can be found 7 * in the file PATENTS. All contributing project authors may 8 * be found in the AUTHORS file in the root of the source tree. 9 */ 10 11 #include "modules/rtp_rtcp/source/video_rtp_depacketizer_vp9.h" 12 13 #include <cstdint> 14 #include <cstring> 15 #include <optional> 16 #include <utility> 17 18 #include "api/array_view.h" 19 #include "api/video/video_codec_constants.h" 20 #include "api/video/video_codec_type.h" 21 #include "api/video/video_frame_type.h" 22 #include "modules/rtp_rtcp/source/rtp_video_header.h" 23 #include "modules/rtp_rtcp/source/video_rtp_depacketizer.h" 24 #include "modules/video_coding/codecs/interface/common_constants.h" 25 #include "modules/video_coding/codecs/vp9/include/vp9_globals.h" 26 #include "rtc_base/bitstream_reader.h" 27 #include "rtc_base/checks.h" 28 #include "rtc_base/copy_on_write_buffer.h" 29 30 namespace webrtc { 31 namespace { 32 33 // Picture ID: 34 // 35 // +-+-+-+-+-+-+-+-+ 36 // I: |M| PICTURE ID | M:0 => picture id is 7 bits. 37 // +-+-+-+-+-+-+-+-+ M:1 => picture id is 15 bits. 
38 // M: | EXTENDED PID | 39 // +-+-+-+-+-+-+-+-+ 40 // 41 void ParsePictureId(BitstreamReader& parser, RTPVideoHeaderVP9* vp9) { 42 if (parser.ReadBit()) { // m_bit 43 vp9->picture_id = parser.ReadBits(15); 44 vp9->max_picture_id = kMaxTwoBytePictureId; 45 } else { 46 vp9->picture_id = parser.ReadBits(7); 47 vp9->max_picture_id = kMaxOneBytePictureId; 48 } 49 } 50 51 // Layer indices : 52 // 53 // +-+-+-+-+-+-+-+-+ 54 // L: | T |U| S |D| 55 // +-+-+-+-+-+-+-+-+ 56 // | TL0PICIDX | (non-flexible mode only) 57 // +-+-+-+-+-+-+-+-+ 58 // 59 void ParseLayerInfo(BitstreamReader& parser, RTPVideoHeaderVP9* vp9) { 60 vp9->temporal_idx = parser.ReadBits(3); 61 vp9->temporal_up_switch = parser.Read<bool>(); 62 vp9->spatial_idx = parser.ReadBits(3); 63 vp9->inter_layer_predicted = parser.Read<bool>(); 64 if (vp9->spatial_idx >= kMaxSpatialLayers) { 65 parser.Invalidate(); 66 return; 67 } 68 69 if (!vp9->flexible_mode) { 70 vp9->tl0_pic_idx = parser.Read<uint8_t>(); 71 } 72 } 73 74 // Reference indices: 75 // 76 // +-+-+-+-+-+-+-+-+ P=1,F=1: At least one reference index 77 // P,F: | P_DIFF |N| up to 3 times has to be specified. 78 // +-+-+-+-+-+-+-+-+ N=1: An additional P_DIFF follows 79 // current P_DIFF. 80 // 81 void ParseRefIndices(BitstreamReader& parser, RTPVideoHeaderVP9* vp9) { 82 if (vp9->picture_id == kNoPictureId) { 83 parser.Invalidate(); 84 return; 85 } 86 87 vp9->num_ref_pics = 0; 88 bool n_bit; 89 do { 90 if (vp9->num_ref_pics == kMaxVp9RefPics) { 91 parser.Invalidate(); 92 return; 93 } 94 95 uint8_t p_diff = parser.ReadBits(7); 96 n_bit = parser.Read<bool>(); 97 98 vp9->pid_diff[vp9->num_ref_pics] = p_diff; 99 uint32_t scaled_pid = vp9->picture_id; 100 if (p_diff > scaled_pid) { 101 // TODO(asapersson): Max should correspond to the picture id of last wrap. 102 scaled_pid += vp9->max_picture_id + 1; 103 } 104 vp9->ref_picture_id[vp9->num_ref_pics++] = scaled_pid - p_diff; 105 } while (n_bit); 106 } 107 108 // Scalability structure (SS). 
//
//      +-+-+-+-+-+-+-+-+
// V:   | N_S |Y|G|-|-|-|
//      +-+-+-+-+-+-+-+-+              -|
// Y:   |     WIDTH     | (OPTIONAL)    .
//      +               +               .
//      |               | (OPTIONAL)    .
//      +-+-+-+-+-+-+-+-+               . - N_S + 1 times
//      |     HEIGHT    | (OPTIONAL)    .
//      +               +               .
//      |               | (OPTIONAL)    .
//      +-+-+-+-+-+-+-+-+              -|
// G:   |      N_G      | (OPTIONAL)
//      +-+-+-+-+-+-+-+-+                            -|
// N_G: |  T  |U| R |-|-| (OPTIONAL)                  .
//      +-+-+-+-+-+-+-+-+              -|             . - N_G times
//      |    P_DIFF     | (OPTIONAL)    . - R times   .
//      +-+-+-+-+-+-+-+-+              -|            -|
//
// Parses the scalability structure: the number of spatial layers, their
// optional resolutions, and the optional group-of-frames (GOF) description.
void ParseSsData(BitstreamReader& parser, RTPVideoHeaderVP9* vp9) {
  vp9->num_spatial_layers = parser.ReadBits(3) + 1;  // N_S is stored minus 1.
  vp9->spatial_layer_resolution_present = parser.Read<bool>();  // Y bit.
  bool g_bit = parser.Read<bool>();  // G bit: GOF description present.
  parser.ConsumeBits(3);             // Reserved bits.
  vp9->gof.num_frames_in_gof = 0;

  if (vp9->spatial_layer_resolution_present) {
    for (size_t i = 0; i < vp9->num_spatial_layers; ++i) {
      vp9->width[i] = parser.Read<uint16_t>();
      vp9->height[i] = parser.Read<uint16_t>();
    }
  }
  if (g_bit) {
    vp9->gof.num_frames_in_gof = parser.Read<uint8_t>();
  }
  for (size_t i = 0; i < vp9->gof.num_frames_in_gof; ++i) {
    vp9->gof.temporal_idx[i] = parser.ReadBits(3);         // T.
    vp9->gof.temporal_up_switch[i] = parser.Read<bool>();  // U.
    vp9->gof.num_ref_pics[i] = parser.ReadBits(2);         // R.
    parser.ConsumeBits(2);                                 // Reserved bits.

    // R picture-id deltas for this frame of the GOF.
    for (uint8_t p = 0; p < vp9->gof.num_ref_pics[i]; ++p) {
      vp9->gof.pid_diff[i][p] = parser.Read<uint8_t>();
    }
  }
}
}  // namespace

// Depacketizes one VP9 RTP payload: parses the payload descriptor into
// `result->video_header` and exposes the bytes that follow the descriptor as
// the video payload. Returns std::nullopt if the descriptor is malformed or
// no payload bytes remain after it.
std::optional<VideoRtpDepacketizer::ParsedRtpPayload>
VideoRtpDepacketizerVp9::Parse(CopyOnWriteBuffer rtp_payload) {
  std::optional<ParsedRtpPayload> result(std::in_place);
  int offset = ParseRtpPayload(rtp_payload, &result->video_header);
  if (offset == 0)
    return std::nullopt;  // ParseRtpPayload() signals failure with 0.
  RTC_DCHECK_LT(offset, rtp_payload.size());
  // Everything after the descriptor is the (possibly partial) VP9 frame data.
  result->video_payload =
      rtp_payload.Slice(offset, rtp_payload.size() - offset);
  return result;
}

// Parses the VP9 payload descriptor at the start of `rtp_payload` into
// `video_header`. Returns the descriptor length in bytes, or 0 on failure
// (truncated/invalid descriptor, or a descriptor consuming the whole payload).
int
VideoRtpDepacketizerVp9::ParseRtpPayload(
    ArrayView<const uint8_t> rtp_payload,
    RTPVideoHeader* video_header) {
  RTC_DCHECK(video_header);
  // Parse mandatory first byte of payload descriptor: I|P|L|F|B|E|V|Z.
  BitstreamReader parser(rtp_payload);
  uint8_t first_byte = parser.Read<uint8_t>();
  bool i_bit = first_byte & 0b1000'0000;  // PictureId present.
  bool p_bit = first_byte & 0b0100'0000;  // Inter-picture predicted.
  bool l_bit = first_byte & 0b0010'0000;  // Layer indices present.
  bool f_bit = first_byte & 0b0001'0000;  // Flexible mode.
  bool b_bit = first_byte & 0b0000'1000;  // Begins frame flag.
  bool e_bit = first_byte & 0b0000'0100;  // Ends frame flag.
  bool v_bit = first_byte & 0b0000'0010;  // Scalability structure present.
  bool z_bit = first_byte & 0b0000'0001;  // Not used for inter-layer prediction.

  // Parsed payload.
  video_header->width = 0;
  video_header->height = 0;
  video_header->simulcastIdx = 0;
  video_header->codec = kVideoCodecVP9;

  auto& vp9_header =
      video_header->video_type_header.emplace<RTPVideoHeaderVP9>();
  vp9_header.InitRTPVideoHeaderVP9();
  vp9_header.inter_pic_predicted = p_bit;
  vp9_header.flexible_mode = f_bit;
  vp9_header.beginning_of_frame = b_bit;
  vp9_header.end_of_frame = e_bit;
  vp9_header.ss_data_available = v_bit;
  vp9_header.non_ref_for_inter_layer_pred = z_bit;

  // Parse fields that are present.
  if (i_bit) {
    ParsePictureId(parser, &vp9_header);
  }
  if (l_bit) {
    ParseLayerInfo(parser, &vp9_header);
  }
  if (p_bit && f_bit) {
    // Reference indices only appear for inter pictures in flexible mode.
    ParseRefIndices(parser, &vp9_header);
  }
  if (v_bit) {
    ParseSsData(parser, &vp9_header);
    if (vp9_header.spatial_layer_resolution_present) {
      // TODO(asapersson): Add support for spatial layers.
      video_header->width = vp9_header.width[0];
      video_header->height = vp9_header.height[0];
    }
  }
  // Key frame only when neither inter-picture nor inter-layer predicted.
  video_header->frame_type = p_bit || vp9_header.inter_layer_predicted
                                 ? VideoFrameType::kVideoFrameDelta
                                 : VideoFrameType::kVideoFrameKey;
  video_header->is_first_packet_in_frame = b_bit;
  video_header->is_last_packet_in_frame = e_bit;

  int num_remaining_bits = parser.RemainingBitCount();
  // NOTE(review): a non-positive count presumably also covers a parser that
  // overran the payload or was Invalidate()d by a helper above — confirm
  // against BitstreamReader's contract.
  if (num_remaining_bits <= 0) {
    // Failed to parse or empty vp9 payload data.
    return 0;
  }
  // vp9 descriptor is byte aligned.
  RTC_DCHECK_EQ(num_remaining_bits % 8, 0);
  return rtp_payload.size() - num_remaining_bits / 8;
}
}  // namespace webrtc