intreadwrite.h (19426B)
/*
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#ifndef AVUTIL_INTREADWRITE_H
#define AVUTIL_INTREADWRITE_H

#include <stdint.h>
#include "libavutil/avconfig.h"
#include "attributes.h"
#include "bswap.h"

/*
 * The av_alias* unions below are tagged with the av_alias attribute
 * (may_alias on GCC/Clang) so that accesses through them are exempt
 * from strict-aliasing analysis; they are the type-safe way to
 * reinterpret aligned memory as integers or floats of various widths.
 */
typedef union {
    uint64_t u64;
    uint32_t u32[2];
    uint16_t u16[4];
    uint8_t  u8 [8];
    double   f64;
    float    f32[2];
} av_alias av_alias64;

typedef union {
    uint32_t u32;
    uint16_t u16[2];
    uint8_t  u8 [4];
    float    f32;
} av_alias av_alias32;

typedef union {
    uint16_t u16;
    uint8_t  u8 [2];
} av_alias av_alias16;

/*
 * Arch-specific headers can provide any combination of
 * AV_[RW][BLN](16|24|32|48|64) and AV_(COPY|SWAP|ZERO)(64|128) macros.
 * Preprocessor symbols must be defined, even if these are implemented
 * as inline functions.
 *
 * R/W means read/write, B/L/N means big/little/native endianness.
 * The following macros require aligned access, compared to their
 * unaligned variants: AV_(COPY|SWAP|ZERO)(64|128), AV_[RW]N[8-64]A.
 * Incorrect usage may range from abysmal performance to crash
 * depending on the platform.
 *
 * The unaligned variants are AV_[RW][BLN][8-64] and AV_COPY*U.
 */

#ifdef HAVE_AV_CONFIG_H

#include "config.h"

#if   ARCH_AARCH64
#   include "aarch64/intreadwrite.h"
#elif ARCH_MIPS
#   include "mips/intreadwrite.h"
#elif ARCH_PPC
#   include "ppc/intreadwrite.h"
#elif ARCH_X86
#   include "x86/intreadwrite.h"
#endif

#endif /* HAVE_AV_CONFIG_H */

/*
 * Map AV_RNXX <-> AV_R[BL]XX for all variants provided by per-arch headers.
 * On a big-endian host the native (N) accessor is the big-endian (B) one,
 * and vice versa on little-endian; whichever of the pair an arch header
 * supplied is used to define the other.
 */

#if AV_HAVE_BIGENDIAN

#   if    defined(AV_RN16) && !defined(AV_RB16)
#       define AV_RB16(p) AV_RN16(p)
#   elif !defined(AV_RN16) &&  defined(AV_RB16)
#       define AV_RN16(p) AV_RB16(p)
#   endif

#   if    defined(AV_WN16) && !defined(AV_WB16)
#       define AV_WB16(p, v) AV_WN16(p, v)
#   elif !defined(AV_WN16) &&  defined(AV_WB16)
#       define AV_WN16(p, v) AV_WB16(p, v)
#   endif

#   if    defined(AV_RN24) && !defined(AV_RB24)
#       define AV_RB24(p) AV_RN24(p)
#   elif !defined(AV_RN24) &&  defined(AV_RB24)
#       define AV_RN24(p) AV_RB24(p)
#   endif

#   if    defined(AV_WN24) && !defined(AV_WB24)
#       define AV_WB24(p, v) AV_WN24(p, v)
#   elif !defined(AV_WN24) &&  defined(AV_WB24)
#       define AV_WN24(p, v) AV_WB24(p, v)
#   endif

#   if    defined(AV_RN32) && !defined(AV_RB32)
#       define AV_RB32(p) AV_RN32(p)
#   elif !defined(AV_RN32) &&  defined(AV_RB32)
#       define AV_RN32(p) AV_RB32(p)
#   endif

#   if    defined(AV_WN32) && !defined(AV_WB32)
#       define AV_WB32(p, v) AV_WN32(p, v)
#   elif !defined(AV_WN32) &&  defined(AV_WB32)
#       define AV_WN32(p, v) AV_WB32(p, v)
#   endif

#   if    defined(AV_RN48) && !defined(AV_RB48)
#       define AV_RB48(p) AV_RN48(p)
#   elif !defined(AV_RN48) &&  defined(AV_RB48)
#       define AV_RN48(p) AV_RB48(p)
#   endif

#   if    defined(AV_WN48) && !defined(AV_WB48)
#       define AV_WB48(p, v) AV_WN48(p, v)
#   elif !defined(AV_WN48) &&  defined(AV_WB48)
#       define AV_WN48(p, v) AV_WB48(p, v)
#   endif

#   if    defined(AV_RN64) && !defined(AV_RB64)
#       define AV_RB64(p) AV_RN64(p)
#   elif !defined(AV_RN64) &&  defined(AV_RB64)
#       define AV_RN64(p) AV_RB64(p)
#   endif

#   if    defined(AV_WN64) && !defined(AV_WB64)
#       define AV_WB64(p, v) AV_WN64(p, v)
#   elif !defined(AV_WN64) &&  defined(AV_WB64)
#       define AV_WN64(p, v) AV_WB64(p, v)
#   endif

#else /* AV_HAVE_BIGENDIAN */

#   if    defined(AV_RN16) && !defined(AV_RL16)
#       define AV_RL16(p) AV_RN16(p)
#   elif !defined(AV_RN16) &&  defined(AV_RL16)
#       define AV_RN16(p) AV_RL16(p)
#   endif

#   if    defined(AV_WN16) && !defined(AV_WL16)
#       define AV_WL16(p, v) AV_WN16(p, v)
#   elif !defined(AV_WN16) &&  defined(AV_WL16)
#       define AV_WN16(p, v) AV_WL16(p, v)
#   endif

#   if    defined(AV_RN24) && !defined(AV_RL24)
#       define AV_RL24(p) AV_RN24(p)
#   elif !defined(AV_RN24) &&  defined(AV_RL24)
#       define AV_RN24(p) AV_RL24(p)
#   endif

#   if    defined(AV_WN24) && !defined(AV_WL24)
#       define AV_WL24(p, v) AV_WN24(p, v)
#   elif !defined(AV_WN24) &&  defined(AV_WL24)
#       define AV_WN24(p, v) AV_WL24(p, v)
#   endif

#   if    defined(AV_RN32) && !defined(AV_RL32)
#       define AV_RL32(p) AV_RN32(p)
#   elif !defined(AV_RN32) &&  defined(AV_RL32)
#       define AV_RN32(p) AV_RL32(p)
#   endif

#   if    defined(AV_WN32) && !defined(AV_WL32)
#       define AV_WL32(p, v) AV_WN32(p, v)
#   elif !defined(AV_WN32) &&  defined(AV_WL32)
#       define AV_WN32(p, v) AV_WL32(p, v)
#   endif

#   if    defined(AV_RN48) && !defined(AV_RL48)
#       define AV_RL48(p) AV_RN48(p)
#   elif !defined(AV_RN48) &&  defined(AV_RL48)
#       define AV_RN48(p) AV_RL48(p)
#   endif

#   if    defined(AV_WN48) && !defined(AV_WL48)
#       define AV_WL48(p, v) AV_WN48(p, v)
#   elif !defined(AV_WN48) &&  defined(AV_WL48)
#       define AV_WN48(p, v) AV_WL48(p, v)
#   endif

#   if    defined(AV_RN64) && !defined(AV_RL64)
#       define AV_RL64(p) AV_RN64(p)
#   elif !defined(AV_RN64) &&  defined(AV_RL64)
#       define AV_RN64(p) AV_RL64(p)
#   endif

#   if    defined(AV_WN64) && !defined(AV_WL64)
#       define AV_WL64(p, v) AV_WN64(p, v)
#   elif !defined(AV_WN64) &&  defined(AV_WL64)
#       define AV_WN64(p, v) AV_WL64(p, v)
#   endif

#endif /* !AV_HAVE_BIGENDIAN */

/*
 * Define AV_[RW]N helper macros to simplify definitions not provided
 * by per-arch headers.
 */

#if defined(__GNUC__) || defined(__clang__)

/* Packed may_alias unions let the compiler generate correct unaligned
 * loads/stores without undefined behavior from misaligned pointer casts. */
union unaligned_64 { uint64_t l; } __attribute__((packed)) av_alias;
union unaligned_32 { uint32_t l; } __attribute__((packed)) av_alias;
union unaligned_16 { uint16_t l; } __attribute__((packed)) av_alias;

#   define AV_RN(s, p) (((const union unaligned_##s *) (p))->l)
#   define AV_WN(s, p, v) ((((union unaligned_##s *) (p))->l) = (v))

#elif defined(_MSC_VER) && (defined(_M_ARM) || defined(_M_X64) || defined(_M_ARM64)) && AV_HAVE_FAST_UNALIGNED

/* MSVC on ARM/x64: the __unaligned qualifier tells the compiler the
 * pointer may be misaligned, so it emits safe access sequences. */
#   define AV_RN(s, p) (*((const __unaligned uint##s##_t*)(p)))
#   define AV_WN(s, p, v) (*((__unaligned uint##s##_t*)(p)) = (v))

#elif AV_HAVE_FAST_UNALIGNED

/* Hardware tolerates unaligned access: go through the aliasing unions. */
#   define AV_RN(s, p) (((const av_alias##s*)(p))->u##s)
#   define AV_WN(s, p, v) (((av_alias##s*)(p))->u##s = (v))

#else

/* Portable byte-by-byte fallback: correct on any host alignment and
 * endianness, at the cost of one memory access per byte. */

#ifndef AV_RB16
#   define AV_RB16(x)                           \
    ((((const uint8_t*)(x))[0] << 8) |          \
      ((const uint8_t*)(x))[1])
#endif
#ifndef AV_WB16
#   define AV_WB16(p, val) do {                 \
        uint16_t d = (val);                     \
        ((uint8_t*)(p))[1] = (d);               \
        ((uint8_t*)(p))[0] = (d)>>8;            \
    } while(0)
#endif

#ifndef AV_RL16
#   define AV_RL16(x)                           \
    ((((const uint8_t*)(x))[1] << 8) |          \
      ((const uint8_t*)(x))[0])
#endif
#ifndef AV_WL16
#   define AV_WL16(p, val) do {                 \
        uint16_t d = (val);                     \
        ((uint8_t*)(p))[0] = (d);               \
        ((uint8_t*)(p))[1] = (d)>>8;            \
    } while(0)
#endif

#ifndef AV_RB32
#   define AV_RB32(x)                                \
    (((uint32_t)((const uint8_t*)(x))[0] << 24) |    \
               (((const uint8_t*)(x))[1] << 16) |    \
               (((const uint8_t*)(x))[2] <<  8) |    \
                ((const uint8_t*)(x))[3])
#endif
#ifndef AV_WB32
#   define AV_WB32(p, val) do {                 \
        uint32_t d = (val);                     \
        ((uint8_t*)(p))[3] = (d);               \
        ((uint8_t*)(p))[2] = (d)>>8;            \
        ((uint8_t*)(p))[1] = (d)>>16;           \
        ((uint8_t*)(p))[0] = (d)>>24;           \
    } while(0)
#endif

#ifndef AV_RL32
#   define AV_RL32(x)                                \
    (((uint32_t)((const uint8_t*)(x))[3] << 24) |    \
               (((const uint8_t*)(x))[2] << 16) |    \
               (((const uint8_t*)(x))[1] <<  8) |    \
                ((const uint8_t*)(x))[0])
#endif
#ifndef AV_WL32
#   define AV_WL32(p, val) do {                 \
        uint32_t d = (val);                     \
        ((uint8_t*)(p))[0] = (d);               \
        ((uint8_t*)(p))[1] = (d)>>8;            \
        ((uint8_t*)(p))[2] = (d)>>16;           \
        ((uint8_t*)(p))[3] = (d)>>24;           \
    } while(0)
#endif

#ifndef AV_RB64
#   define AV_RB64(x)                                   \
    (((uint64_t)((const uint8_t*)(x))[0] << 56) |       \
     ((uint64_t)((const uint8_t*)(x))[1] << 48) |       \
     ((uint64_t)((const uint8_t*)(x))[2] << 40) |       \
     ((uint64_t)((const uint8_t*)(x))[3] << 32) |       \
     ((uint64_t)((const uint8_t*)(x))[4] << 24) |       \
     ((uint64_t)((const uint8_t*)(x))[5] << 16) |       \
     ((uint64_t)((const uint8_t*)(x))[6] <<  8) |       \
      (uint64_t)((const uint8_t*)(x))[7])
#endif
#ifndef AV_WB64
#   define AV_WB64(p, val) do {                 \
        uint64_t d = (val);                     \
        ((uint8_t*)(p))[7] = (d);               \
        ((uint8_t*)(p))[6] = (d)>>8;            \
        ((uint8_t*)(p))[5] = (d)>>16;           \
        ((uint8_t*)(p))[4] = (d)>>24;           \
        ((uint8_t*)(p))[3] = (d)>>32;           \
        ((uint8_t*)(p))[2] = (d)>>40;           \
        ((uint8_t*)(p))[1] = (d)>>48;           \
        ((uint8_t*)(p))[0] = (d)>>56;           \
    } while(0)
#endif

#ifndef AV_RL64
#   define AV_RL64(x)                                   \
    (((uint64_t)((const uint8_t*)(x))[7] << 56) |       \
     ((uint64_t)((const uint8_t*)(x))[6] << 48) |       \
     ((uint64_t)((const uint8_t*)(x))[5] << 40) |       \
     ((uint64_t)((const uint8_t*)(x))[4] << 32) |       \
     ((uint64_t)((const uint8_t*)(x))[3] << 24) |       \
     ((uint64_t)((const uint8_t*)(x))[2] << 16) |       \
     ((uint64_t)((const uint8_t*)(x))[1] <<  8) |       \
      (uint64_t)((const uint8_t*)(x))[0])
#endif
#ifndef AV_WL64
#   define AV_WL64(p, val) do {                 \
        uint64_t d = (val);                     \
        ((uint8_t*)(p))[0] = (d);               \
        ((uint8_t*)(p))[1] = (d)>>8;            \
        ((uint8_t*)(p))[2] = (d)>>16;           \
        ((uint8_t*)(p))[3] = (d)>>24;           \
        ((uint8_t*)(p))[4] = (d)>>32;           \
        ((uint8_t*)(p))[5] = (d)>>40;           \
        ((uint8_t*)(p))[6] = (d)>>48;           \
        ((uint8_t*)(p))[7] = (d)>>56;           \
    } while(0)
#endif

#if AV_HAVE_BIGENDIAN
#   define AV_RN(s, p)    AV_RB##s(p)
#   define AV_WN(s, p, v) AV_WB##s(p, v)
#else
#   define AV_RN(s, p)    AV_RL##s(p)
#   define AV_WN(s, p, v) AV_WL##s(p, v)
#endif

#endif /* byte-wise fallback (no compiler/arch unaligned support) */

#ifndef AV_RN16
#   define AV_RN16(p) AV_RN(16, p)
#endif

#ifndef AV_RN32
#   define AV_RN32(p) AV_RN(32, p)
#endif

#ifndef AV_RN64
#   define AV_RN64(p) AV_RN(64, p)
#endif

#ifndef AV_WN16
#   define AV_WN16(p, v) AV_WN(16, p, v)
#endif

#ifndef AV_WN32
#   define AV_WN32(p, v) AV_WN(32, p, v)
#endif

#ifndef AV_WN64
#   define AV_WN64(p, v) AV_WN(64, p, v)
#endif

/* Derive the foreign-endian accessor from the native one via a byteswap. */
#if AV_HAVE_BIGENDIAN
#   define AV_RB(s, p)    AV_RN##s(p)
#   define AV_WB(s, p, v) AV_WN##s(p, v)
#   define AV_RL(s, p)    av_bswap##s(AV_RN##s(p))
#   define AV_WL(s, p, v) AV_WN##s(p, av_bswap##s(v))
#else
#   define AV_RB(s, p)    av_bswap##s(AV_RN##s(p))
#   define AV_WB(s, p, v) AV_WN##s(p, av_bswap##s(v))
#   define AV_RL(s, p)    AV_RN##s(p)
#   define AV_WL(s, p, v) AV_WN##s(p, v)
#endif

/* Single bytes have no endianness; the L variants are plain aliases. */
#define AV_RB8(x)     (((const uint8_t*)(x))[0])
#define AV_WB8(p, d)  do { ((uint8_t*)(p))[0] = (d); } while(0)

#define AV_RL8(x)     AV_RB8(x)
#define AV_WL8(p, d)  AV_WB8(p, d)

#ifndef AV_RB16
#   define AV_RB16(p)    AV_RB(16, p)
#endif
#ifndef AV_WB16
#   define AV_WB16(p, v) AV_WB(16, p, v)
#endif

#ifndef AV_RL16
#   define AV_RL16(p)    AV_RL(16, p)
#endif
#ifndef AV_WL16
#   define AV_WL16(p, v) AV_WL(16, p, v)
#endif

#ifndef AV_RB32
#   define AV_RB32(p)    AV_RB(32, p)
#endif
#ifndef AV_WB32
#   define AV_WB32(p, v) AV_WB(32, p, v)
#endif

#ifndef AV_RL32
#   define AV_RL32(p)    AV_RL(32, p)
#endif
#ifndef AV_WL32
#   define AV_WL32(p, v) AV_WL(32, p, v)
#endif

#ifndef AV_RB64
#   define AV_RB64(p)    AV_RB(64, p)
#endif
#ifndef AV_WB64
#   define AV_WB64(p, v) AV_WB(64, p, v)
#endif

#ifndef AV_RL64
#   define AV_RL64(p)    AV_RL(64, p)
#endif
#ifndef AV_WL64
#   define AV_WL64(p, v) AV_WL(64, p, v)
#endif

/* 24- and 48-bit accessors have no native machine type; they are always
 * composed from byte accesses. */

#ifndef AV_RB24
#   define AV_RB24(x)                           \
    ((((const uint8_t*)(x))[0] << 16) |         \
     (((const uint8_t*)(x))[1] <<  8) |         \
      ((const uint8_t*)(x))[2])
#endif
#ifndef AV_WB24
#   define AV_WB24(p, d) do {                   \
        ((uint8_t*)(p))[2] = (d);               \
        ((uint8_t*)(p))[1] = (d)>>8;            \
        ((uint8_t*)(p))[0] = (d)>>16;           \
    } while(0)
#endif

#ifndef AV_RL24
#   define AV_RL24(x)                           \
    ((((const uint8_t*)(x))[2] << 16) |         \
     (((const uint8_t*)(x))[1] <<  8) |         \
      ((const uint8_t*)(x))[0])
#endif
#ifndef AV_WL24
#   define AV_WL24(p, d) do {                   \
        ((uint8_t*)(p))[0] = (d);               \
        ((uint8_t*)(p))[1] = (d)>>8;            \
        ((uint8_t*)(p))[2] = (d)>>16;           \
    } while(0)
#endif

#ifndef AV_RB48
#   define AV_RB48(x)                                   \
    (((uint64_t)((const uint8_t*)(x))[0] << 40) |       \
     ((uint64_t)((const uint8_t*)(x))[1] << 32) |       \
     ((uint64_t)((const uint8_t*)(x))[2] << 24) |       \
     ((uint64_t)((const uint8_t*)(x))[3] << 16) |       \
     ((uint64_t)((const uint8_t*)(x))[4] <<  8) |       \
      (uint64_t)((const uint8_t*)(x))[5])
#endif
#ifndef AV_WB48
#   define AV_WB48(p, darg) do {                \
        uint64_t d = (darg);                    \
        ((uint8_t*)(p))[5] = (d);               \
        ((uint8_t*)(p))[4] = (d)>>8;            \
        ((uint8_t*)(p))[3] = (d)>>16;           \
        ((uint8_t*)(p))[2] = (d)>>24;           \
        ((uint8_t*)(p))[1] = (d)>>32;           \
        ((uint8_t*)(p))[0] = (d)>>40;           \
    } while(0)
#endif

#ifndef AV_RL48
#   define AV_RL48(x)                                   \
    (((uint64_t)((const uint8_t*)(x))[5] << 40) |       \
     ((uint64_t)((const uint8_t*)(x))[4] << 32) |       \
     ((uint64_t)((const uint8_t*)(x))[3] << 24) |       \
     ((uint64_t)((const uint8_t*)(x))[2] << 16) |       \
     ((uint64_t)((const uint8_t*)(x))[1] <<  8) |       \
      (uint64_t)((const uint8_t*)(x))[0])
#endif
#ifndef AV_WL48
#   define AV_WL48(p, darg) do {                \
        uint64_t d = (darg);                    \
        ((uint8_t*)(p))[0] = (d);               \
        ((uint8_t*)(p))[1] = (d)>>8;            \
        ((uint8_t*)(p))[2] = (d)>>16;           \
        ((uint8_t*)(p))[3] = (d)>>24;           \
        ((uint8_t*)(p))[4] = (d)>>32;           \
        ((uint8_t*)(p))[5] = (d)>>40;           \
    } while(0)
#endif

/*
 * The AV_[RW]NA macros access naturally aligned data
 * in a type-safe way.
 */

#define AV_RNA(s, p)    (((const av_alias##s*)(p))->u##s)
#define AV_WNA(s, p, v) (((av_alias##s*)(p))->u##s = (v))

#ifndef AV_RN16A
#   define AV_RN16A(p) AV_RNA(16, p)
#endif

#ifndef AV_RN32A
#   define AV_RN32A(p) AV_RNA(32, p)
#endif

#ifndef AV_RN64A
#   define AV_RN64A(p) AV_RNA(64, p)
#endif

#ifndef AV_WN16A
#   define AV_WN16A(p, v) AV_WNA(16, p, v)
#endif

#ifndef AV_WN32A
#   define AV_WN32A(p, v) AV_WNA(32, p, v)
#endif

#ifndef AV_WN64A
#   define AV_WN64A(p, v) AV_WNA(64, p, v)
#endif

/* Aligned little/big-endian accessors, again deriving the foreign-endian
 * form from the aligned native accessor plus a byteswap. */
#if AV_HAVE_BIGENDIAN
#   define AV_RLA(s, p)    av_bswap##s(AV_RN##s##A(p))
#   define AV_WLA(s, p, v) AV_WN##s##A(p, av_bswap##s(v))
#   define AV_RBA(s, p)    AV_RN##s##A(p)
#   define AV_WBA(s, p, v) AV_WN##s##A(p, v)
#else
#   define AV_RLA(s, p)    AV_RN##s##A(p)
#   define AV_WLA(s, p, v) AV_WN##s##A(p, v)
#   define AV_RBA(s, p)    av_bswap##s(AV_RN##s##A(p))
#   define AV_WBA(s, p, v) AV_WN##s##A(p, av_bswap##s(v))
#endif

#ifndef AV_RL16A
#   define AV_RL16A(p) AV_RLA(16, p)
#endif
#ifndef AV_WL16A
#   define AV_WL16A(p, v) AV_WLA(16, p, v)
#endif

#ifndef AV_RB16A
#   define AV_RB16A(p) AV_RBA(16, p)
#endif
#ifndef AV_WB16A
#   define AV_WB16A(p, v) AV_WBA(16, p, v)
#endif

#ifndef AV_RL32A
#   define AV_RL32A(p) AV_RLA(32, p)
#endif
#ifndef AV_WL32A
#   define AV_WL32A(p, v) AV_WLA(32, p, v)
#endif

#ifndef AV_RB32A
#   define AV_RB32A(p) AV_RBA(32, p)
#endif
#ifndef AV_WB32A
#   define AV_WB32A(p, v) AV_WBA(32, p, v)
#endif

#ifndef AV_RL64A
#   define AV_RL64A(p) AV_RLA(64, p)
#endif
#ifndef AV_WL64A
#   define AV_WL64A(p, v) AV_WLA(64, p, v)
#endif

#ifndef AV_RB64A
#   define AV_RB64A(p) AV_RBA(64, p)
#endif
#ifndef AV_WB64A
#   define AV_WB64A(p, v) AV_WBA(64, p, v)
#endif

/*
 * The AV_COPYxxU macros are suitable for copying data to/from unaligned
 * memory locations.
 */

#define AV_COPYU(n, d, s) AV_WN##n(d, AV_RN##n(s));

#ifndef AV_COPY16U
#   define AV_COPY16U(d, s) AV_COPYU(16, d, s)
#endif

#ifndef AV_COPY32U
#   define AV_COPY32U(d, s) AV_COPYU(32, d, s)
#endif

#ifndef AV_COPY64U
#   define AV_COPY64U(d, s) AV_COPYU(64, d, s)
#endif

#ifndef AV_COPY128U
#   define AV_COPY128U(d, s)                                    \
    do {                                                        \
        AV_COPY64U(d, s);                                       \
        AV_COPY64U((char *)(d) + 8, (const char *)(s) + 8);     \
    } while(0)
#endif

/* Parameters for AV_COPY*, AV_SWAP*, AV_ZERO* must be
 * naturally aligned.
 */

#define AV_COPY(n, d, s) \
    (((av_alias##n*)(d))->u##n = ((const av_alias##n*)(s))->u##n)

#ifndef AV_COPY16
#   define AV_COPY16(d, s) AV_COPY(16, d, s)
#endif

#ifndef AV_COPY32
#   define AV_COPY32(d, s) AV_COPY(32, d, s)
#endif

#ifndef AV_COPY64
#   define AV_COPY64(d, s) AV_COPY(64, d, s)
#endif

#ifndef AV_COPY128
#   define AV_COPY128(d, s)                    \
    do {                                       \
        AV_COPY64(d, s);                       \
        AV_COPY64((char*)(d)+8, (char*)(s)+8); \
    } while(0)
#endif

/* NOTE(review): FFSWAP is expected to come from libavutil macros pulled in
 * by the including translation unit — confirm against callers. */
#define AV_SWAP(n, a, b) FFSWAP(av_alias##n, *(av_alias##n*)(a), *(av_alias##n*)(b))

#ifndef AV_SWAP64
#   define AV_SWAP64(a, b) AV_SWAP(64, a, b)
#endif

#define AV_ZERO(n, d) (((av_alias##n*)(d))->u##n = 0)

#ifndef AV_ZERO16
#   define AV_ZERO16(d) AV_ZERO(16, d)
#endif

#ifndef AV_ZERO32
#   define AV_ZERO32(d) AV_ZERO(32, d)
#endif

#ifndef AV_ZERO64
#   define AV_ZERO64(d) AV_ZERO(64, d)
#endif

#ifndef AV_ZERO128
#   define AV_ZERO128(d)   \
    do {                   \
        AV_ZERO64(d);      \
        AV_ZERO64((char*)(d)+8); \
    } while(0)
#endif

#endif /* AVUTIL_INTREADWRITE_H */