tor-browser

The Tor Browser
git clone https://git.dasho.dev/tor-browser.git

v64_intrinsics.h (9847B)


/*
 * Copyright (c) 2016, Alliance for Open Media. All rights reserved.
 *
 * This source code is subject to the terms of the BSD 2 Clause License and
 * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
 * was not distributed with this source code in the LICENSE file, you can
 * obtain it at www.aomedia.org/license/software. If the Alliance for Open
 * Media Patent License 1.0 was not distributed with this source code in the
 * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
 */

#ifndef AOM_AOM_DSP_SIMD_V64_INTRINSICS_H_
#define AOM_AOM_DSP_SIMD_V64_INTRINSICS_H_

#include <stdio.h>
#include <stdlib.h>

#include "aom_dsp/simd/v64_intrinsics_c.h"

/* Fallback to plain, unoptimised C. */

typedef c_v64 v64;
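
/* Every v64_* operation below simply forwards to its c_v64_* counterpart
 * from v64_intrinsics_c.h, the scalar reference implementation of a 64-bit
 * vector. Optimised builds are expected to include an architecture-specific
 * variant of these intrinsics (x86, ARM) instead of this fallback. */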

SIMD_INLINE uint32_t v64_low_u32(v64 a) { return c_v64_low_u32(a); }
SIMD_INLINE uint32_t v64_high_u32(v64 a) { return c_v64_high_u32(a); }
SIMD_INLINE int32_t v64_low_s32(v64 a) { return c_v64_low_s32(a); }
SIMD_INLINE int32_t v64_high_s32(v64 a) { return c_v64_high_s32(a); }
SIMD_INLINE v64 v64_from_32(uint32_t x, uint32_t y) {
  return c_v64_from_32(x, y);
}
SIMD_INLINE v64 v64_from_64(uint64_t x) { return c_v64_from_64(x); }
SIMD_INLINE uint64_t v64_u64(v64 x) { return c_v64_u64(x); }
SIMD_INLINE v64 v64_from_16(uint16_t a, uint16_t b, uint16_t c, uint16_t d) {
  return c_v64_from_16(a, b, c, d);
}
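
/* Illustrative sketch (not part of the upstream header): in the c_v64
 * reference implementation the first argument of the v64_from_*()
 * constructors lands in the most significant lanes, so construction and
 * extraction round-trip as follows. */
#if 0
#include <assert.h>
static void v64_from_32_example(void) {
  v64 v = v64_from_32(0x01020304, 0x05060708);
  assert(v64_high_u32(v) == 0x01020304); /* first argument is the high half */
  assert(v64_low_u32(v) == 0x05060708);  /* second argument is the low half */
}
#endif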

SIMD_INLINE uint32_t u32_load_unaligned(const void *p) {
  return c_u32_load_unaligned(p);
}
SIMD_INLINE uint32_t u32_load_aligned(const void *p) {
  return c_u32_load_aligned(p);
}
SIMD_INLINE void u32_store_unaligned(void *p, uint32_t a) {
  c_u32_store_unaligned(p, a);
}
SIMD_INLINE void u32_store_aligned(void *p, uint32_t a) {
  c_u32_store_aligned(p, a);
}

SIMD_INLINE v64 v64_load_unaligned(const void *p) {
  return c_v64_load_unaligned(p);
}
SIMD_INLINE v64 v64_load_aligned(const void *p) {
  return c_v64_load_aligned(p);
}

SIMD_INLINE void v64_store_unaligned(void *p, v64 a) {
  c_v64_store_unaligned(p, a);
}
SIMD_INLINE void v64_store_aligned(void *p, v64 a) {
  c_v64_store_aligned(p, a);
}
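
/* Illustrative sketch (not part of the upstream header): a typical
 * load/compute/store round trip, saturating-adding two rows of eight
 * 8-bit pixels. The helper name and arguments are hypothetical;
 * v64_sadd_u8() is declared further down in this file. */
#if 0
static void add_rows_u8(uint8_t *dst, const uint8_t *a, const uint8_t *b) {
  v64_store_unaligned(
      dst, v64_sadd_u8(v64_load_unaligned(a), v64_load_unaligned(b)));
}
#endif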

SIMD_INLINE v64 v64_align(v64 a, v64 b, unsigned int c) {
  return c_v64_align(a, b, c);
}
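
/* Note: in the c_v64 reference implementation, v64_align(a, b, c) extracts
 * the eight bytes starting c bytes into the 16-byte concatenation a:b
 * (with b supplying the low half), which lets sliding-window reads be
 * built from two aligned loads. */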

SIMD_INLINE v64 v64_zero(void) { return c_v64_zero(); }
SIMD_INLINE v64 v64_dup_8(uint8_t x) { return c_v64_dup_8(x); }
SIMD_INLINE v64 v64_dup_16(uint16_t x) { return c_v64_dup_16(x); }
SIMD_INLINE v64 v64_dup_32(uint32_t x) { return c_v64_dup_32(x); }

SIMD_INLINE v64 v64_add_8(v64 a, v64 b) { return c_v64_add_8(a, b); }
SIMD_INLINE v64 v64_add_16(v64 a, v64 b) { return c_v64_add_16(a, b); }
SIMD_INLINE v64 v64_sadd_u8(v64 a, v64 b) { return c_v64_sadd_u8(a, b); }
SIMD_INLINE v64 v64_sadd_s8(v64 a, v64 b) { return c_v64_sadd_s8(a, b); }
SIMD_INLINE v64 v64_sadd_s16(v64 a, v64 b) { return c_v64_sadd_s16(a, b); }
SIMD_INLINE v64 v64_add_32(v64 a, v64 b) { return c_v64_add_32(a, b); }
SIMD_INLINE v64 v64_sub_8(v64 a, v64 b) { return c_v64_sub_8(a, b); }
SIMD_INLINE v64 v64_ssub_u8(v64 a, v64 b) { return c_v64_ssub_u8(a, b); }
SIMD_INLINE v64 v64_ssub_s8(v64 a, v64 b) { return c_v64_ssub_s8(a, b); }
SIMD_INLINE v64 v64_sub_16(v64 a, v64 b) { return c_v64_sub_16(a, b); }
SIMD_INLINE v64 v64_ssub_s16(v64 a, v64 b) { return c_v64_ssub_s16(a, b); }
SIMD_INLINE v64 v64_ssub_u16(v64 a, v64 b) { return c_v64_ssub_u16(a, b); }
SIMD_INLINE v64 v64_sub_32(v64 a, v64 b) { return c_v64_sub_32(a, b); }
SIMD_INLINE v64 v64_abs_s16(v64 a) { return c_v64_abs_s16(a); }
SIMD_INLINE v64 v64_abs_s8(v64 a) { return c_v64_abs_s8(a); }
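
/* Note: the v64_sadd_* and v64_ssub_* forms are the saturating variants of
 * add/sub: results are clamped to the lane type's range instead of
 * wrapping. */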

SIMD_INLINE v64 v64_ziplo_8(v64 a, v64 b) { return c_v64_ziplo_8(a, b); }
SIMD_INLINE v64 v64_ziphi_8(v64 a, v64 b) { return c_v64_ziphi_8(a, b); }
SIMD_INLINE v64 v64_ziplo_16(v64 a, v64 b) { return c_v64_ziplo_16(a, b); }
SIMD_INLINE v64 v64_ziphi_16(v64 a, v64 b) { return c_v64_ziphi_16(a, b); }
SIMD_INLINE v64 v64_ziplo_32(v64 a, v64 b) { return c_v64_ziplo_32(a, b); }
SIMD_INLINE v64 v64_ziphi_32(v64 a, v64 b) { return c_v64_ziphi_32(a, b); }
SIMD_INLINE v64 v64_unziplo_8(v64 a, v64 b) { return c_v64_unziplo_8(a, b); }
SIMD_INLINE v64 v64_unziphi_8(v64 a, v64 b) { return c_v64_unziphi_8(a, b); }
SIMD_INLINE v64 v64_unziplo_16(v64 a, v64 b) { return c_v64_unziplo_16(a, b); }
SIMD_INLINE v64 v64_unziphi_16(v64 a, v64 b) { return c_v64_unziphi_16(a, b); }
SIMD_INLINE v64 v64_unpacklo_u8_s16(v64 a) { return c_v64_unpacklo_u8_s16(a); }
SIMD_INLINE v64 v64_unpackhi_u8_s16(v64 a) { return c_v64_unpackhi_u8_s16(a); }
SIMD_INLINE v64 v64_unpacklo_s8_s16(v64 a) { return c_v64_unpacklo_s8_s16(a); }
SIMD_INLINE v64 v64_unpackhi_s8_s16(v64 a) { return c_v64_unpackhi_s8_s16(a); }
SIMD_INLINE v64 v64_pack_s32_s16(v64 a, v64 b) {
  return c_v64_pack_s32_s16(a, b);
}
SIMD_INLINE v64 v64_pack_s32_u16(v64 a, v64 b) {
  return c_v64_pack_s32_u16(a, b);
}
SIMD_INLINE v64 v64_pack_s16_u8(v64 a, v64 b) {
  return c_v64_pack_s16_u8(a, b);
}
SIMD_INLINE v64 v64_pack_s16_s8(v64 a, v64 b) {
  return c_v64_pack_s16_s8(a, b);
}
SIMD_INLINE v64 v64_unpacklo_u16_s32(v64 a) {
  return c_v64_unpacklo_u16_s32(a);
}
SIMD_INLINE v64 v64_unpacklo_s16_s32(v64 a) {
  return c_v64_unpacklo_s16_s32(a);
}
SIMD_INLINE v64 v64_unpackhi_u16_s32(v64 a) {
  return c_v64_unpackhi_u16_s32(a);
}
SIMD_INLINE v64 v64_unpackhi_s16_s32(v64 a) {
  return c_v64_unpackhi_s16_s32(a);
}
SIMD_INLINE v64 v64_shuffle_8(v64 a, v64 pattern) {
  return c_v64_shuffle_8(a, pattern);
}
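
/* Illustrative sketch (not part of the upstream header): the unpack
 * intrinsics widen lanes (here u8 to s16), the usual first step before
 * 16-bit arithmetic on pixel data. The helper name is hypothetical and
 * v64_sub_16() is declared above. */
#if 0
static v64 diff_lo_s16(v64 a, v64 b) {
  /* Widen the low four bytes of each vector to 16 bits, then subtract. */
  return v64_sub_16(v64_unpacklo_u8_s16(a), v64_unpacklo_u8_s16(b));
}
#endif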

SIMD_INLINE c_sad64_internal v64_sad_u8_init(void) {
  return c_v64_sad_u8_init();
}
SIMD_INLINE c_sad64_internal v64_sad_u8(c_sad64_internal s, v64 a, v64 b) {
  return c_v64_sad_u8(s, a, b);
}
SIMD_INLINE uint32_t v64_sad_u8_sum(c_sad64_internal s) {
  return c_v64_sad_u8_sum(s);
}
SIMD_INLINE c_ssd64_internal v64_ssd_u8_init(void) {
  return c_v64_ssd_u8_init();
}
SIMD_INLINE c_ssd64_internal v64_ssd_u8(c_ssd64_internal s, v64 a, v64 b) {
  return c_v64_ssd_u8(s, a, b);
}
SIMD_INLINE uint32_t v64_ssd_u8_sum(c_ssd64_internal s) {
  return c_v64_ssd_u8_sum(s);
}
SIMD_INLINE int64_t v64_dotp_su8(v64 a, v64 b) { return c_v64_dotp_su8(a, b); }
SIMD_INLINE int64_t v64_dotp_s16(v64 a, v64 b) { return c_v64_dotp_s16(a, b); }
SIMD_INLINE uint64_t v64_hadd_u8(v64 a) { return c_v64_hadd_u8(a); }
SIMD_INLINE int64_t v64_hadd_s16(v64 a) { return c_v64_hadd_s16(a); }
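
/* Illustrative sketch (not part of the upstream header): the
 * init/accumulate/sum triple above computes a sum of absolute differences
 * over a block, eight pixels per step. The helper name, block size and
 * stride arguments are hypothetical. */
#if 0
static uint32_t sad_8x8(const uint8_t *src, int src_stride,
                        const uint8_t *ref, int ref_stride) {
  c_sad64_internal s = v64_sad_u8_init();
  for (int i = 0; i < 8; i++)
    s = v64_sad_u8(s, v64_load_unaligned(src + i * src_stride),
                   v64_load_unaligned(ref + i * ref_stride));
  return v64_sad_u8_sum(s);
}
#endif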

SIMD_INLINE v64 v64_or(v64 a, v64 b) { return c_v64_or(a, b); }
SIMD_INLINE v64 v64_xor(v64 a, v64 b) { return c_v64_xor(a, b); }
SIMD_INLINE v64 v64_and(v64 a, v64 b) { return c_v64_and(a, b); }
SIMD_INLINE v64 v64_andn(v64 a, v64 b) { return c_v64_andn(a, b); }
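
/* Note: in the c_v64 reference implementation v64_andn(a, b) computes
 * a & ~b, i.e. the second operand is the one inverted. */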
    158 
    159 SIMD_INLINE v64 v64_mullo_s16(v64 a, v64 b) { return c_v64_mullo_s16(a, b); }
    160 SIMD_INLINE v64 v64_mulhi_s16(v64 a, v64 b) { return c_v64_mulhi_s16(a, b); }
    161 SIMD_INLINE v64 v64_mullo_s32(v64 a, v64 b) { return c_v64_mullo_s32(a, b); }
    162 SIMD_INLINE v64 v64_madd_s16(v64 a, v64 b) { return c_v64_madd_s16(a, b); }
    163 SIMD_INLINE v64 v64_madd_us8(v64 a, v64 b) { return c_v64_madd_us8(a, b); }
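
/* Note: mullo/mulhi return the low and high 16 bits of each 32-bit
 * product, and v64_madd_s16() multiplies 16-bit lanes pairwise and sums
 * adjacent products into 32-bit lanes (cf. the x86 pmaddwd instruction). */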

SIMD_INLINE v64 v64_avg_u8(v64 a, v64 b) { return c_v64_avg_u8(a, b); }
SIMD_INLINE v64 v64_rdavg_u8(v64 a, v64 b) { return c_v64_rdavg_u8(a, b); }
SIMD_INLINE v64 v64_rdavg_u16(v64 a, v64 b) { return c_v64_rdavg_u16(a, b); }
SIMD_INLINE v64 v64_avg_u16(v64 a, v64 b) { return c_v64_avg_u16(a, b); }
SIMD_INLINE v64 v64_min_u8(v64 a, v64 b) { return c_v64_min_u8(a, b); }
SIMD_INLINE v64 v64_max_u8(v64 a, v64 b) { return c_v64_max_u8(a, b); }
SIMD_INLINE v64 v64_min_s8(v64 a, v64 b) { return c_v64_min_s8(a, b); }
SIMD_INLINE v64 v64_max_s8(v64 a, v64 b) { return c_v64_max_s8(a, b); }
SIMD_INLINE v64 v64_min_s16(v64 a, v64 b) { return c_v64_min_s16(a, b); }
SIMD_INLINE v64 v64_max_s16(v64 a, v64 b) { return c_v64_max_s16(a, b); }
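
/* Note: in the c_v64 reference implementation v64_avg_* rounds the average
 * up, (a + b + 1) >> 1, while v64_rdavg_* rounds it down, (a + b) >> 1. */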

SIMD_INLINE v64 v64_cmpgt_s8(v64 a, v64 b) { return c_v64_cmpgt_s8(a, b); }
SIMD_INLINE v64 v64_cmplt_s8(v64 a, v64 b) { return c_v64_cmplt_s8(a, b); }
SIMD_INLINE v64 v64_cmpeq_8(v64 a, v64 b) { return c_v64_cmpeq_8(a, b); }
SIMD_INLINE v64 v64_cmpgt_s16(v64 a, v64 b) { return c_v64_cmpgt_s16(a, b); }
SIMD_INLINE v64 v64_cmplt_s16(v64 a, v64 b) { return c_v64_cmplt_s16(a, b); }
SIMD_INLINE v64 v64_cmpeq_16(v64 a, v64 b) { return c_v64_cmpeq_16(a, b); }
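
/* Illustrative sketch (not part of the upstream header): the compares
 * return per-lane masks (all ones where the predicate holds, zero
 * elsewhere, as in the c_v64 reference), so they combine with the logical
 * ops above for branch-free selection. The helper name is hypothetical. */
#if 0
static v64 select_max_s8(v64 a, v64 b) {
  v64 m = v64_cmpgt_s8(a, b);                   /* 0xff where a > b */
  return v64_or(v64_and(m, a), v64_andn(b, m)); /* (m & a) | (b & ~m) */
}
#endif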

SIMD_INLINE v64 v64_shl_8(v64 a, unsigned int n) { return c_v64_shl_8(a, n); }
SIMD_INLINE v64 v64_shr_u8(v64 a, unsigned int n) { return c_v64_shr_u8(a, n); }
SIMD_INLINE v64 v64_shr_s8(v64 a, unsigned int n) { return c_v64_shr_s8(a, n); }
SIMD_INLINE v64 v64_shl_16(v64 a, unsigned int n) { return c_v64_shl_16(a, n); }
SIMD_INLINE v64 v64_shr_u16(v64 a, unsigned int n) {
  return c_v64_shr_u16(a, n);
}
SIMD_INLINE v64 v64_shr_s16(v64 a, unsigned int n) {
  return c_v64_shr_s16(a, n);
}
SIMD_INLINE v64 v64_shl_32(v64 a, unsigned int n) { return c_v64_shl_32(a, n); }
SIMD_INLINE v64 v64_shr_u32(v64 a, unsigned int n) {
  return c_v64_shr_u32(a, n);
}
SIMD_INLINE v64 v64_shr_s32(v64 a, unsigned int n) {
  return c_v64_shr_s32(a, n);
}
SIMD_INLINE v64 v64_shr_n_byte(v64 a, unsigned int n) {
  return c_v64_shr_n_byte(a, n);
}
SIMD_INLINE v64 v64_shl_n_byte(v64 a, unsigned int n) {
  return c_v64_shl_n_byte(a, n);
}
SIMD_INLINE v64 v64_shl_n_8(v64 a, unsigned int c) {
  return c_v64_shl_n_8(a, c);
}
SIMD_INLINE v64 v64_shr_n_u8(v64 a, unsigned int c) {
  return c_v64_shr_n_u8(a, c);
}
SIMD_INLINE v64 v64_shr_n_s8(v64 a, unsigned int c) {
  return c_v64_shr_n_s8(a, c);
}
SIMD_INLINE v64 v64_shl_n_16(v64 a, unsigned int c) {
  return c_v64_shl_n_16(a, c);
}
SIMD_INLINE v64 v64_shr_n_u16(v64 a, unsigned int c) {
  return c_v64_shr_n_u16(a, c);
}
SIMD_INLINE v64 v64_shr_n_s16(v64 a, unsigned int c) {
  return c_v64_shr_n_s16(a, c);
}
SIMD_INLINE v64 v64_shl_n_32(v64 a, unsigned int c) {
  return c_v64_shl_n_32(a, c);
}
SIMD_INLINE v64 v64_shr_n_u32(v64 a, unsigned int c) {
  return c_v64_shr_n_u32(a, c);
}
SIMD_INLINE v64 v64_shr_n_s32(v64 a, unsigned int c) {
  return c_v64_shr_n_s32(a, c);
}
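
/* Note: the _n_ variants mirror the plain shifts; the shift amount is
 * expected to be a compile-time constant so that optimised backends can
 * emit immediate-operand shift instructions. */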

#endif  // AOM_AOM_DSP_SIMD_V64_INTRINSICS_H_