tor-browser

The Tor Browser
git clone https://git.dasho.dev/tor-browser.git
Log | Files | Refs | README | LICENSE

refstruct.c (11345B)


      1 /*
      2 * This file is part of FFmpeg.
      3 *
      4 * FFmpeg is free software; you can redistribute it and/or
      5 * modify it under the terms of the GNU Lesser General Public
      6 * License as published by the Free Software Foundation; either
      7 * version 2.1 of the License, or (at your option) any later version.
      8 *
      9 * FFmpeg is distributed in the hope that it will be useful,
     10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
     11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
     12 * Lesser General Public License for more details.
     13 *
     14 * You should have received a copy of the GNU Lesser General Public
     15 * License along with FFmpeg; if not, write to the Free Software
     16 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
     17 */
     18 
     19 #include <stdatomic.h>
     20 #include <stdint.h>
     21 #include <string.h>
     22 
     23 #include "refstruct.h"
     24 
     25 #include "avassert.h"
     26 #include "error.h"
     27 #include "macros.h"
     28 #include "mem.h"
     29 #include "mem_internal.h"
     30 #include "thread.h"
     31 
     32 #ifndef REFSTRUCT_CHECKED
     33 #ifndef ASSERT_LEVEL
     34 #define ASSERT_LEVEL 0
     35 #endif
     36 #define REFSTRUCT_CHECKED (ASSERT_LEVEL >= 1)
     37 #endif
     38 
     39 #if REFSTRUCT_CHECKED
     40 #define ff_assert(cond) av_assert0(cond)
     41 #else
     42 #define ff_assert(cond) ((void)0)
     43 #endif
     44 
     45 #define REFSTRUCT_COOKIE AV_NE((uint64_t)MKBETAG('R', 'e', 'f', 'S') << 32 | MKBETAG('t', 'r', 'u', 'c'), \
     46                               MKTAG('R', 'e', 'f', 'S') | (uint64_t)MKTAG('t', 'r', 'u', 'c') << 32)
     47 
     48 #if __STDC_VERSION__ >= 201112L && !defined(_MSC_VER)
     49 #define REFCOUNT_OFFSET FFALIGN(sizeof(RefCount), FFMAX(ALIGN_64, _Alignof(max_align_t)))
     50 #else
     51 #define REFCOUNT_OFFSET FFALIGN(sizeof(RefCount), ALIGN_64)
     52 #endif
     53 
/**
 * Bookkeeping header placed REFCOUNT_OFFSET bytes before every
 * user-visible refstruct object.
 */
typedef struct RefCount {
   /**
    * An uintptr_t is big enough to hold the address of every reference,
    * so no overflow can happen when incrementing the refcount as long as
    * the user does not throw away references.
    */
   atomic_uintptr_t  refcount;
   /** User (or pool) context handed to free_cb. */
   AVRefStructOpaque opaque;
   /** Optional user callback invoked on the object before deallocation. */
   void (*free_cb)(AVRefStructOpaque opaque, void *obj);
   /** Deallocator for the whole buffer; av_free by default,
    *  pool_return_entry for pooled entries. */
   void (*free)(void *ref);

#if REFSTRUCT_CHECKED
   /** Magic value used to detect pointers that are not refstruct objects. */
   uint64_t cookie;
#endif
} RefCount;
     69 
     70 static RefCount *get_refcount(void *obj)
     71 {
     72    RefCount *ref = (RefCount*)((char*)obj - REFCOUNT_OFFSET);
     73    ff_assert(ref->cookie == REFSTRUCT_COOKIE);
     74    return ref;
     75 }
     76 
     77 static const RefCount *cget_refcount(const void *obj)
     78 {
     79    const RefCount *ref = (const RefCount*)((const char*)obj - REFCOUNT_OFFSET);
     80    ff_assert(ref->cookie == REFSTRUCT_COOKIE);
     81    return ref;
     82 }
     83 
     84 static void *get_userdata(void *buf)
     85 {
     86    return (char*)buf + REFCOUNT_OFFSET;
     87 }
     88 
     89 static void refcount_init(RefCount *ref, AVRefStructOpaque opaque,
     90                          void (*free_cb)(AVRefStructOpaque opaque, void *obj))
     91 {
     92    atomic_init(&ref->refcount, 1);
     93    ref->opaque  = opaque;
     94    ref->free_cb = free_cb;
     95    ref->free    = av_free;
     96 
     97 #if REFSTRUCT_CHECKED
     98    ref->cookie  = REFSTRUCT_COOKIE;
     99 #endif
    100 }
    101 
    102 void *av_refstruct_alloc_ext_c(size_t size, unsigned flags, AVRefStructOpaque opaque,
    103                               void (*free_cb)(AVRefStructOpaque opaque, void *obj))
    104 {
    105    void *buf, *obj;
    106 
    107    if (size > SIZE_MAX - REFCOUNT_OFFSET)
    108        return NULL;
    109    buf = av_malloc(size + REFCOUNT_OFFSET);
    110    if (!buf)
    111        return NULL;
    112    refcount_init(buf, opaque, free_cb);
    113    obj = get_userdata(buf);
    114    if (!(flags & AV_REFSTRUCT_FLAG_NO_ZEROING))
    115        memset(obj, 0, size);
    116 
    117    return obj;
    118 }
    119 
    120 void av_refstruct_unref(void *objp)
    121 {
    122    void *obj;
    123    RefCount *ref;
    124 
    125    memcpy(&obj, objp, sizeof(obj));
    126    if (!obj)
    127        return;
    128    memcpy(objp, &(void *){ NULL }, sizeof(obj));
    129 
    130    ref = get_refcount(obj);
    131    if (atomic_fetch_sub_explicit(&ref->refcount, 1, memory_order_acq_rel) == 1) {
    132        if (ref->free_cb)
    133            ref->free_cb(ref->opaque, obj);
    134        ref->free(ref);
    135    }
    136 
    137    return;
    138 }
    139 
    140 void *av_refstruct_ref(void *obj)
    141 {
    142    RefCount *ref = get_refcount(obj);
    143 
    144    atomic_fetch_add_explicit(&ref->refcount, 1, memory_order_relaxed);
    145 
    146    return obj;
    147 }
    148 
    149 const void *av_refstruct_ref_c(const void *obj)
    150 {
    151    /* Casting const away here is fine, as it is only supposed
    152     * to apply to the user's data and not our bookkeeping data. */
    153    RefCount *ref = get_refcount((void*)obj);
    154 
    155    atomic_fetch_add_explicit(&ref->refcount, 1, memory_order_relaxed);
    156 
    157    return obj;
    158 }
    159 
    160 void av_refstruct_replace(void *dstp, const void *src)
    161 {
    162    const void *dst;
    163    memcpy(&dst, dstp, sizeof(dst));
    164 
    165    if (src == dst)
    166        return;
    167    av_refstruct_unref(dstp);
    168    if (src) {
    169        dst = av_refstruct_ref_c(src);
    170        memcpy(dstp, &dst, sizeof(dst));
    171    }
    172 }
    173 
    174 int av_refstruct_exclusive(const void *obj)
    175 {
    176    const RefCount *ref = cget_refcount(obj);
    177    /* Casting const away here is safe, because it is a load.
    178     * It is necessary because atomic_load_explicit() does not
    179     * accept const atomics in C11 (see also N1807). */
    180    return atomic_load_explicit((atomic_uintptr_t*)&ref->refcount, memory_order_acquire) == 1;
    181 }
    182 
/** Pool of reusable refstruct entries, itself a refstruct object. */
struct AVRefStructPool {
   /** Size of each entry's user data. */
   size_t size;
   /** User context passed to all callbacks below. */
   AVRefStructOpaque opaque;
   /** Called once when an entry is first allocated; may fail. */
   int  (*init_cb)(AVRefStructOpaque opaque, void *obj);
   /** Called on every entry handed out (wrapped via pool_reset_entry). */
   void (*reset_cb)(AVRefStructOpaque opaque, void *obj);
   /** Called when an entry is finally destroyed (not recycled). */
   void (*free_entry_cb)(AVRefStructOpaque opaque, void *obj);
   /** Called when the pool itself is freed. */
   void (*free_cb)(AVRefStructOpaque opaque);

   /** Set (under mutex) once the pool owner dropped its reference;
    *  returned entries are then freed instead of recycled. */
   int uninited;
   /** Flags applied when allocating individual entries. */
   unsigned entry_flags;
   /** Flags governing pool behavior (reset/free on init error, etc.). */
   unsigned pool_flags;

   /** The number of outstanding entries not in available_entries. */
   atomic_uintptr_t refcount;
   /**
    * This is a linked list of available entries;
    * the RefCount's opaque pointer is used as next pointer
    * for available entries.
    * While the entries are in use, the opaque is a pointer
    * to the corresponding AVRefStructPool.
    */
   RefCount *available_entries;
   /** Protects uninited and available_entries. */
   AVMutex mutex;
};
    207 
    208 static void pool_free(AVRefStructPool *pool)
    209 {
    210    ff_mutex_destroy(&pool->mutex);
    211    if (pool->free_cb)
    212        pool->free_cb(pool->opaque);
    213    av_free(get_refcount(pool));
    214 }
    215 
    216 static void pool_free_entry(AVRefStructPool *pool, RefCount *ref)
    217 {
    218    if (pool->free_entry_cb)
    219        pool->free_entry_cb(pool->opaque, get_userdata(ref));
    220    av_free(ref);
    221 }
    222 
/*
 * Entry "free" callback: instead of deallocating, try to push the entry
 * back onto its pool's freelist. If the pool has already been uninited,
 * the entry is destroyed instead. Either way, one pool reference is
 * dropped at the end; the last drop frees the pool itself.
 */
static void pool_return_entry(void *ref_)
{
   RefCount *ref = ref_;
   /* While in use, the entry's opaque points at its pool. */
   AVRefStructPool *pool = ref->opaque.nc;

   ff_mutex_lock(&pool->mutex);
   if (!pool->uninited) {
       /* Recycle: opaque doubles as the freelist next pointer. */
       ref->opaque.nc = pool->available_entries;
       pool->available_entries = ref;
       ref = NULL;
   }
   ff_mutex_unlock(&pool->mutex);

   /* Pool already uninited: the entry cannot be recycled. */
   if (ref)
       pool_free_entry(pool, ref);

   /* Must come last: pool_free() invalidates pool. */
   if (atomic_fetch_sub_explicit(&pool->refcount, 1, memory_order_acq_rel) == 1)
       pool_free(pool);
}
    242 
    243 static void pool_reset_entry(AVRefStructOpaque opaque, void *entry)
    244 {
    245    AVRefStructPool *pool = opaque.nc;
    246 
    247    pool->reset_cb(pool->opaque, entry);
    248 }
    249 
/*
 * Fetch an entry for the caller: reuse one from the freelist if possible,
 * otherwise allocate and initialize a new one. On success *datap receives
 * the entry (refcount one) and the pool's refcount is incremented; on
 * failure *datap is NULL and a negative error code is returned.
 */
static int refstruct_pool_get_ext(void *datap, AVRefStructPool *pool)
{
   void *ret = NULL;

   /* Ensure *datap is NULL on every error path. */
   memcpy(datap, &(void *){ NULL }, sizeof(void*));

   ff_mutex_lock(&pool->mutex);
   ff_assert(!pool->uninited);
   if (pool->available_entries) {
       /* Pop the head of the freelist and mark it as in use. */
       RefCount *ref = pool->available_entries;
       ret = get_userdata(ref);
       pool->available_entries = ref->opaque.nc;
       ref->opaque.nc = pool;
       atomic_init(&ref->refcount, 1);
   }
   ff_mutex_unlock(&pool->mutex);

   if (!ret) {
       /* Freelist empty: allocate a brand-new entry. */
       RefCount *ref;
       ret = av_refstruct_alloc_ext(pool->size, pool->entry_flags, pool,
                                    pool->reset_cb ? pool_reset_entry : NULL);
       if (!ret)
           return AVERROR(ENOMEM);
       ref = get_refcount(ret);
       /* Redirect destruction to the pool's recycling path. */
       ref->free = pool_return_entry;
       if (pool->init_cb) {
           int err = pool->init_cb(pool->opaque, ret);
           if (err < 0) {
               if (pool->pool_flags & AV_REFSTRUCT_POOL_FLAG_RESET_ON_INIT_ERROR)
                   pool->reset_cb(pool->opaque, ret);
               if (pool->pool_flags & AV_REFSTRUCT_POOL_FLAG_FREE_ON_INIT_ERROR)
                   pool->free_entry_cb(pool->opaque, ret);
               /* Direct av_free: the pool refcount was not incremented yet,
                * so pool_return_entry must not run for this entry. */
               av_free(ref);
               return err;
           }
       }
   }
   /* Account for the new outstanding entry. */
   atomic_fetch_add_explicit(&pool->refcount, 1, memory_order_relaxed);

   if (pool->pool_flags & AV_REFSTRUCT_POOL_FLAG_ZERO_EVERY_TIME)
       memset(ret, 0, pool->size);

   memcpy(datap, &ret, sizeof(ret));

   return 0;
}
    296 
    297 void *av_refstruct_pool_get(AVRefStructPool *pool)
    298 {
    299    void *ret;
    300    refstruct_pool_get_ext(&ret, pool);
    301    return ret;
    302 }
    303 
    304 /**
    305 * Hint: The content of pool_unref() and refstruct_pool_uninit()
    306 * could currently be merged; they are only separate functions
    307 * in case we would ever introduce weak references.
    308 */
    309 static void pool_unref(void *ref)
    310 {
    311    AVRefStructPool *pool = get_userdata(ref);
    312    if (atomic_fetch_sub_explicit(&pool->refcount, 1, memory_order_acq_rel) == 1)
    313        pool_free(pool);
    314 }
    315 
/*
 * free_cb of the pool object itself: runs when the pool owner drops the
 * pool reference. Marks the pool uninited (so returning entries are freed
 * rather than recycled) and destroys everything on the freelist.
 */
static void refstruct_pool_uninit(AVRefStructOpaque unused, void *obj)
{
   AVRefStructPool *pool = obj;
   RefCount *entry;

   /* Flag and freelist must be updated atomically w.r.t. pool_return_entry. */
   ff_mutex_lock(&pool->mutex);
   ff_assert(!pool->uninited);
   pool->uninited = 1;
   entry = pool->available_entries;
   pool->available_entries = NULL;
   ff_mutex_unlock(&pool->mutex);

   /* Drain the detached freelist outside the lock. */
   while (entry) {
       void *next = entry->opaque.nc; /* opaque is the next pointer here */
       pool_free_entry(pool, entry);
       entry = next;
   }
}
    334 
/** Convenience wrapper: allocate a pool without any callbacks or opaque. */
AVRefStructPool *av_refstruct_pool_alloc(size_t size, unsigned flags)
{
   return av_refstruct_pool_alloc_ext(size, flags, NULL, NULL, NULL, NULL, NULL);
}
    339 
/*
 * Allocate a pool of entries of the given size. The pool is itself a
 * refstruct object whose free callback (refstruct_pool_uninit) drains the
 * freelist; pool_unref replaces the default deallocator so the pool memory
 * survives until the last outstanding entry is returned.
 * Returns NULL on allocation or mutex-init failure.
 */
AVRefStructPool *av_refstruct_pool_alloc_ext_c(size_t size, unsigned flags,
                                              AVRefStructOpaque opaque,
                                              int  (*init_cb)(AVRefStructOpaque opaque, void *obj),
                                              void (*reset_cb)(AVRefStructOpaque opaque, void *obj),
                                              void (*free_entry_cb)(AVRefStructOpaque opaque, void *obj),
                                              void (*free_cb)(AVRefStructOpaque opaque))
{
   AVRefStructPool *pool = av_refstruct_alloc_ext(sizeof(*pool), 0, NULL,
                                                  refstruct_pool_uninit);
   int err;

   if (!pool)
       return NULL;
   /* Defer actual deallocation until all outstanding entries returned. */
   get_refcount(pool)->free = pool_unref;

   pool->size          = size;
   pool->opaque        = opaque;
   pool->init_cb       = init_cb;
   pool->reset_cb      = reset_cb;
   pool->free_entry_cb = free_entry_cb;
   pool->free_cb       = free_cb;
#define COMMON_FLAGS AV_REFSTRUCT_POOL_FLAG_NO_ZEROING
   pool->entry_flags   = flags & COMMON_FLAGS;
   // Filter out nonsense combinations to avoid checks later.
   if (!pool->reset_cb)
       flags &= ~AV_REFSTRUCT_POOL_FLAG_RESET_ON_INIT_ERROR;
   if (!pool->free_entry_cb)
       flags &= ~AV_REFSTRUCT_POOL_FLAG_FREE_ON_INIT_ERROR;
   pool->pool_flags    = flags;

   if (flags & AV_REFSTRUCT_POOL_FLAG_ZERO_EVERY_TIME) {
       // We will zero the buffer before every use, so zeroing
       // upon allocating the buffer is unnecessary.
       pool->entry_flags |= AV_REFSTRUCT_FLAG_NO_ZEROING;
   }

   atomic_init(&pool->refcount, 1);

   err = ff_mutex_init(&pool->mutex, NULL);
   if (err) {
       // Don't call av_refstruct_uninit() on pool, as it hasn't been properly
       // set up and is just a POD right now.
       av_free(get_refcount(pool));
       return NULL;
   }
   return pool;
}
    386 }