tor-browser

The Tor Browser
git clone https://git.dasho.dev/tor-browser.git

ml.c (6423B)


/*
 * Copyright (c) 2016, Alliance for Open Media. All rights reserved.
 *
 * This source code is subject to the terms of the BSD 2 Clause License and
 * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
 * was not distributed with this source code in the LICENSE file, you can
 * obtain it at www.aomedia.org/license/software. If the Alliance for Open
 * Media Patent License 1.0 was not distributed with this source code in the
 * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
 */

#include <assert.h>
#include <math.h>
#include <string.h>  // memcpy(), used under CONFIG_NN_V2 below

#include "aom_dsp/aom_dsp_common.h"
#include "aom_dsp/mathutils.h"
#include "av1/encoder/ml.h"

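// Rounds each entry of output[] to the nearest multiple of 2^-9 (1/512),
// i.e. keeps 9 fractional bits of precision. For example, 0.3f becomes
// 154 / 512 = 0.30078125f. Presumably this keeps float outputs stable
// across implementations (e.g. the C and SIMD paths) that might otherwise
// differ in low-order bits.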
void av1_nn_output_prec_reduce(float *const output, int num_output) {
  const int prec_bits = 9;
  const int prec = 1 << prec_bits;
  const float inv_prec = (float)(1.0 / prec);
  for (int i = 0; i < num_output; i++) {
    output[i] = ((int)(output[i] * prec + 0.5)) * inv_prec;
  }
}

// Calculates a prediction from the given input features and neural-net
// config. Assumes each hidden layer has fewer than NN_MAX_NODES_PER_LAYER
// nodes, matching the asserts below.
void av1_nn_predict_c(const float *input_nodes,
                      const NN_CONFIG *const nn_config, int reduce_prec,
                      float *const output) {
  int num_input_nodes = nn_config->num_inputs;
  int buf_index = 0;
  float buf[2][NN_MAX_NODES_PER_LAYER];

  // Propagate hidden layers.
  const int num_layers = nn_config->num_hidden_layers;
  assert(num_layers <= NN_MAX_HIDDEN_LAYERS);
  for (int layer = 0; layer < num_layers; ++layer) {
    const float *layer_weights = nn_config->weights[layer];
    const float *layer_bias = nn_config->bias[layer];
    float *output_nodes = buf[buf_index];
    const int num_output_nodes = nn_config->num_hidden_nodes[layer];
    assert(num_output_nodes < NN_MAX_NODES_PER_LAYER);
    for (int node = 0; node < num_output_nodes; ++node) {
      float val = layer_bias[node];
      for (int i = 0; i < num_input_nodes; ++i)
        val += layer_weights[node * num_input_nodes + i] * input_nodes[i];
      // ReLU as activation function.
      val = val > 0.0f ? val : 0.0f;  // Could use AOMMAX().
      output_nodes[node] = val;
    }
    num_input_nodes = num_output_nodes;
    input_nodes = output_nodes;
    buf_index = 1 - buf_index;
  }

  // Final output layer.
  const float *layer_weights = nn_config->weights[num_layers];
  const float *layer_bias = nn_config->bias[num_layers];
  for (int node = 0; node < nn_config->num_outputs; ++node) {
    float val = layer_bias[node];
    for (int i = 0; i < num_input_nodes; ++i)
      val += layer_weights[node * num_input_nodes + i] * input_nodes[i];
    output[node] = val;
  }
  if (reduce_prec) av1_nn_output_prec_reduce(output, nn_config->num_outputs);
}
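
// Usage sketch (illustrative only, hence the #if 0): a hypothetical
// 2-input, 1-output network with one hidden layer of 3 nodes. All weight
// and bias values below are made-up placeholders; the field names follow
// the NN_CONFIG usage in av1_nn_predict_c() above.
#if 0
static const float example_hidden_weights[3 * 2] = { 0.5f,  -0.25f, 1.0f,
                                                     0.75f, -0.5f,  0.125f };
static const float example_hidden_bias[3] = { 0.0f, 0.1f, -0.1f };
static const float example_out_weights[1 * 3] = { 0.3f, -0.6f, 0.9f };
static const float example_out_bias[1] = { 0.05f };
static const NN_CONFIG example_config = {
  .num_inputs = 2,
  .num_outputs = 1,
  .num_hidden_layers = 1,
  .num_hidden_nodes = { 3 },
  .weights = { example_hidden_weights, example_out_weights },
  .bias = { example_hidden_bias, example_out_bias },
};

static void example_predict(void) {
  const float features[2] = { 0.4f, -1.2f };
  float score[1];
  av1_nn_predict_c(features, &example_config, /*reduce_prec=*/1, score);
}
#endif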

#if CONFIG_NN_V2
// Applies the ReLU activation to one fc layer:
// output[i] = max(input[i], 0.0f)
static float *nn_relu(const float *input, FC_LAYER *layer) {
  for (int i = 0; i < layer->num_outputs; ++i) {
    layer->output[i] = AOMMAX(input[i], 0.0f);
  }

  return layer->output;
}

// Applies the sigmoid activation to one fc layer:
// output[i] = 1 / (1 + exp(-input[i])), with the input clamped to
// [-10, 10] to keep expf() well behaved.
static float *nn_sigmoid(const float *input, FC_LAYER *layer) {
  for (int i = 0; i < layer->num_outputs; ++i) {
    const float tmp = AOMMIN(AOMMAX(input[i], -10.0f), 10.0f);
    layer->output[i] = 1.0f / (1.0f + expf(-tmp));
  }

  return layer->output;
}

// Forward pass through one fully-connected layer; used by
// av1_nn_predict_v2().
static float *nn_fc_forward(const float *input, FC_LAYER *layer) {
  const float *weights = layer->weights;
  const float *bias = layer->bias;
  assert(layer->num_outputs < NN_MAX_NODES_PER_LAYER);
  // Affine transform: output = weights * input + bias.
  for (int node = 0; node < layer->num_outputs; ++node) {
    float val = bias[node];
    for (int i = 0; i < layer->num_inputs; ++i) val += weights[i] * input[i];
    layer->output[node] = val;
    weights += layer->num_inputs;
  }

  // Activation.
  switch (layer->activation) {
    case NONE: return layer->output;
    case RELU: return nn_relu(layer->output, layer);
    case SIGMOID: return nn_sigmoid(layer->output, layer);
    case SOFTSIGN:
      assert(0 && "Softsign is not supported in NN.");  // TODO: implement.
      return NULL;
    default:
      assert(0 && "Unknown activation");
      return NULL;
  }
}
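
// Note: each FC_LAYER carries its own output buffer, so nn_fc_forward()
// can chain layers by returning layer->output as the next call's input,
// with no external scratch memory (unlike the double-buffering scheme in
// av1_nn_predict_c above).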

void av1_nn_predict_v2(const float *feature, NN_CONFIG_V2 *nn_config,
                       int reduce_prec, float *output) {
  const float *input_nodes = feature;

  // Propagate the hidden layers.
  const int num_layers = nn_config->num_hidden_layers;
  assert(num_layers <= NN_MAX_HIDDEN_LAYERS);
  for (int i = 0; i < num_layers; ++i) {
    input_nodes = nn_fc_forward(input_nodes, nn_config->layer + i);
    assert(nn_config->layer[i + 1].num_inputs ==
           nn_config->layer[i].num_outputs);
  }

  // Final output layer.
  input_nodes = nn_fc_forward(input_nodes, nn_config->layer + num_layers);
  assert(nn_config->layer[num_layers].num_outputs == nn_config->num_logits);
  // Copy the final layer output.
  memcpy(output, input_nodes, sizeof(*input_nodes) * nn_config->num_logits);
  if (reduce_prec) av1_nn_output_prec_reduce(output, nn_config->num_logits);
}
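
// Note: nn_config->layer[] must hold num_hidden_layers + 1 entries (the
// hidden layers plus the final output layer), since the function above
// reads layer[i + 1] for every hidden layer i.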
#endif  // CONFIG_NN_V2

void av1_nn_softmax(const float *input, float *output, int n) {
  // The softmax function is invariant to adding the same constant
  // to all input values, so we subtract the maximum input to avoid
  // possible overflow.
  float max_input = input[0];
  for (int i = 1; i < n; i++) max_input = AOMMAX(max_input, input[i]);
  float sum_out = 0.0f;
  for (int i = 0; i < n; i++) {
    // Clamp to range [-10.0, 0.0] to prevent FE_UNDERFLOW errors.
    const float normalized_input = AOMMAX(input[i] - max_input, -10.0f);
    output[i] = expf(normalized_input);
    sum_out += output[i];
  }
  for (int i = 0; i < n; i++) output[i] /= sum_out;
}
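
// Worked example (hypothetical values): for input { 1.0f, 2.0f, 3.0f },
// max_input = 3.0f, so the shifted inputs are { -2, -1, 0 } and
// output = { e^-2, e^-1, e^0 } / (e^-2 + e^-1 + e^0)
//       ~= { 0.0900, 0.2447, 0.6652 },
// identical to the softmax of the unshifted input.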
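
// Fixed-size softmax over 16 classes. Uses approx_exp() rather than expf(),
// presumably trading a little accuracy for speed (hence "fast"); otherwise
// it mirrors av1_nn_softmax() with n fixed at 16.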
void av1_nn_fast_softmax_16_c(const float *input, float *output) {
  const int kNumClasses = 16;
  float max_input = input[0];
  for (int i = 1; i < kNumClasses; i++) max_input = AOMMAX(max_input, input[i]);
  float sum_out = 0.0f;
  for (int i = 0; i < kNumClasses; i++) {
    // Clamp to range [-10.0, 0.0] to prevent FE_UNDERFLOW errors.
    const float normalized_input = AOMMAX(input[i] - max_input, -10.0f);
    output[i] = approx_exp(normalized_input);
    sum_out += output[i];
  }
  for (int i = 0; i < kNumClasses; i++) output[i] /= sum_out;
}