tor-browser

The Tor Browser
git clone https://git.dasho.dev/tor-browser.git
Log | Files | Refs | README | LICENSE

0002-Replace-gsl-narrow-by-static_cast-as-the-former-can-.patch (10455B)


      1 From 0f02f0a222923dde8f7d81a8f2667dc6c15e8330 Mon Sep 17 00:00:00 2001
      2 From: serge-sans-paille <sguelton@mozilla.com>
      3 Date: Tue, 13 May 2025 12:13:22 +0200
      4 Subject: [PATCH 2/5] Replace gsl::narrow by static_cast, as the former can
      5 throw exceptions
      6 
      7 ---
      8 .../core/framework/kernel_type_str_resolver.cc       |  2 +-
      9 .../graph/runtime_optimization_record_container.cc   |  2 +-
     10 onnxruntime/core/optimizer/attention_fusion.cc       |  8 ++++----
     11 .../core/optimizer/embed_layer_norm_fusion.cc        |  6 +++---
     12 onnxruntime/core/optimizer/nchwc_transformer.cc      | 12 ++++++------
     13 onnxruntime/core/optimizer/utils.cc                  |  4 ++--
     14 6 files changed, 17 insertions(+), 17 deletions(-)
     15 
     16 diff --git a/onnxruntime/core/framework/kernel_type_str_resolver.cc b/onnxruntime/core/framework/kernel_type_str_resolver.cc
     17 index 3142f94f28..b64ed160da 100644
     18 --- a/onnxruntime/core/framework/kernel_type_str_resolver.cc
     19 +++ b/onnxruntime/core/framework/kernel_type_str_resolver.cc
     20 @@ -167,7 +167,7 @@ Status KernelTypeStrResolver::SaveToOrtFormat(
     21         auto fbs_arg = fbs::CreateArgTypeAndIndex(
     22             builder,
     23             arg.first == ArgType::kInput ? fbs::ArgType::INPUT : fbs::ArgType::OUTPUT,
     24 -            gsl::narrow<uint32_t>(arg.second));
     25 +            static_cast<uint32_t>(arg.second));
     26         fbs_args.push_back(fbs_arg);
     27       }
     28 
     29 diff --git a/onnxruntime/core/graph/runtime_optimization_record_container.cc b/onnxruntime/core/graph/runtime_optimization_record_container.cc
     30 index 2d0e1076ee..36a0f37a27 100644
     31 --- a/onnxruntime/core/graph/runtime_optimization_record_container.cc
     32 +++ b/onnxruntime/core/graph/runtime_optimization_record_container.cc
     33 @@ -57,7 +57,7 @@ static Status SaveRuntimeOptimizationRecordToOrtFormat(
     34 
     35   const auto fbs_node_indices = builder.CreateVector<uint32_t>(
     36       nodes_to_optimize_indices.nodes.size(),
     37 -      [&](size_t i) { return gsl::narrow<uint32_t>(nodes_to_optimize_indices.nodes[i]); });
     38 +      [&](size_t i) { return static_cast<uint32_t>(nodes_to_optimize_indices.nodes[i]); });
     39 
     40   const auto fbs_nodes_to_optimize =
     41       fbs::CreateNodesToOptimizeIndices(builder,
     42 diff --git a/onnxruntime/core/optimizer/attention_fusion.cc b/onnxruntime/core/optimizer/attention_fusion.cc
     43 index ff8943de79..6186fb542f 100644
     44 --- a/onnxruntime/core/optimizer/attention_fusion.cc
     45 +++ b/onnxruntime/core/optimizer/attention_fusion.cc
     46 @@ -121,25 +121,25 @@ static NodeArg& MergeQkvWeights(Graph& graph, int64_t hidden_size,
     47     const float* k_weight = k_initializer.data<float>();
     48     const float* v_weight = v_initializer.data<float>();
     49     std::vector<float> result;
     50 -    result.reserve(gsl::narrow<size_t>(element_count));
     51 +    result.reserve(static_cast<size_t>(element_count));
     52     if (is_matmul) {
     53       MergeMatMulWeights<float>(q_weight, k_weight, v_weight, result, hidden_size);
     54     } else {
     55       MergeWeights<float>(q_weight, k_weight, v_weight, result, hidden_size);
     56     }
     57 -    utils::SetRawDataInTensorProto(initializer, result.data(), gsl::narrow<size_t>(element_count) * sizeof(float));
     58 +    utils::SetRawDataInTensorProto(initializer, result.data(), static_cast<size_t>(element_count) * sizeof(float));
     59   } else {  // data_type == ONNX_NAMESPACE::TensorProto_DataType_FLOAT16
     60     const MLFloat16* q_weight = q_initializer.data<MLFloat16>();
     61     const MLFloat16* k_weight = k_initializer.data<MLFloat16>();
     62     const MLFloat16* v_weight = v_initializer.data<MLFloat16>();
     63     std::vector<MLFloat16> result;
     64 -    result.reserve(gsl::narrow<size_t>(element_count));
     65 +    result.reserve(static_cast<size_t>(element_count));
     66     if (is_matmul) {
     67       MergeMatMulWeights<MLFloat16>(q_weight, k_weight, v_weight, result, hidden_size);
     68     } else {
     69       MergeWeights<MLFloat16>(q_weight, k_weight, v_weight, result, hidden_size);
     70     }
     71 -    utils::SetRawDataInTensorProto(initializer, result.data(), gsl::narrow<size_t>(element_count) * sizeof(MLFloat16));
     72 +    utils::SetRawDataInTensorProto(initializer, result.data(), static_cast<size_t>(element_count) * sizeof(MLFloat16));
     73   }
     74 
     75   return graph_utils::AddInitializer(graph, initializer);
     76 diff --git a/onnxruntime/core/optimizer/embed_layer_norm_fusion.cc b/onnxruntime/core/optimizer/embed_layer_norm_fusion.cc
     77 index 103e72072f..d91529273e 100644
     78 --- a/onnxruntime/core/optimizer/embed_layer_norm_fusion.cc
     79 +++ b/onnxruntime/core/optimizer/embed_layer_norm_fusion.cc
     80 @@ -431,7 +431,7 @@ template <typename T>
     81 bool CheckEmbeddingData(const T* data, int64_t batch_size, int64_t element_count) {
     82   // check that all batches has same data.
     83   size_t data_length = SafeInt<size_t>(batch_size) * element_count;
     84 -  for (size_t i = gsl::narrow<size_t>(element_count); i < data_length; i++) {
     85 +  for (size_t i = static_cast<size_t>(element_count); i < data_length; i++) {
     86     if (data[i] != data[i % element_count]) {
     87       return false;
     88     }
     89 @@ -465,13 +465,13 @@ static NodeArg* ExtractEmbedding(Graph& graph,
     90     if (!CheckEmbeddingData(data, batch_size, element_count)) {
     91       return nullptr;
     92     }
     93 -    utils::SetRawDataInTensorProto(initializer, data, gsl::narrow<size_t>(element_count) * sizeof(float));
     94 +    utils::SetRawDataInTensorProto(initializer, data, static_cast<size_t>(element_count) * sizeof(float));
     95   } else {  // data_type == ONNX_NAMESPACE::TensorProto_DataType_FLOAT16
     96     const MLFloat16* data = old_initializer.data<MLFloat16>();
     97     if (!CheckEmbeddingData(data, batch_size, element_count)) {
     98       return nullptr;
     99     }
    100 -    utils::SetRawDataInTensorProto(initializer, data, gsl::narrow<size_t>(element_count) * sizeof(MLFloat16));
    101 +    utils::SetRawDataInTensorProto(initializer, data, static_cast<size_t>(element_count) * sizeof(MLFloat16));
    102   }
    103 
    104   NodeArg& node_arg = graph_utils::AddInitializer(graph, initializer);
    105 diff --git a/onnxruntime/core/optimizer/nchwc_transformer.cc b/onnxruntime/core/optimizer/nchwc_transformer.cc
    106 index 46f306b92b..436f16661f 100644
    107 --- a/onnxruntime/core/optimizer/nchwc_transformer.cc
    108 +++ b/onnxruntime/core/optimizer/nchwc_transformer.cc
    109 @@ -415,7 +415,7 @@ void NchwcTransformerImpl::TransformConv(Node& node) {
    110     for (size_t i = 2; i < 4; i++) {
    111       reordered_filter_size *= conv_W_dims[i];
    112     }
    113 -    InlinedVector<float> reordered_filter(gsl::narrow<size_t>(reordered_filter_size));
    114 +    InlinedVector<float> reordered_filter(static_cast<size_t>(reordered_filter_size));
    115 
    116     // Reorder the weights tensor statically.
    117     if (reorder_filter_OIHWBo) {
    118 @@ -451,7 +451,7 @@ void NchwcTransformerImpl::TransformConv(Node& node) {
    119     } else {
    120       Initializer conv_B{*conv_B_tensor_proto, graph_.ModelPath()};
    121 
    122 -      InlinedVector<float> aligned_bias(gsl::narrow<size_t>(nchwc_output_channels));
    123 +      InlinedVector<float> aligned_bias(static_cast<size_t>(nchwc_output_channels));
    124       ORT_ENFORCE(output_channels <= nchwc_output_channels, "Buffer overflow");
    125       std::copy_n(conv_B.data<float>(), output_channels, aligned_bias.data());
    126 
    127 @@ -460,7 +460,7 @@ void NchwcTransformerImpl::TransformConv(Node& node) {
    128       nchwc_conv_B_tensor_proto.set_data_type(ONNX_NAMESPACE::TensorProto_DataType_FLOAT);
    129       nchwc_conv_B_tensor_proto.set_name(graph_.GenerateNodeArgName("reorder"));
    130       utils::SetRawDataInTensorProto(nchwc_conv_B_tensor_proto, aligned_bias.data(),
    131 -                                     gsl::narrow<size_t>(nchwc_output_channels) * sizeof(float));
    132 +                                     static_cast<size_t>(nchwc_output_channels) * sizeof(float));
    133 
    134       nchwc_conv_B_tensor_proto.add_dims(nchwc_output_channels);
    135 
    136 @@ -878,7 +878,7 @@ void NchwcTransformerImpl::TransformBatchNormalization(Node& node) {
    137   const size_t nchwc_block_size = MlasNchwcGetBlockSize();
    138   const int64_t nchwc_channels = (channels + nchwc_block_size - 1) & ~(nchwc_block_size - 1);
    139 
    140 -  InlinedVector<float> padded_buffer(gsl::narrow<size_t>(nchwc_channels));
    141 +  InlinedVector<float> padded_buffer(static_cast<size_t>(nchwc_channels));
    142 
    143   std::copy_n(bn_scale.data<float>(), channels, padded_buffer.data());
    144 
    145 @@ -886,7 +886,7 @@ void NchwcTransformerImpl::TransformBatchNormalization(Node& node) {
    146   nchwc_conv_W_tensor_proto.set_data_type(ONNX_NAMESPACE::TensorProto_DataType_FLOAT);
    147   nchwc_conv_W_tensor_proto.set_name(graph_.GenerateNodeArgName("bn_scale"));
    148   utils::SetRawDataInTensorProto(nchwc_conv_W_tensor_proto, padded_buffer.data(),
    149 -                                 gsl::narrow<size_t>(nchwc_channels) * sizeof(float));
    150 +                                 static_cast<size_t>(nchwc_channels) * sizeof(float));
    151   nchwc_conv_W_tensor_proto.add_dims(nchwc_channels);
    152   nchwc_conv_W_tensor_proto.add_dims(1);
    153   nchwc_conv_W_tensor_proto.add_dims(1);
    154 @@ -900,7 +900,7 @@ void NchwcTransformerImpl::TransformBatchNormalization(Node& node) {
    155   nchwc_conv_B_tensor_proto.set_data_type(ONNX_NAMESPACE::TensorProto_DataType_FLOAT);
    156   nchwc_conv_B_tensor_proto.set_name(graph_.GenerateNodeArgName("bn_B"));
    157   utils::SetRawDataInTensorProto(nchwc_conv_B_tensor_proto, padded_buffer.data(),
    158 -                                 gsl::narrow<size_t>(nchwc_channels) * sizeof(float));
    159 +                                 static_cast<size_t>(nchwc_channels) * sizeof(float));
    160   nchwc_conv_B_tensor_proto.add_dims(nchwc_channels);
    161 
    162   auto* nchwc_conv_B_arg = &graph_utils::AddInitializer(graph_, nchwc_conv_B_tensor_proto);
    163 diff --git a/onnxruntime/core/optimizer/utils.cc b/onnxruntime/core/optimizer/utils.cc
    164 index c7e11de348..047d095aaf 100644
    165 --- a/onnxruntime/core/optimizer/utils.cc
    166 +++ b/onnxruntime/core/optimizer/utils.cc
    167 @@ -175,11 +175,11 @@ bool AppendTensorFromInitializer(const Graph& graph, const NodeArg& input_arg, I
    168   const auto data_type = tensor_proto->data_type();
    169   if (data_type == ONNX_NAMESPACE::TensorProto_DataType_INT64) {
    170     const int64_t* val = init_const.data<int64_t>();
    171 -    data.reserve(data.size() + gsl::narrow<size_t>(init_const.size()));
    172 +    data.reserve(data.size() + static_cast<size_t>(init_const.size()));
    173     data.insert(data.end(), val, val + init_const.size());
    174   } else if (data_type == ONNX_NAMESPACE::TensorProto_DataType_INT32) {
    175     const int32_t* val = init_const.data<int32_t>();
    176 -    data.reserve(data.size() + gsl::narrow<size_t>(init_const.size()));
    177 +    data.reserve(data.size() + static_cast<size_t>(init_const.size()));
    178     for (size_t i = 0; i < init_const.size(); i++) {
    179       data.push_back(static_cast<int64_t>(val[i]));
    180     }
    181 -- 
    182 2.49.0