Commit ff36efd

Address various warnings as errors (#8581)
Summary:
Pull Request resolved: #8581

Some projects use more restrictive build options than those currently used in ET CI, so we hit a number of errors when enabling the build for a microcontroller.

Reviewed By: digantdesai, swolchok

Differential Revision: D69139962
1 parent 9841e54 commit ff36efd

39 files changed (+186, -143 lines)
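Most of the hunks below replace hand-rolled `int32_t`/`size_t` index loops with `c10::irange(...)`. The helper yields indices in the same integer type as its bound, so the loop condition no longer compares a signed index against an unsigned count, which is exactly the kind of construct a stricter build rejects (for example one that promotes -Wsign-compare to an error; the commit message does not list the exact flags). Below is a minimal, purely illustrative stand-in for the helper and the before/after pattern; it is not the real c10/util/irange.h implementation, and the data is made up.

#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <vector>

// Hypothetical stand-in for c10::irange: yields 0, 1, ..., bound - 1 in the
// same integer type as the bound. Illustrative only.
template <typename T>
struct IndexRange {
  struct Iterator {
    T value;
    T operator*() const { return value; }
    Iterator& operator++() { ++value; return *this; }
    bool operator!=(const Iterator& other) const { return value != other.value; }
  };
  T bound;
  Iterator begin() const { return {T{0}}; }
  Iterator end() const { return {bound}; }
};

template <typename T>
IndexRange<T> irange(T bound) {
  return {bound};
}

int main() {
  std::vector<std::uint32_t> midrs = {0x41, 0x42, 0x43};

  // Before: `for (int32_t i = 0; i < midrs.size(); ++i)` compares a signed
  // index against size_t and fails once -Wsign-compare is an error.
  // After: the index simply takes the (unsigned) type of midrs.size().
  for (const auto i : irange(midrs.size())) {
    std::printf("midr[%zu] = 0x%x\n", i, static_cast<unsigned>(midrs[i]));
  }
  return 0;
}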

extension/threadpool/cpuinfo_utils.cpp
Lines changed: 4 additions & 3 deletions

@@ -6,6 +6,7 @@
  * LICENSE file in the root directory of this source tree.
  */
 
+#include <c10/util/irange.h>
 #include <executorch/extension/threadpool/cpuinfo_utils.h>
 
 #include <fstream>
@@ -84,7 +85,7 @@ bool populate_available_cpu_mids() {
   cpu_midrs->resize(num_possible_cores);
   const std::string kMidrFilePathPrefix = "/sys/devices/system/cpu/cpu";
   const std::string kMidrFilePathSuffix = "/regs/identification/midr_el1";
-  for (int32_t i = 0; i < num_possible_cores; ++i) {
+  for (const auto i : c10::irange(num_possible_cores)) {
     std::string midr_file_path =
         kMidrFilePathPrefix + std::to_string(i) + kMidrFilePathSuffix;
     ET_LOG(Info, "Reading file %s", midr_file_path.c_str());
@@ -115,7 +116,7 @@ uint32_t _get_num_performant_cores() {
     ET_LOG(Info, "CPU info and manual query on # of cpus dont match.");
     return 0;
   }
-  for (int32_t i = 0; i < cpu_midrs->size(); ++i) {
+  for (const auto i : c10::irange(cpu_midrs->size())) {
     uint32_t masked_midr = (*cpu_midrs)[i] & RIVISION_MASK;
     switch (masked_midr) {
       case CPUINFO_ARM_MIDR_CORTEX_A520:
@@ -148,7 +149,7 @@ uint32_t get_num_performant_cores() {
   uint32_t num_possible_cores = cpuinfo_get_processors_count();
   uint32_t num_non_performant_core = 0;
   if (uarch_count > 1) {
-    for (int32_t i = 0; i < uarch_count; ++i) {
+    for (const auto i : c10::irange(uarch_count)) {
       const struct cpuinfo_uarch_info* uarch_info = cpuinfo_get_uarch(i);
       if (is_non_performant_core(uarch_info)) {
         num_non_performant_core += uarch_info->processor_count;

extension/threadpool/targets.bzl
Lines changed: 1 addition & 0 deletions

@@ -23,6 +23,7 @@ def define_common_targets():
         srcs = _THREADPOOL_SRCS,
         deps = [
             "//executorch/runtime/core:core",
+            "//executorch/runtime/core/portable_type/c10/c10:c10",
         ],
         exported_headers = _THREADPOOL_HEADERS,
         exported_deps = [

kernels/portable/cpu/op__to_dim_order_copy.cpp
Lines changed: 4 additions & 2 deletions

@@ -6,6 +6,8 @@
  * LICENSE file in the root directory of this source tree.
  */
 
+#include <c10/util/irange.h>
+
 #include <executorch/kernels/portable/cpu/scalar_utils.h>
 #include <executorch/kernels/portable/cpu/util/copy_ops_util.h>
 #include <executorch/runtime/core/exec_aten/util/dim_order_util.h>
@@ -41,7 +43,7 @@ int64_t coordinateToIndexWithDimOrder(
 
   dim_order_to_stride_nocheck(
       sizes.data(), dim_order.data(), sizes.size(), strides);
-  for (size_t i = 0; i < self.dim(); ++i) {
+  for (const auto i : c10::irange(self.dim())) {
     index += cur_indices[i] * strides[i];
   }
   return index;
@@ -59,7 +61,7 @@ void _to_dim_order_copy_impl(const Tensor& self, Tensor& out) {
   for (ssize_t i = 0; i < self.numel(); i++) {
     // Update the current indices.
     for (ssize_t j = self.dim() - 1; j >= 0; j--) {
-      if (coordinate[j] + 1 < self.size(j)) {
+      if (coordinate[j] + 1 < static_cast<size_t>(self.size(j))) {
         coordinate[j]++;
         break;
       } else {
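The `static_cast<size_t>(self.size(j))` change above shows the second recurring fix: tensor extents come back as signed values while the running coordinate is unsigned, so the comparison is written with an explicit cast instead of relying on an implicit conversion. Below is a small self-contained sketch of the same idea; the extent accessor and types are placeholders, not the ExecuTorch tensor API (the kernel uses ssize_t, std::ptrdiff_t here keeps the sketch portable).

#include <cstddef>
#include <cstdio>
#include <vector>

// Placeholder for Tensor::size(dim), which returns a signed extent.
std::ptrdiff_t dim_extent(const std::vector<std::ptrdiff_t>& sizes, std::size_t dim) {
  return sizes[dim];
}

int main() {
  std::vector<std::ptrdiff_t> sizes = {2, 3, 4};
  std::vector<std::size_t> coordinate = {0, 0, 2};

  std::size_t j = 2;
  // `coordinate[j] + 1 < dim_extent(sizes, j)` would compare unsigned with
  // signed and trip -Werror=sign-compare; the cast states the intent
  // explicitly (extents are known to be non-negative at this point).
  if (coordinate[j] + 1 < static_cast<std::size_t>(dim_extent(sizes, j))) {
    ++coordinate[j];
  } else {
    coordinate[j] = 0;  // carry into the next dimension, as in the kernel loop
  }
  std::printf("coordinate[%zu] = %zu\n", j, coordinate[j]);
  return 0;
}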

kernels/portable/cpu/op_amax.cpp
Lines changed: 2 additions & 1 deletion

@@ -6,6 +6,7 @@
  * LICENSE file in the root directory of this source tree.
  */
 
+#include <c10/util/irange.h>
 #include <cmath>
 
 #include <executorch/kernels/portable/cpu/util/reduce_util.h>
@@ -44,7 +45,7 @@ Tensor& amax_out(
 
   ET_SWITCH_REALHBBF16_TYPES(in.scalar_type(), ctx, "amax.out", CTYPE, [&]() {
     CTYPE* out_data = out.mutable_data_ptr<CTYPE>();
-    for (size_t out_ix = 0; out_ix < out.numel(); ++out_ix) {
+    for (const auto out_ix : c10::irange(out.numel())) {
      out_data[out_ix] = reduce_over_dim_list<CTYPE>(
          [](CTYPE v, CTYPE max_v) {
            return std::isnan(v) || v > max_v ? v : max_v;

kernels/portable/cpu/op_amin.cpp
Lines changed: 2 additions & 2 deletions

@@ -5,7 +5,7 @@
  * This source code is licensed under the BSD-style license found in the
  * LICENSE file in the root directory of this source tree.
  */
-
+#include <c10/util/irange.h>
 #include <cmath>
 
 #include <executorch/kernels/portable/cpu/util/reduce_util.h>
@@ -44,7 +44,7 @@ Tensor& amin_out(
 
   ET_SWITCH_REALHBBF16_TYPES(in.scalar_type(), ctx, "amin.out", CTYPE, [&]() {
     CTYPE* out_data = out.mutable_data_ptr<CTYPE>();
-    for (size_t out_ix = 0; out_ix < out.numel(); ++out_ix) {
+    for (const auto out_ix : c10::irange(out.numel())) {
      out_data[out_ix] = reduce_over_dim_list<CTYPE>(
          [](CTYPE v, CTYPE min_v) {
            return std::isnan(v) || v < min_v ? v : min_v;

kernels/portable/cpu/op_argmax.cpp
Lines changed: 2 additions & 1 deletion

@@ -6,6 +6,7 @@
  * LICENSE file in the root directory of this source tree.
  */
 
+#include <c10/util/irange.h>
 #include <cmath>
 #include <tuple>
 
@@ -46,7 +47,7 @@ Tensor& argmax_out(
   ET_SWITCH_REALHBF16_TYPES(in.scalar_type(), ctx, "argmax.out", CTYPE, [&] {
     long* out_data = out.mutable_data_ptr<long>();
 
-    for (size_t out_ix = 0; out_ix < out.numel(); ++out_ix) {
+    for (const auto out_ix : c10::irange(out.numel())) {
      std::tuple<CTYPE, long> acc = reduce_over_dim<CTYPE>(
          [](CTYPE v, long ix, CTYPE acc_val, long acc_ix) {
            if (!std::isnan(acc_val) && (std::isnan(v) || v > acc_val)) {

kernels/portable/cpu/op_argmin.cpp
Lines changed: 2 additions & 1 deletion

@@ -6,6 +6,7 @@
  * LICENSE file in the root directory of this source tree.
  */
 
+#include <c10/util/irange.h>
 #include <cmath>
 #include <tuple>
 
@@ -46,7 +47,7 @@ Tensor& argmin_out(
   ET_SWITCH_REALHBF16_TYPES(in.scalar_type(), ctx, "argmin.out", CTYPE, [&] {
     long* out_data = out.mutable_data_ptr<long>();
 
-    for (size_t out_ix = 0; out_ix < out.numel(); ++out_ix) {
+    for (const auto out_ix : c10::irange(out.numel())) {
      std::tuple<CTYPE, long> acc = reduce_over_dim<CTYPE>(
          [](CTYPE v, long ix, CTYPE acc_val, long acc_ix) {
            if (!std::isnan(acc_val) && (std::isnan(v) || v < acc_val)) {

kernels/portable/cpu/op_expand_copy.cpp
Lines changed: 2 additions & 1 deletion

@@ -96,7 +96,8 @@ Tensor& expand_copy_out(
 
   ET_KERNEL_CHECK(
       ctx,
-      repeat_tensor(self, {repeats, repeats_size}, out) == Error::Ok,
+      repeat_tensor(self, makeArrayRef(repeats, repeats_size), out) ==
+          Error::Ok,
      InvalidArgument,
      out);
 
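Here the braced `{repeats, repeats_size}` argument is replaced by an explicit `makeArrayRef(repeats, repeats_size)` call. A plausible reason, not stated in the diff, is that list-initialization makes any narrowing of the length argument a hard error, whereas a normal function call performs an ordinary implicit conversion; either way the helper spells out exactly what is being constructed. Below is a rough standalone sketch using a hypothetical minimal span type in place of ExecuTorch's ArrayRef.

#include <cstddef>
#include <cstdint>
#include <cstdio>

// Hypothetical minimal span type standing in for ExecuTorch's ArrayRef.
template <typename T>
struct Span {
  const T* data;
  std::size_t length;
};

// Helper in the spirit of makeArrayRef(ptr, len): a plain function call
// converts the length argument instead of list-initializing the span, so a
// signed length is not a hard narrowing error at the call site.
template <typename T>
Span<T> make_span(const T* data, std::size_t length) {
  return Span<T>{data, length};
}

std::int64_t sum(Span<std::int64_t> s) {
  std::int64_t total = 0;
  for (std::size_t i = 0; i < s.length; ++i) {
    total += s.data[i];
  }
  return total;
}

int main() {
  std::int64_t repeats[] = {2, 3, 4};
  std::int64_t repeats_size = 3;  // signed length, as in the kernel code

  // sum({repeats, repeats_size}) would list-initialize Span and turn the
  // int64_t -> size_t conversion into a narrowing diagnostic; the helper
  // call below does not.
  std::printf("sum = %lld\n",
              static_cast<long long>(sum(make_span(repeats, repeats_size))));
  return 0;
}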

kernels/portable/cpu/util/activation_ops_util.cpp
Lines changed: 1 addition & 1 deletion

@@ -31,7 +31,7 @@ bool check_glu_args(const Tensor& in, int64_t dim, Tensor& out) {
   ET_LOG_AND_RETURN_IF_FALSE(tensor_is_floating_type(in));
 
   const size_t non_negative_dim = dim < 0 ? dim + in.dim() : dim;
-  const size_t dim_size = in.size(non_negative_dim);
+  const ssize_t dim_size = in.size(non_negative_dim);
 
   ET_CHECK_OR_RETURN_FALSE(
       dim_size % 2 == 0,
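This fix goes the other way: instead of forcing the value into size_t, the local keeps the signed type that `in.size()` returns, so no implicit sign conversion happens at the assignment, and the later `% 2 == 0` check behaves the same. A standalone illustration with a placeholder extent accessor follows; the real code uses the ExecuTorch Tensor API and ssize_t, std::ptrdiff_t stands in here.

#include <cstddef>
#include <cstdio>

// Stand-in for Tensor::size(dim), which returns a signed extent type.
using extent_t = std::ptrdiff_t;

extent_t size_of_dim(extent_t dim) {
  // Hypothetical fixed extents, just for the example.
  const extent_t extents[] = {8, 6, 4};
  return extents[dim];
}

int main() {
  // `const size_t dim_size = size_of_dim(1);` converts signed -> unsigned at
  // the assignment, which strict builds can flag (-Wsign-conversion).
  // Keeping the signed type sidesteps the conversion entirely.
  const extent_t dim_size = size_of_dim(1);
  std::printf("dim_size = %td, even = %d\n", dim_size, dim_size % 2 == 0);
  return 0;
}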

kernels/portable/cpu/util/broadcast_util.cpp
Lines changed: 4 additions & 2 deletions

@@ -6,6 +6,7 @@
  * LICENSE file in the root directory of this source tree.
  */
 
+#include <executorch/kernels/portable/cpu/util/broadcast_util.h>
 #include <executorch/kernels/portable/cpu/util/repeat_util.h>
 #include <executorch/runtime/core/exec_aten/exec_aten.h>
 #include <executorch/runtime/core/exec_aten/util/scalar_type_util.h>
@@ -274,7 +275,7 @@ void delinearize_index(
     size_t* out_indexes,
     const size_t out_indexes_len) {
   ET_CHECK(shape.size() <= out_indexes_len);
-  for (auto i = 0; i < shape.size(); ++i) {
+  for (size_t i = 0; i < shape.size(); ++i) {
     auto dim = shape.size() - 1 - i;
     auto dim_size = shape[dim];
     out_indexes[dim] = linear_index % dim_size;
@@ -304,7 +305,8 @@ size_t linearize_access_indexes(
   size_t linear_index = 0;
   for (size_t i = 0; i < indexes_broadcast_from.size(); ++i) {
     // If this dimension is broadcasted, add zero to the linear address.
-    if (indexes_broadcast_from[i] >= broadcast_from_shape[i]) {
+    if (indexes_broadcast_from[i] >=
+        static_cast<size_t>(broadcast_from_shape[i])) {
      ET_CHECK_MSG(
          broadcast_from_shape[i] == 1,
          "Expected dim size == 1 if broadcasted, but actual dim size is %zu",
