From 3ec466fa541be49e0f1f9f6ddac0e1c5216e4fbc Mon Sep 17 00:00:00 2001 From: Esun Kim Date: Tue, 19 Aug 2025 09:35:07 -0700 Subject: [PATCH] clang-format some existing files --- signal/micro/kernels/stacker_test.cc | 18 +- .../compiler/mlir/lite/schema/schema_utils.h | 4 +- .../microfrontend/lib/filterbank_test.cc | 34 +- .../microfrontend/lib/noise_reduction_test.cc | 10 +- .../lib/pcan_gain_control_test.cc | 5 +- .../internal/reference/integer_ops/conv.h | 1 - .../lite/micro/compression/metadata_saved.h | 399 ++++++++++++------ .../cortex_m_corstone_300/system_setup.cc | 4 +- .../arc_mli/mli_function_specializations.h | 38 +- .../micro/kernels/arc_mli/mli_interface.h | 2 +- .../lite/micro/kernels/circular_buffer.cc | 4 +- .../lite/micro/kernels/reduce_common.cc | 2 +- .../micro/kernels/resize_bilinear_test.cc | 12 +- .../kernels/resize_nearest_neighbor_test.cc | 18 +- .../micro/kernels/xtensa/lstm_eval_hifi.cc | 20 +- 15 files changed, 368 insertions(+), 203 deletions(-) diff --git a/signal/micro/kernels/stacker_test.cc b/signal/micro/kernels/stacker_test.cc index d236c7539a8..e45eab39b03 100644 --- a/signal/micro/kernels/stacker_test.cc +++ b/signal/micro/kernels/stacker_test.cc @@ -32,15 +32,15 @@ class StackerKernelRunner { StackerKernelRunner(int* input_dims_data, const int16_t* input_data, int* output_dims_data, int16_t* output_data, int* output_ready_dims_data, bool* ouput_ready_data) - : tensors_{testing::CreateTensor( - input_data, - tflite::testing::IntArrayFromInts(input_dims_data)), - testing::CreateTensor( - output_data, - tflite::testing::IntArrayFromInts(output_dims_data)), - testing::CreateTensor( - ouput_ready_data, - testing::IntArrayFromInts(output_ready_dims_data))}, + : tensors_{ + testing::CreateTensor( + input_data, tflite::testing::IntArrayFromInts(input_dims_data)), + testing::CreateTensor( + output_data, + tflite::testing::IntArrayFromInts(output_dims_data)), + testing::CreateTensor( + ouput_ready_data, + testing::IntArrayFromInts(output_ready_dims_data))}, inputs_array_{testing::IntArrayFromInts(inputs_array_data_)}, outputs_array_{testing::IntArrayFromInts(outputs_array_data_)}, kernel_runner_{*registration_, tensors_, kTensorsSize, diff --git a/tensorflow/compiler/mlir/lite/schema/schema_utils.h b/tensorflow/compiler/mlir/lite/schema/schema_utils.h index 7498aa02ebe..4a71a878430 100644 --- a/tensorflow/compiler/mlir/lite/schema/schema_utils.h +++ b/tensorflow/compiler/mlir/lite/schema/schema_utils.h @@ -24,9 +24,9 @@ namespace tflite { // problem. The new builtin operator will be assigned to the extended builtin // code field in the flatbuffer schema. Those methods helps to hide builtin code // details. 
-BuiltinOperator GetBuiltinCode(const OperatorCode *op_code);
+BuiltinOperator GetBuiltinCode(const OperatorCode* op_code);
 
-BuiltinOperator GetBuiltinCode(const OperatorCodeT *op_code);
+BuiltinOperator GetBuiltinCode(const OperatorCodeT* op_code);
 
 }  // namespace tflite
 
diff --git a/tensorflow/lite/experimental/microfrontend/lib/filterbank_test.cc b/tensorflow/lite/experimental/microfrontend/lib/filterbank_test.cc
index 050d43359b6..350a34d3783 100644
--- a/tensorflow/lite/experimental/microfrontend/lib/filterbank_test.cc
+++ b/tensorflow/lite/experimental/microfrontend/lib/filterbank_test.cc
@@ -76,8 +76,9 @@ TF_LITE_MICRO_TEST(FilterbankTest_CheckChannelFrequencyStarts) {
                               kSampleRate, kSpectrumSize));
 
   const int16_t expected[] = {0, 4, 8};
-  TF_LITE_MICRO_EXPECT_EQ(state.num_channels + 1,
-                          static_cast<int>(sizeof(expected) / sizeof(expected[0])));
+  TF_LITE_MICRO_EXPECT_EQ(
+      state.num_channels + 1,
+      static_cast<int>(sizeof(expected) / sizeof(expected[0])));
   int i;
   for (i = 0; i <= state.num_channels; ++i) {
     TF_LITE_MICRO_EXPECT_EQ(state.channel_frequency_starts[i], expected[i]);
@@ -93,8 +94,9 @@ TF_LITE_MICRO_TEST(FilterbankTest_CheckChannelWeightStarts) {
                               kSampleRate, kSpectrumSize));
 
   const int16_t expected[] = {0, 8, 16};
-  TF_LITE_MICRO_EXPECT_EQ(state.num_channels + 1,
-                          static_cast<int>(sizeof(expected) / sizeof(expected[0])));
+  TF_LITE_MICRO_EXPECT_EQ(
+      state.num_channels + 1,
+      static_cast<int>(sizeof(expected) / sizeof(expected[0])));
   int i;
   for (i = 0; i <= state.num_channels; ++i) {
     TF_LITE_MICRO_EXPECT_EQ(state.channel_weight_starts[i], expected[i]);
@@ -110,8 +112,9 @@ TF_LITE_MICRO_TEST(FilterbankTest_CheckChannelWidths) {
                               kSampleRate, kSpectrumSize));
 
   const int16_t expected[] = {8, 8, 8};
-  TF_LITE_MICRO_EXPECT_EQ(state.num_channels + 1,
-                          static_cast<int>(sizeof(expected) / sizeof(expected[0])));
+  TF_LITE_MICRO_EXPECT_EQ(
+      state.num_channels + 1,
+      static_cast<int>(sizeof(expected) / sizeof(expected[0])));
   int i;
   for (i = 0; i <= state.num_channels; ++i) {
     TF_LITE_MICRO_EXPECT_EQ(state.channel_widths[i], expected[i]);
@@ -129,9 +132,10 @@ TF_LITE_MICRO_TEST(FilterbankTest_CheckWeights) {
   const int16_t expected[] = {0, 3277, 2217, 1200, 222, 0, 0, 0,
                               0, 3376, 2468, 1591, 744, 0, 0, 0,
                               0, 4020, 3226, 2456, 1708, 983, 277, 0};
-  TF_LITE_MICRO_EXPECT_EQ(state.channel_weight_starts[state.num_channels] +
-                              state.channel_widths[state.num_channels],
-                          static_cast<int>(sizeof(expected) / sizeof(expected[0])));
+  TF_LITE_MICRO_EXPECT_EQ(
+      state.channel_weight_starts[state.num_channels] +
+          state.channel_widths[state.num_channels],
+      static_cast<int>(sizeof(expected) / sizeof(expected[0])));
   for (size_t i = 0; i < sizeof(expected) / sizeof(expected[0]); ++i) {
     TF_LITE_MICRO_EXPECT_EQ(state.weights[i], expected[i]);
   }
@@ -148,9 +152,10 @@ TF_LITE_MICRO_TEST(FilterbankTest_CheckUnweights) {
   const int16_t expected[] = {0, 819, 1879, 2896, 3874, 0, 0, 0,
                               0, 720, 1628, 2505, 3352, 0, 0, 0,
                               0, 76, 870, 1640, 2388, 3113, 3819, 0};
-  TF_LITE_MICRO_EXPECT_EQ(state.channel_weight_starts[state.num_channels] +
-                              state.channel_widths[state.num_channels],
-                          static_cast<int>(sizeof(expected) / sizeof(expected[0])));
+  TF_LITE_MICRO_EXPECT_EQ(
+      state.channel_weight_starts[state.num_channels] +
+          state.channel_widths[state.num_channels],
+      static_cast<int>(sizeof(expected) / sizeof(expected[0])));
   for (size_t i = 0; i < sizeof(expected) / sizeof(expected[0]); ++i) {
     TF_LITE_MICRO_EXPECT_EQ(state.unweights[i], expected[i]);
   }
@@ -204,8 +209,9 @@ TF_LITE_MICRO_TEST(FilterbankTest_CheckSqrt) {
   uint32_t* scaled_filterbank =
       FilterbankSqrt(&state, kScaleShift);
   const uint32_t expected[] = {247311, 508620};
-  TF_LITE_MICRO_EXPECT_EQ(state.num_channels,
-                          static_cast<int>(sizeof(expected) / sizeof(expected[0])));
+  TF_LITE_MICRO_EXPECT_EQ(
+      state.num_channels,
+      static_cast<int>(sizeof(expected) / sizeof(expected[0])));
   int i;
   for (i = 0; i < state.num_channels; ++i) {
     TF_LITE_MICRO_EXPECT_EQ(scaled_filterbank[i], expected[i]);
diff --git a/tensorflow/lite/experimental/microfrontend/lib/noise_reduction_test.cc b/tensorflow/lite/experimental/microfrontend/lib/noise_reduction_test.cc
index cdfb4c97248..e2941010864 100644
--- a/tensorflow/lite/experimental/microfrontend/lib/noise_reduction_test.cc
+++ b/tensorflow/lite/experimental/microfrontend/lib/noise_reduction_test.cc
@@ -48,8 +48,9 @@ TF_LITE_MICRO_TEST(NoiseReductionTest_TestNoiseReductionEstimate) {
   NoiseReductionApply(&state, signal);
 
   const uint32_t expected[] = {6321887, 31248341};
-  TF_LITE_MICRO_EXPECT_EQ(state.num_channels,
-                          static_cast<int>(sizeof(expected) / sizeof(expected[0])));
+  TF_LITE_MICRO_EXPECT_EQ(
+      state.num_channels,
+      static_cast<int>(sizeof(expected) / sizeof(expected[0])));
   int i;
   for (i = 0; i < state.num_channels; ++i) {
     TF_LITE_MICRO_EXPECT_EQ(state.estimate[i], expected[i]);
@@ -68,8 +69,9 @@ TF_LITE_MICRO_TEST(NoiseReductionTest_TestNoiseReduction) {
   NoiseReductionApply(&state, signal);
 
   const uint32_t expected[] = {241137, 478104};
-  TF_LITE_MICRO_EXPECT_EQ(state.num_channels,
-                          static_cast<int>(sizeof(expected) / sizeof(expected[0])));
+  TF_LITE_MICRO_EXPECT_EQ(
+      state.num_channels,
+      static_cast<int>(sizeof(expected) / sizeof(expected[0])));
   int i;
   for (i = 0; i < state.num_channels; ++i) {
     TF_LITE_MICRO_EXPECT_EQ(signal[i], expected[i]);
diff --git a/tensorflow/lite/experimental/microfrontend/lib/pcan_gain_control_test.cc b/tensorflow/lite/experimental/microfrontend/lib/pcan_gain_control_test.cc
index 21f9aef6c04..ead6cccbb58 100644
--- a/tensorflow/lite/experimental/microfrontend/lib/pcan_gain_control_test.cc
+++ b/tensorflow/lite/experimental/microfrontend/lib/pcan_gain_control_test.cc
@@ -52,8 +52,9 @@ TF_LITE_MICRO_TEST(PcanGainControlTest_TestPcanGainControl) {
   PcanGainControlApply(&state, signal);
 
   const uint32_t expected[] = {3578, 1533};
-  TF_LITE_MICRO_EXPECT_EQ(state.num_channels,
-                          static_cast<int>(sizeof(expected) / sizeof(expected[0])));
+  TF_LITE_MICRO_EXPECT_EQ(
+      state.num_channels,
+      static_cast<int>(sizeof(expected) / sizeof(expected[0])));
   int i;
   for (i = 0; i < state.num_channels; ++i) {
     TF_LITE_MICRO_EXPECT_EQ(signal[i], expected[i]);
diff --git a/tensorflow/lite/kernels/internal/reference/integer_ops/conv.h b/tensorflow/lite/kernels/internal/reference/integer_ops/conv.h
index eac00576a25..5638c7eee2f 100644
--- a/tensorflow/lite/kernels/internal/reference/integer_ops/conv.h
+++ b/tensorflow/lite/kernels/internal/reference/integer_ops/conv.h
@@ -135,7 +135,6 @@ inline void ConvPerChannel(
     }
   }
 
-
 // Fixed-point per-channel-quantization convolution reference kernel.
 // 16-bit data and 8-bit filter
 template <typename AccumScalar>
diff --git a/tensorflow/lite/micro/compression/metadata_saved.h b/tensorflow/lite/micro/compression/metadata_saved.h
index fe23fe42d48..b022e383af6 100644
--- a/tensorflow/lite/micro/compression/metadata_saved.h
+++ b/tensorflow/lite/micro/compression/metadata_saved.h
@@ -1,6 +1,5 @@
 // automatically generated by the FlatBuffers compiler, do not modify
 
-
 #ifndef FLATBUFFERS_GENERATED_METADATA_TFLITE_MICRO_COMPRESSION_H_
 #define FLATBUFFERS_GENERATED_METADATA_TFLITE_MICRO_COMPRESSION_H_
 
@@ -9,9 +8,9 @@
 // Ensure the included flatbuffers.h is the same version as when this file was
 // generated, otherwise it may not be compatible.
 static_assert(FLATBUFFERS_VERSION_MAJOR == 23 &&
-              FLATBUFFERS_VERSION_MINOR == 5 &&
-              FLATBUFFERS_VERSION_REVISION == 26,
-             "Non-compatible flatbuffers version included");
+                  FLATBUFFERS_VERSION_MINOR == 5 &&
+                  FLATBUFFERS_VERSION_REVISION == 26,
+              "Non-compatible flatbuffers version included");
 
 namespace tflite {
 namespace micro {
@@ -32,11 +31,12 @@ struct LutTensorT;
 
 struct MetadataT : public ::flatbuffers::NativeTable {
   typedef Metadata TableType;
   uint32_t schema_version = 1;
-  std::vector<std::unique_ptr<tflite::micro::compression::SubgraphT>> subgraphs{};
+  std::vector<std::unique_ptr<tflite::micro::compression::SubgraphT>>
+      subgraphs{};
   MetadataT() = default;
-  MetadataT(const MetadataT &o);
+  MetadataT(const MetadataT& o);
   MetadataT(MetadataT&&) FLATBUFFERS_NOEXCEPT = default;
-  MetadataT &operator=(MetadataT o) FLATBUFFERS_NOEXCEPT;
+  MetadataT& operator=(MetadataT o) FLATBUFFERS_NOEXCEPT;
 };
 
 struct Metadata FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table {
@@ -49,34 +49,45 @@ struct Metadata FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table {
   uint32_t schema_version() const {
     return GetField<uint32_t>(VT_SCHEMA_VERSION, 1);
   }
-  const ::flatbuffers::Vector<::flatbuffers::Offset<tflite::micro::compression::Subgraph>> *subgraphs() const {
-    return GetPointer<const ::flatbuffers::Vector<::flatbuffers::Offset<tflite::micro::compression::Subgraph>> *>(VT_SUBGRAPHS);
+  const ::flatbuffers::Vector<
+      ::flatbuffers::Offset<tflite::micro::compression::Subgraph>>*
+  subgraphs() const {
+    return GetPointer<const ::flatbuffers::Vector<
+        ::flatbuffers::Offset<tflite::micro::compression::Subgraph>>*>(
+        VT_SUBGRAPHS);
   }
-  bool Verify(::flatbuffers::Verifier &verifier) const {
+  bool Verify(::flatbuffers::Verifier& verifier) const {
     return VerifyTableStart(verifier) &&
            VerifyField<uint32_t>(verifier, VT_SCHEMA_VERSION, 4) &&
            VerifyOffset(verifier, VT_SUBGRAPHS) &&
           verifier.VerifyVector(subgraphs()) &&
-           verifier.VerifyVectorOfTables(subgraphs()) &&
-           verifier.EndTable();
+           verifier.VerifyVectorOfTables(subgraphs()) && verifier.EndTable();
   }
-  MetadataT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  void UnPackTo(MetadataT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  static ::flatbuffers::Offset<Metadata> Pack(::flatbuffers::FlatBufferBuilder &_fbb, const MetadataT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
+  MetadataT* UnPack(
+      const ::flatbuffers::resolver_function_t* _resolver = nullptr) const;
+  void UnPackTo(
+      MetadataT* _o,
+      const ::flatbuffers::resolver_function_t* _resolver = nullptr) const;
+  static ::flatbuffers::Offset<Metadata> Pack(
+      ::flatbuffers::FlatBufferBuilder& _fbb, const MetadataT* _o,
+      const ::flatbuffers::rehasher_function_t* _rehasher = nullptr);
 };
 
 struct MetadataBuilder {
   typedef Metadata Table;
-  ::flatbuffers::FlatBufferBuilder &fbb_;
+  ::flatbuffers::FlatBufferBuilder& fbb_;
   ::flatbuffers::uoffset_t start_;
   void add_schema_version(uint32_t schema_version) {
     fbb_.AddElement<uint32_t>(Metadata::VT_SCHEMA_VERSION, schema_version, 1);
   }
-  void add_subgraphs(::flatbuffers::Offset<::flatbuffers::Vector<::flatbuffers::Offset<tflite::micro::compression::Subgraph>>> subgraphs) {
+  void add_subgraphs(
+      ::flatbuffers::Offset<::flatbuffers::Vector<
+          ::flatbuffers::Offset<tflite::micro::compression::Subgraph>>>
+          subgraphs) {
     fbb_.AddOffset(Metadata::VT_SUBGRAPHS, subgraphs);
   }
-  explicit MetadataBuilder(::flatbuffers::FlatBufferBuilder &_fbb)
-    : fbb_(_fbb) {
+  explicit MetadataBuilder(::flatbuffers::FlatBufferBuilder& _fbb)
+      : fbb_(_fbb) {
     start_ = fbb_.StartTable();
   }
   ::flatbuffers::Offset<Metadata> Finish() {
@@ -87,9 +98,10 @@ struct MetadataBuilder {
 };
 
 inline ::flatbuffers::Offset<Metadata> CreateMetadata(
-    ::flatbuffers::FlatBufferBuilder &_fbb,
-    uint32_t schema_version = 1,
-    ::flatbuffers::Offset<::flatbuffers::Vector<::flatbuffers::Offset<tflite::micro::compression::Subgraph>>> subgraphs = 0) {
+    ::flatbuffers::FlatBufferBuilder& _fbb, uint32_t schema_version = 1,
+    ::flatbuffers::Offset<::flatbuffers::Vector<
+        ::flatbuffers::Offset<tflite::micro::compression::Subgraph>>>
+        subgraphs = 0) {
   MetadataBuilder builder_(_fbb);
   builder_.add_subgraphs(subgraphs);
   builder_.add_schema_version(schema_version);
@@ -97,25 +109,31 @@ inline ::flatbuffers::Offset<Metadata> CreateMetadata(
 }
 
 inline ::flatbuffers::Offset<Metadata> CreateMetadataDirect(
-    ::flatbuffers::FlatBufferBuilder &_fbb,
-    uint32_t schema_version = 1,
-    const std::vector<::flatbuffers::Offset<tflite::micro::compression::Subgraph>> *subgraphs = nullptr) {
-  auto subgraphs__ = subgraphs ? _fbb.CreateVector<::flatbuffers::Offset<tflite::micro::compression::Subgraph>>(*subgraphs) : 0;
-  return tflite::micro::compression::CreateMetadata(
-      _fbb,
-      schema_version,
-      subgraphs__);
+    ::flatbuffers::FlatBufferBuilder& _fbb, uint32_t schema_version = 1,
+    const std::vector<::flatbuffers::Offset<
+        tflite::micro::compression::Subgraph>>* subgraphs = nullptr) {
+  auto subgraphs__ =
+      subgraphs
+          ? _fbb.CreateVector<
+                ::flatbuffers::Offset<tflite::micro::compression::Subgraph>>(
+                *subgraphs)
+          : 0;
+  return tflite::micro::compression::CreateMetadata(_fbb, schema_version,
+                                                    subgraphs__);
 }
 
-::flatbuffers::Offset<Metadata> CreateMetadata(::flatbuffers::FlatBufferBuilder &_fbb, const MetadataT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
+::flatbuffers::Offset<Metadata> CreateMetadata(
+    ::flatbuffers::FlatBufferBuilder& _fbb, const MetadataT* _o,
+    const ::flatbuffers::rehasher_function_t* _rehasher = nullptr);
 
 struct SubgraphT : public ::flatbuffers::NativeTable {
   typedef Subgraph TableType;
-  std::vector<std::unique_ptr<tflite::micro::compression::LutTensorT>> lut_tensors{};
+  std::vector<std::unique_ptr<tflite::micro::compression::LutTensorT>>
+      lut_tensors{};
   SubgraphT() = default;
-  SubgraphT(const SubgraphT &o);
+  SubgraphT(const SubgraphT& o);
   SubgraphT(SubgraphT&&) FLATBUFFERS_NOEXCEPT = default;
-  SubgraphT &operator=(SubgraphT o) FLATBUFFERS_NOEXCEPT;
+  SubgraphT& operator=(SubgraphT o) FLATBUFFERS_NOEXCEPT;
 };
 
 struct Subgraph FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table {
@@ -124,30 +142,41 @@ struct Subgraph FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table {
   enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
     VT_LUT_TENSORS = 4
   };
-  const ::flatbuffers::Vector<::flatbuffers::Offset<tflite::micro::compression::LutTensor>> *lut_tensors() const {
-    return GetPointer<const ::flatbuffers::Vector<::flatbuffers::Offset<tflite::micro::compression::LutTensor>> *>(VT_LUT_TENSORS);
+  const ::flatbuffers::Vector<
+      ::flatbuffers::Offset<tflite::micro::compression::LutTensor>>*
+  lut_tensors() const {
+    return GetPointer<const ::flatbuffers::Vector<
+        ::flatbuffers::Offset<tflite::micro::compression::LutTensor>>*>(
+        VT_LUT_TENSORS);
   }
-  bool Verify(::flatbuffers::Verifier &verifier) const {
+  bool Verify(::flatbuffers::Verifier& verifier) const {
     return VerifyTableStart(verifier) &&
            VerifyOffset(verifier, VT_LUT_TENSORS) &&
           verifier.VerifyVector(lut_tensors()) &&
-           verifier.VerifyVectorOfTables(lut_tensors()) &&
-           verifier.EndTable();
+           verifier.VerifyVectorOfTables(lut_tensors()) && verifier.EndTable();
   }
-  SubgraphT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  void UnPackTo(SubgraphT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  static ::flatbuffers::Offset<Subgraph> Pack(::flatbuffers::FlatBufferBuilder &_fbb, const SubgraphT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
+  SubgraphT* UnPack(
+      const ::flatbuffers::resolver_function_t* _resolver = nullptr) const;
+  void UnPackTo(
+      SubgraphT* _o,
+      const ::flatbuffers::resolver_function_t* _resolver = nullptr) const;
+  static ::flatbuffers::Offset<Subgraph> Pack(
+      ::flatbuffers::FlatBufferBuilder& _fbb, const SubgraphT* _o,
+      const ::flatbuffers::rehasher_function_t* _rehasher = nullptr);
 };
 
 struct SubgraphBuilder {
   typedef Subgraph Table;
-  ::flatbuffers::FlatBufferBuilder &fbb_;
+  ::flatbuffers::FlatBufferBuilder& fbb_;
   ::flatbuffers::uoffset_t start_;
-  void add_lut_tensors(::flatbuffers::Offset<::flatbuffers::Vector<::flatbuffers::Offset<tflite::micro::compression::LutTensor>>> lut_tensors) {
+  void add_lut_tensors(
+      ::flatbuffers::Offset<::flatbuffers::Vector<
+          ::flatbuffers::Offset<tflite::micro::compression::LutTensor>>>
+          lut_tensors) {
     fbb_.AddOffset(Subgraph::VT_LUT_TENSORS, lut_tensors);
   }
-  explicit SubgraphBuilder(::flatbuffers::FlatBufferBuilder &_fbb)
-    : fbb_(_fbb) {
+  explicit SubgraphBuilder(::flatbuffers::FlatBufferBuilder& _fbb)
+      : fbb_(_fbb) {
     start_ = fbb_.StartTable();
   }
   ::flatbuffers::Offset<Subgraph> Finish() {
@@ -158,23 +187,31 @@ struct SubgraphBuilder {
 };
 
 inline ::flatbuffers::Offset<Subgraph> CreateSubgraph(
-    ::flatbuffers::FlatBufferBuilder &_fbb,
-    ::flatbuffers::Offset<::flatbuffers::Vector<::flatbuffers::Offset<tflite::micro::compression::LutTensor>>> lut_tensors = 0) {
+    ::flatbuffers::FlatBufferBuilder& _fbb,
+    ::flatbuffers::Offset<::flatbuffers::Vector<
+        ::flatbuffers::Offset<tflite::micro::compression::LutTensor>>>
+        lut_tensors = 0) {
   SubgraphBuilder builder_(_fbb);
   builder_.add_lut_tensors(lut_tensors);
   return builder_.Finish();
 }
 
 inline ::flatbuffers::Offset<Subgraph> CreateSubgraphDirect(
-    ::flatbuffers::FlatBufferBuilder &_fbb,
-    const std::vector<::flatbuffers::Offset<tflite::micro::compression::LutTensor>> *lut_tensors = nullptr) {
-  auto lut_tensors__ = lut_tensors ? _fbb.CreateVector<::flatbuffers::Offset<tflite::micro::compression::LutTensor>>(*lut_tensors) : 0;
-  return tflite::micro::compression::CreateSubgraph(
-      _fbb,
-      lut_tensors__);
+    ::flatbuffers::FlatBufferBuilder& _fbb,
+    const std::vector<::flatbuffers::Offset<
+        tflite::micro::compression::LutTensor>>* lut_tensors = nullptr) {
+  auto lut_tensors__ =
+      lut_tensors
+          ? _fbb.CreateVector<
+                ::flatbuffers::Offset<tflite::micro::compression::LutTensor>>(
+                *lut_tensors)
+          : 0;
+  return tflite::micro::compression::CreateSubgraph(_fbb, lut_tensors__);
 }
 
-::flatbuffers::Offset<Subgraph> CreateSubgraph(::flatbuffers::FlatBufferBuilder &_fbb, const SubgraphT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
+::flatbuffers::Offset<Subgraph> CreateSubgraph(
+    ::flatbuffers::FlatBufferBuilder& _fbb, const SubgraphT* _o,
+    const ::flatbuffers::rehasher_function_t* _rehasher = nullptr);
 
 struct LutTensorT : public ::flatbuffers::NativeTable {
   typedef LutTensor TableType;
@@ -191,30 +228,33 @@ struct LutTensor FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table {
     VT_VALUE_BUFFER = 6,
     VT_INDEX_BITWIDTH = 8
   };
-  int32_t tensor() const {
-    return GetField<int32_t>(VT_TENSOR, 0);
-  }
+  int32_t tensor() const { return GetField<int32_t>(VT_TENSOR, 0); }
   uint32_t value_buffer() const {
     return GetField<uint32_t>(VT_VALUE_BUFFER, 0);
   }
   uint8_t index_bitwidth() const {
     return GetField<uint8_t>(VT_INDEX_BITWIDTH, 0);
   }
-  bool Verify(::flatbuffers::Verifier &verifier) const {
+  bool Verify(::flatbuffers::Verifier& verifier) const {
    return VerifyTableStart(verifier) &&
           VerifyField<int32_t>(verifier, VT_TENSOR, 4) &&
           VerifyField<uint32_t>(verifier, VT_VALUE_BUFFER, 4) &&
           VerifyField<uint8_t>(verifier, VT_INDEX_BITWIDTH, 1) &&
           verifier.EndTable();
  }
-  LutTensorT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  void UnPackTo(LutTensorT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  static ::flatbuffers::Offset<LutTensor> Pack(::flatbuffers::FlatBufferBuilder &_fbb, const LutTensorT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
+  LutTensorT* UnPack(
+      const ::flatbuffers::resolver_function_t* _resolver = nullptr) const;
+  void UnPackTo(
+      LutTensorT* _o,
+      const ::flatbuffers::resolver_function_t* _resolver = nullptr) const;
+  static ::flatbuffers::Offset<LutTensor> Pack(
+      ::flatbuffers::FlatBufferBuilder& _fbb, const LutTensorT* _o,
+      const ::flatbuffers::rehasher_function_t* _rehasher = nullptr);
 };
 
 struct LutTensorBuilder {
   typedef LutTensor Table;
-  ::flatbuffers::FlatBufferBuilder &fbb_;
+  ::flatbuffers::FlatBufferBuilder& fbb_;
   ::flatbuffers::uoffset_t start_;
   void add_tensor(int32_t tensor) {
     fbb_.AddElement<int32_t>(LutTensor::VT_TENSOR, tensor, 0);
@@ -225,8 +265,8 @@ struct LutTensorBuilder {
   void add_index_bitwidth(uint8_t index_bitwidth) {
     fbb_.AddElement<uint8_t>(LutTensor::VT_INDEX_BITWIDTH, index_bitwidth, 0);
   }
-  explicit LutTensorBuilder(::flatbuffers::FlatBufferBuilder &_fbb)
-    : fbb_(_fbb) {
+  explicit LutTensorBuilder(::flatbuffers::FlatBufferBuilder& _fbb)
+      : fbb_(_fbb) {
     start_ = fbb_.StartTable();
   }
   ::flatbuffers::Offset<LutTensor> Finish() {
@@ -237,10 +277,8 @@ struct LutTensorBuilder {
 };
 
 inline ::flatbuffers::Offset<LutTensor> CreateLutTensor(
-    ::flatbuffers::FlatBufferBuilder &_fbb,
-    int32_t tensor = 0,
-    uint32_t value_buffer = 0,
-    uint8_t index_bitwidth = 0) {
+    ::flatbuffers::FlatBufferBuilder& _fbb, int32_t tensor = 0,
+    uint32_t value_buffer = 0, uint8_t index_bitwidth = 0) {
   LutTensorBuilder builder_(_fbb);
   builder_.add_value_buffer(value_buffer);
   builder_.add_tensor(tensor);
@@ -248,157 +286,264 @@ inline ::flatbuffers::Offset<LutTensor> CreateLutTensor(
   return builder_.Finish();
 }
 
-::flatbuffers::Offset<LutTensor> CreateLutTensor(::flatbuffers::FlatBufferBuilder &_fbb, const LutTensorT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
+::flatbuffers::Offset<LutTensor> CreateLutTensor(
+    ::flatbuffers::FlatBufferBuilder& _fbb, const LutTensorT* _o,
+    const ::flatbuffers::rehasher_function_t* _rehasher = nullptr);
 
-inline MetadataT::MetadataT(const MetadataT &o)
-  : schema_version(o.schema_version) {
+inline MetadataT::MetadataT(const MetadataT& o)
+    : schema_version(o.schema_version) {
   subgraphs.reserve(o.subgraphs.size());
-  for (const auto &subgraphs_ : o.subgraphs) { subgraphs.emplace_back((subgraphs_) ? new tflite::micro::compression::SubgraphT(*subgraphs_) : nullptr); }
+  for (const auto& subgraphs_ : o.subgraphs) {
+    subgraphs.emplace_back(
+        (subgraphs_) ? new tflite::micro::compression::SubgraphT(*subgraphs_)
+                     : nullptr);
+  }
 }
 
-inline MetadataT &MetadataT::operator=(MetadataT o) FLATBUFFERS_NOEXCEPT {
+inline MetadataT& MetadataT::operator=(MetadataT o) FLATBUFFERS_NOEXCEPT {
   std::swap(schema_version, o.schema_version);
   std::swap(subgraphs, o.subgraphs);
   return *this;
 }
 
-inline MetadataT *Metadata::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const {
+inline MetadataT* Metadata::UnPack(
+    const ::flatbuffers::resolver_function_t* _resolver) const {
   auto _o = std::unique_ptr<MetadataT>(new MetadataT());
   UnPackTo(_o.get(), _resolver);
   return _o.release();
 }
 
-inline void Metadata::UnPackTo(MetadataT *_o, const ::flatbuffers::resolver_function_t *_resolver) const {
+inline void Metadata::UnPackTo(
+    MetadataT* _o, const ::flatbuffers::resolver_function_t* _resolver) const {
   (void)_o;
   (void)_resolver;
-  { auto _e = schema_version(); _o->schema_version = _e; }
-  { auto _e = subgraphs(); if (_e) { _o->subgraphs.resize(_e->size()); for (::flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { if(_o->subgraphs[_i]) { _e->Get(_i)->UnPackTo(_o->subgraphs[_i].get(), _resolver); } else { _o->subgraphs[_i] = std::unique_ptr<tflite::micro::compression::SubgraphT>(_e->Get(_i)->UnPack(_resolver)); }; } } else { _o->subgraphs.resize(0); } }
+  {
+    auto _e = schema_version();
+    _o->schema_version = _e;
+  }
+  {
+    auto _e = subgraphs();
+    if (_e) {
+      _o->subgraphs.resize(_e->size());
+      for (::flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) {
+        if (_o->subgraphs[_i]) {
+          _e->Get(_i)->UnPackTo(_o->subgraphs[_i].get(), _resolver);
+        } else {
+          _o->subgraphs[_i] =
+              std::unique_ptr<tflite::micro::compression::SubgraphT>(
+                  _e->Get(_i)->UnPack(_resolver));
+        };
+      }
+    } else {
+      _o->subgraphs.resize(0);
+    }
+  }
 }
 
-inline ::flatbuffers::Offset<Metadata> Metadata::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const MetadataT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) {
+inline ::flatbuffers::Offset<Metadata> Metadata::Pack(
+    ::flatbuffers::FlatBufferBuilder& _fbb, const MetadataT* _o,
+    const ::flatbuffers::rehasher_function_t* _rehasher) {
   return CreateMetadata(_fbb, _o, _rehasher);
 }
 
-inline ::flatbuffers::Offset<Metadata> CreateMetadata(::flatbuffers::FlatBufferBuilder &_fbb, const MetadataT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) {
+inline ::flatbuffers::Offset<Metadata> CreateMetadata(
+    ::flatbuffers::FlatBufferBuilder& _fbb, const MetadataT* _o,
+    const ::flatbuffers::rehasher_function_t* _rehasher) {
   (void)_rehasher;
   (void)_o;
-  struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const MetadataT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
+  struct _VectorArgs {
+    ::flatbuffers::FlatBufferBuilder* __fbb;
+    const MetadataT* __o;
+    const ::flatbuffers::rehasher_function_t* __rehasher;
+  } _va = {&_fbb, _o, _rehasher};
+  (void)_va;
   auto _schema_version = _o->schema_version;
-  auto _subgraphs = _o->subgraphs.size() ? _fbb.CreateVector<::flatbuffers::Offset<tflite::micro::compression::Subgraph>> (_o->subgraphs.size(), [](size_t i, _VectorArgs *__va) { return CreateSubgraph(*__va->__fbb, __va->__o->subgraphs[i].get(), __va->__rehasher); }, &_va ) : 0;
-  return tflite::micro::compression::CreateMetadata(
-      _fbb,
-      _schema_version,
-      _subgraphs);
+  auto _subgraphs =
+      _o->subgraphs.size()
+          ? _fbb.CreateVector<
+                ::flatbuffers::Offset<tflite::micro::compression::Subgraph>>(
+                _o->subgraphs.size(),
+                [](size_t i, _VectorArgs* __va) {
+                  return CreateSubgraph(*__va->__fbb,
+                                        __va->__o->subgraphs[i].get(),
+                                        __va->__rehasher);
+                },
+                &_va)
+          : 0;
+  return tflite::micro::compression::CreateMetadata(_fbb, _schema_version,
+                                                    _subgraphs);
 }
 
-inline SubgraphT::SubgraphT(const SubgraphT &o) {
+inline SubgraphT::SubgraphT(const SubgraphT& o) {
   lut_tensors.reserve(o.lut_tensors.size());
-  for (const auto &lut_tensors_ : o.lut_tensors) { lut_tensors.emplace_back((lut_tensors_) ? new tflite::micro::compression::LutTensorT(*lut_tensors_) : nullptr); }
+  for (const auto& lut_tensors_ : o.lut_tensors) {
+    lut_tensors.emplace_back(
+        (lut_tensors_)
+            ? new tflite::micro::compression::LutTensorT(*lut_tensors_)
+            : nullptr);
+  }
 }
 
-inline SubgraphT &SubgraphT::operator=(SubgraphT o) FLATBUFFERS_NOEXCEPT {
+inline SubgraphT& SubgraphT::operator=(SubgraphT o) FLATBUFFERS_NOEXCEPT {
   std::swap(lut_tensors, o.lut_tensors);
   return *this;
 }
 
-inline SubgraphT *Subgraph::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const {
+inline SubgraphT* Subgraph::UnPack(
+    const ::flatbuffers::resolver_function_t* _resolver) const {
   auto _o = std::unique_ptr<SubgraphT>(new SubgraphT());
   UnPackTo(_o.get(), _resolver);
   return _o.release();
 }
 
-inline void Subgraph::UnPackTo(SubgraphT *_o, const ::flatbuffers::resolver_function_t *_resolver) const {
+inline void Subgraph::UnPackTo(
+    SubgraphT* _o, const ::flatbuffers::resolver_function_t* _resolver) const {
  (void)_o;
  (void)_resolver;
-  { auto _e = lut_tensors(); if (_e) { _o->lut_tensors.resize(_e->size()); for (::flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { if(_o->lut_tensors[_i]) { _e->Get(_i)->UnPackTo(_o->lut_tensors[_i].get(), _resolver); } else { _o->lut_tensors[_i] = std::unique_ptr<tflite::micro::compression::LutTensorT>(_e->Get(_i)->UnPack(_resolver)); }; } } else { _o->lut_tensors.resize(0); } }
+  {
+    auto _e = lut_tensors();
+    if (_e) {
+      _o->lut_tensors.resize(_e->size());
+      for (::flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) {
+        if (_o->lut_tensors[_i]) {
+          _e->Get(_i)->UnPackTo(_o->lut_tensors[_i].get(), _resolver);
+        } else {
+          _o->lut_tensors[_i] =
+              std::unique_ptr<tflite::micro::compression::LutTensorT>(
+                  _e->Get(_i)->UnPack(_resolver));
+        };
+      }
+    } else {
+      _o->lut_tensors.resize(0);
+    }
+  }
 }
 
-inline ::flatbuffers::Offset<Subgraph> Subgraph::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const SubgraphT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) {
+inline ::flatbuffers::Offset<Subgraph> Subgraph::Pack(
+    ::flatbuffers::FlatBufferBuilder& _fbb, const SubgraphT* _o,
+    const ::flatbuffers::rehasher_function_t* _rehasher) {
   return CreateSubgraph(_fbb, _o, _rehasher);
 }
 
-inline ::flatbuffers::Offset<Subgraph> CreateSubgraph(::flatbuffers::FlatBufferBuilder &_fbb, const SubgraphT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) {
+inline ::flatbuffers::Offset<Subgraph> CreateSubgraph(
+    ::flatbuffers::FlatBufferBuilder& _fbb, const SubgraphT* _o,
+    const ::flatbuffers::rehasher_function_t* _rehasher) {
   (void)_rehasher;
   (void)_o;
-  struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const SubgraphT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
-  auto _lut_tensors = _o->lut_tensors.size() ? _fbb.CreateVector<::flatbuffers::Offset<tflite::micro::compression::LutTensor>> (_o->lut_tensors.size(), [](size_t i, _VectorArgs *__va) { return CreateLutTensor(*__va->__fbb, __va->__o->lut_tensors[i].get(), __va->__rehasher); }, &_va ) : 0;
-  return tflite::micro::compression::CreateSubgraph(
-      _fbb,
-      _lut_tensors);
+  struct _VectorArgs {
+    ::flatbuffers::FlatBufferBuilder* __fbb;
+    const SubgraphT* __o;
+    const ::flatbuffers::rehasher_function_t* __rehasher;
+  } _va = {&_fbb, _o, _rehasher};
+  (void)_va;
+  auto _lut_tensors =
+      _o->lut_tensors.size()
+          ? _fbb.CreateVector<
+                ::flatbuffers::Offset<tflite::micro::compression::LutTensor>>(
+                _o->lut_tensors.size(),
+                [](size_t i, _VectorArgs* __va) {
+                  return CreateLutTensor(*__va->__fbb,
+                                         __va->__o->lut_tensors[i].get(),
+                                         __va->__rehasher);
+                },
+                &_va)
+          : 0;
  return tflite::micro::compression::CreateSubgraph(_fbb, _lut_tensors);
 }
 
-inline LutTensorT *LutTensor::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const {
+inline LutTensorT* LutTensor::UnPack(
+    const ::flatbuffers::resolver_function_t* _resolver) const {
   auto _o = std::unique_ptr<LutTensorT>(new LutTensorT());
   UnPackTo(_o.get(), _resolver);
   return _o.release();
 }
 
-inline void LutTensor::UnPackTo(LutTensorT *_o, const ::flatbuffers::resolver_function_t *_resolver) const {
+inline void LutTensor::UnPackTo(
+    LutTensorT* _o, const ::flatbuffers::resolver_function_t* _resolver) const {
   (void)_o;
   (void)_resolver;
-  { auto _e = tensor(); _o->tensor = _e; }
-  { auto _e = value_buffer(); _o->value_buffer = _e; }
-  { auto _e = index_bitwidth(); _o->index_bitwidth = _e; }
+  {
+    auto _e = tensor();
+    _o->tensor = _e;
+  }
+  {
+    auto _e = value_buffer();
+    _o->value_buffer = _e;
+  }
+  {
+    auto _e = index_bitwidth();
+    _o->index_bitwidth = _e;
+  }
 }
 
-inline ::flatbuffers::Offset<LutTensor> LutTensor::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const LutTensorT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) {
+inline ::flatbuffers::Offset<LutTensor> LutTensor::Pack(
+    ::flatbuffers::FlatBufferBuilder& _fbb, const LutTensorT* _o,
+    const ::flatbuffers::rehasher_function_t* _rehasher) {
   return CreateLutTensor(_fbb, _o, _rehasher);
 }
 
-inline ::flatbuffers::Offset<LutTensor> CreateLutTensor(::flatbuffers::FlatBufferBuilder &_fbb, const LutTensorT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) {
+inline ::flatbuffers::Offset<LutTensor> CreateLutTensor(
+    ::flatbuffers::FlatBufferBuilder& _fbb, const LutTensorT* _o,
+    const ::flatbuffers::rehasher_function_t* _rehasher) {
   (void)_rehasher;
   (void)_o;
-  struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const LutTensorT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
+  struct _VectorArgs {
+    ::flatbuffers::FlatBufferBuilder* __fbb;
+    const LutTensorT* __o;
+    const ::flatbuffers::rehasher_function_t* __rehasher;
+  } _va = {&_fbb, _o, _rehasher};
+  (void)_va;
   auto _tensor = _o->tensor;
   auto _value_buffer = _o->value_buffer;
   auto _index_bitwidth = _o->index_bitwidth;
   return tflite::micro::compression::CreateLutTensor(
-      _fbb,
-      _tensor,
-      _value_buffer,
-      _index_bitwidth);
+      _fbb, _tensor, _value_buffer, _index_bitwidth);
 }
 
-inline const tflite::micro::compression::Metadata *GetMetadata(const void *buf) {
+inline const tflite::micro::compression::Metadata* GetMetadata(
+    const void* buf) {
   return ::flatbuffers::GetRoot<tflite::micro::compression::Metadata>(buf);
 }
 
-inline const tflite::micro::compression::Metadata *GetSizePrefixedMetadata(const void *buf) {
-  return ::flatbuffers::GetSizePrefixedRoot<tflite::micro::compression::Metadata>(buf);
+inline const tflite::micro::compression::Metadata* GetSizePrefixedMetadata(
+    const void* buf) {
+  return ::flatbuffers::GetSizePrefixedRoot<
+      tflite::micro::compression::Metadata>(buf);
 }
 
-inline bool VerifyMetadataBuffer(
-    ::flatbuffers::Verifier &verifier) {
+inline bool VerifyMetadataBuffer(::flatbuffers::Verifier& verifier) {
   return verifier.VerifyBuffer<tflite::micro::compression::Metadata>(nullptr);
 }
 
 inline bool VerifySizePrefixedMetadataBuffer(
-    ::flatbuffers::Verifier &verifier) {
-  return verifier.VerifySizePrefixedBuffer<tflite::micro::compression::Metadata>(nullptr);
+    ::flatbuffers::Verifier& verifier) {
+  return verifier
+      .VerifySizePrefixedBuffer<tflite::micro::compression::Metadata>(nullptr);
 }
 
 inline void FinishMetadataBuffer(
-    ::flatbuffers::FlatBufferBuilder &fbb,
+    ::flatbuffers::FlatBufferBuilder& fbb,
     ::flatbuffers::Offset<tflite::micro::compression::Metadata> root) {
   fbb.Finish(root);
 }
 
 inline void FinishSizePrefixedMetadataBuffer(
-    ::flatbuffers::FlatBufferBuilder &fbb,
+    ::flatbuffers::FlatBufferBuilder& fbb,
     ::flatbuffers::Offset<tflite::micro::compression::Metadata> root) {
   fbb.FinishSizePrefixed(root);
 }
 
 inline std::unique_ptr<tflite::micro::compression::MetadataT> UnPackMetadata(
-    const void *buf,
-    const ::flatbuffers::resolver_function_t *res = nullptr) {
-  return std::unique_ptr<tflite::micro::compression::MetadataT>(GetMetadata(buf)->UnPack(res));
+    const void* buf, const ::flatbuffers::resolver_function_t* res = nullptr) {
+  return std::unique_ptr<tflite::micro::compression::MetadataT>(
+      GetMetadata(buf)->UnPack(res));
 }
 
-inline std::unique_ptr<tflite::micro::compression::MetadataT> UnPackSizePrefixedMetadata(
-    const void *buf,
-    const ::flatbuffers::resolver_function_t *res = nullptr) {
-  return std::unique_ptr<tflite::micro::compression::MetadataT>(GetSizePrefixedMetadata(buf)->UnPack(res));
+inline std::unique_ptr<tflite::micro::compression::MetadataT>
+UnPackSizePrefixedMetadata(
+    const void* buf, const ::flatbuffers::resolver_function_t* res = nullptr) {
+  return std::unique_ptr<tflite::micro::compression::MetadataT>(
+      GetSizePrefixedMetadata(buf)->UnPack(res));
 }
 
 }  // namespace compression
diff --git a/tensorflow/lite/micro/cortex_m_corstone_300/system_setup.cc b/tensorflow/lite/micro/cortex_m_corstone_300/system_setup.cc
index 3ff84214309..cc87200c1c3 100644
--- a/tensorflow/lite/micro/cortex_m_corstone_300/system_setup.cc
+++ b/tensorflow/lite/micro/cortex_m_corstone_300/system_setup.cc
@@ -104,8 +104,8 @@ uint32_t GetCurrentTimeTicks() {
 
 #ifdef ETHOS_U
 #if defined(ETHOSU_FAST_MEMORY_SIZE) && ETHOSU_FAST_MEMORY_SIZE > 0
-__attribute__((aligned(16), section(".bss.ethosu_scratch")))
-uint8_t ethosu0_scratch[ETHOSU_FAST_MEMORY_SIZE];
+__attribute__((aligned(16), section(".bss.ethosu_scratch"))) uint8_t
+    ethosu0_scratch[ETHOSU_FAST_MEMORY_SIZE];
 #else
 #define ethosu0_scratch 0
 #define ETHOSU_FAST_MEMORY_SIZE 0
diff --git a/tensorflow/lite/micro/kernels/arc_mli/mli_function_specializations.h b/tensorflow/lite/micro/kernels/arc_mli/mli_function_specializations.h
index 6276fe73380..94d2a48cf27 100644
--- a/tensorflow/lite/micro/kernels/arc_mli/mli_function_specializations.h
+++ b/tensorflow/lite/micro/kernels/arc_mli/mli_function_specializations.h
@@ -25,8 +25,8 @@ typedef mli_status (*conv_func_ptr)(const mli_tensor* /*in*/,
                                     mli_tensor* /*out*/);
 
 #ifdef MLI_2_0
-conv_func_ptr __attribute__((weak))
-mli_krn_conv2d_hwcn(const mli_tensor* weights) {
+conv_func_ptr
+    __attribute__((weak)) mli_krn_conv2d_hwcn(const mli_tensor* weights) {
   int filter_w = weights->shape[KRNL_W_DIM_HWCN];
   int filter_h = weights->shape[KRNL_H_DIM_HWCN];
 
@@ -41,8 +41,9 @@ mli_krn_conv2d_hwcn(const mli_tensor* weights) {
   }
 }
 #else
-conv_func_ptr __attribute__((weak))
-mli_krn_conv2d_hwcn(const mli_tensor* weights, const mli_conv2d_cfg* cfg) {
+conv_func_ptr
+    __attribute__((weak)) mli_krn_conv2d_hwcn(const mli_tensor* weights,
+                                              const mli_conv2d_cfg* cfg) {
   return mli_krn_conv2d_nhwc_sa8_sa8_sa32;
 }
 #endif
@@ -55,8 +56,8 @@ typedef mli_status (*depthwise_func_ptr)(const mli_tensor* /*in*/,
                                          mli_tensor*
/*out*/); #ifdef MLI_2_0 -depthwise_func_ptr __attribute__((weak)) -mli_krn_depthwise_conv2d(const mli_tensor* weights) { +depthwise_func_ptr + __attribute__((weak)) mli_krn_depthwise_conv2d(const mli_tensor* weights) { int filter_w = weights->shape[KRNL_DW_W_DIM_HW1N]; int filter_h = weights->shape[KRNL_DW_H_DIM_HW1N]; @@ -69,15 +70,16 @@ mli_krn_depthwise_conv2d(const mli_tensor* weights) { } } #else -depthwise_func_ptr __attribute__((weak)) -mli_krn_depthwise_conv2d(const mli_tensor* weights, const mli_conv2d_cfg* cfg) { +depthwise_func_ptr + __attribute__((weak)) mli_krn_depthwise_conv2d(const mli_tensor* weights, + const mli_conv2d_cfg* cfg) { return mli_krn_depthwise_conv2d_hwcn_sa8_sa8_sa32; } #endif #ifdef MLI_2_0 -depthwise_func_ptr __attribute__((weak)) -mli_krn_group_conv2d(const mli_tensor* weights) { +depthwise_func_ptr + __attribute__((weak)) mli_krn_group_conv2d(const mli_tensor* weights) { int filter_w = weights->shape[KRNL_DW_W_DIM_HW1N]; int filter_h = weights->shape[KRNL_DW_H_DIM_HW1N]; @@ -97,8 +99,8 @@ typedef mli_status (*pooling_func_ptr)(const mli_tensor* /*in*/, mli_tensor* /*out*/); #ifdef MLI_2_0 -pooling_func_ptr __attribute__((weak)) -mli_krn_avepool(const mli_pool_cfg* cfg) { +pooling_func_ptr + __attribute__((weak)) mli_krn_avepool(const mli_pool_cfg* cfg) { int filter_w = cfg->kernel_width; int filter_h = cfg->kernel_height; @@ -111,15 +113,15 @@ mli_krn_avepool(const mli_pool_cfg* cfg) { } } #else -pooling_func_ptr __attribute__((weak)) -mli_krn_avepool(const mli_pool_cfg* cfg) { +pooling_func_ptr + __attribute__((weak)) mli_krn_avepool(const mli_pool_cfg* cfg) { return mli_krn_avepool_hwc_sa8; } #endif #ifdef MLI_2_0 -pooling_func_ptr __attribute__((weak)) -mli_krn_maxpool(const mli_pool_cfg* cfg) { +pooling_func_ptr + __attribute__((weak)) mli_krn_maxpool(const mli_pool_cfg* cfg) { int filter_w = cfg->kernel_width; int filter_h = cfg->kernel_height; @@ -132,8 +134,8 @@ mli_krn_maxpool(const mli_pool_cfg* cfg) { } } #else -pooling_func_ptr __attribute__((weak)) -mli_krn_maxpool(const mli_pool_cfg* cfg) { +pooling_func_ptr + __attribute__((weak)) mli_krn_maxpool(const mli_pool_cfg* cfg) { return mli_krn_maxpool_hwc_sa8; } #endif diff --git a/tensorflow/lite/micro/kernels/arc_mli/mli_interface.h b/tensorflow/lite/micro/kernels/arc_mli/mli_interface.h index b4087f3b87b..e260774570f 100644 --- a/tensorflow/lite/micro/kernels/arc_mli/mli_interface.h +++ b/tensorflow/lite/micro/kernels/arc_mli/mli_interface.h @@ -33,7 +33,7 @@ class MliTensorInterface { public: // Make sure that lifetime of MliTensorInterface instance isn't bigger than // related mli_tensor. 
-  MliTensorInterface(mli_tensor* tensor) : tensor_(tensor){};
+  MliTensorInterface(mli_tensor* tensor) : tensor_(tensor) {};
   MliTensorInterface() = default;
   ~MliTensorInterface() = default;
 
diff --git a/tensorflow/lite/micro/kernels/circular_buffer.cc b/tensorflow/lite/micro/kernels/circular_buffer.cc
index 3e901047bbc..0d3d4b41b90 100644
--- a/tensorflow/lite/micro/kernels/circular_buffer.cc
+++ b/tensorflow/lite/micro/kernels/circular_buffer.cc
@@ -91,8 +91,8 @@ TfLiteStatus CircularBufferEval(TfLiteContext* context, TfLiteNode* node) {
     EvalInt8(tflite::micro::GetTensorData<int8_t>(input), num_slots, depth,
              tflite::micro::GetTensorData<int8_t>(output));
   } else {
-    MicroPrintf("Type %s (%d) not supported.",
-                TfLiteTypeGetName(input->type), input->type);
+    MicroPrintf("Type %s (%d) not supported.", TfLiteTypeGetName(input->type),
+                input->type);
     return kTfLiteError;
   }
 
diff --git a/tensorflow/lite/micro/kernels/reduce_common.cc b/tensorflow/lite/micro/kernels/reduce_common.cc
index 8aeb529d757..cdc95d89424 100644
--- a/tensorflow/lite/micro/kernels/reduce_common.cc
+++ b/tensorflow/lite/micro/kernels/reduce_common.cc
@@ -120,7 +120,7 @@ enum MinMaxEvalType { kEvalMin, kEvalMax };
 template <typename T>
 struct MinMaxReducerCompare {
   MinMaxReducerCompare() = delete;
-  MinMaxReducerCompare(MinMaxEvalType evalType) : type_(evalType){};
+  MinMaxReducerCompare(MinMaxEvalType evalType) : type_(evalType) {};
 
   constexpr T initialValue() const {
     return (type_ == kEvalMin) ? std::numeric_limits<T>::max()
diff --git a/tensorflow/lite/micro/kernels/resize_bilinear_test.cc b/tensorflow/lite/micro/kernels/resize_bilinear_test.cc
index b52cebeb668..3208acde5c6 100644
--- a/tensorflow/lite/micro/kernels/resize_bilinear_test.cc
+++ b/tensorflow/lite/micro/kernels/resize_bilinear_test.cc
@@ -168,8 +168,10 @@ TF_LITE_MICRO_TEST(VerticalResizeInt8) {
 TF_LITE_MICRO_TEST(TwoDimensionalResize) {
   int input_dims[] = {4, 1, 2, 2, 1};
   const float input_data[] = {
-      3, 6,   //
-      9, 12,  //
+      3,
+      6,  //
+      9,
+      12,  //
   };
   const int32_t expected_size_data[] = {3, 3};
   const float expected_output_data[] = {
@@ -194,8 +196,10 @@ TF_LITE_MICRO_TEST(TwoDimensionalResize) {
 TF_LITE_MICRO_TEST(TwoDimensionalResizeInt8) {
   int input_dims[] = {4, 1, 2, 2, 1};
   const int8_t input_data[] = {
-      3, 6,   //
-      9, 12,  //
+      3,
+      6,  //
+      9,
+      12,  //
   };
   const int32_t expected_size_data[] = {3, 3};
   const int8_t expected_output_data[] = {
diff --git a/tensorflow/lite/micro/kernels/resize_nearest_neighbor_test.cc b/tensorflow/lite/micro/kernels/resize_nearest_neighbor_test.cc
index 3e06da8fac0..818cafc2fa8 100644
--- a/tensorflow/lite/micro/kernels/resize_nearest_neighbor_test.cc
+++ b/tensorflow/lite/micro/kernels/resize_nearest_neighbor_test.cc
@@ -156,8 +156,10 @@ TF_LITE_MICRO_TEST(VerticalResizeInt16) {
 TF_LITE_MICRO_TEST(TwoDimensionalResize) {
   int input_dims[] = {4, 1, 2, 2, 1};
   const float input_data[] = {
-      3, 6,   //
-      9, 12,  //
+      3,
+      6,  //
+      9,
+      12,  //
   };
   const int32_t expected_size_data[] = {3, 3};
   const float expected_output_data[] = {
@@ -177,8 +179,10 @@ TF_LITE_MICRO_TEST(TwoDimensionalResize) {
 TF_LITE_MICRO_TEST(TwoDimensionalResizeInt8) {
   int input_dims[] = {4, 1, 2, 2, 1};
   const int8_t input_data[] = {
-      3, -6,  //
-      9, 12,  //
+      3,
+      -6,  //
+      9,
+      12,  //
   };
   const int32_t expected_size_data[] = {3, 3};
   const int8_t expected_output_data[] = {
@@ -197,8 +201,10 @@ TF_LITE_MICRO_TEST(TwoDimensionalResizeInt8) {
 TF_LITE_MICRO_TEST(TwoDimensionalResizeInt16) {
   int input_dims[] = {4, 1, 2, 2, 1};
   const int16_t input_data[] = {
-      3, -6,  //
-      9, 12,  //
+      3,
+      -6,  //
+      9,
+
12, // }; const int32_t expected_size_data[] = {3, 3}; const int16_t expected_output_data[] = { diff --git a/tensorflow/lite/micro/kernels/xtensa/lstm_eval_hifi.cc b/tensorflow/lite/micro/kernels/xtensa/lstm_eval_hifi.cc index f8b6fd806f0..84aa63228c3 100644 --- a/tensorflow/lite/micro/kernels/xtensa/lstm_eval_hifi.cc +++ b/tensorflow/lite/micro/kernels/xtensa/lstm_eval_hifi.cc @@ -162,7 +162,7 @@ void calc_cell_state_without_cifg(int16_t* cell_state, // residue iterations #pragma concurrent #pragma loop_count max = 7 - for (i = 0; i < ((num_elms)&7); i++) { + for (i = 0; i < ((num_elms) & 7); i++) { d_cs_r_0 = p16_cs_r[i]; d_fg_0 = p16_fg_r[i]; d_cg_0 = p16_cg_r[i]; @@ -227,7 +227,7 @@ void calc_cell_state_without_cifg(int16_t* cell_state, // residue iterations #pragma concurrent #pragma loop_count max = 7 - for (i = 0; i < ((num_elms)&7); i++) { + for (i = 0; i < ((num_elms) & 7); i++) { d_cs_r_0 = p16_cs_r[i]; d_fg_0 = p16_fg_r[i]; d_cg_0 = p16_cg_r[i]; @@ -336,7 +336,7 @@ void calc_cell_state_with_cifg(int16_t* cell_state, const int16_t* forget_gate, // residue iterations #pragma concurrent #pragma loop_count max = 7 - for (i = 0; i < ((num_elms)&7); i++) { + for (i = 0; i < ((num_elms) & 7); i++) { d_cs_r_0 = p16_cs_r[i]; d_fg_0 = p16_fg_r[i]; d_cg_0 = p16_cg_r[i]; @@ -401,7 +401,7 @@ void calc_cell_state_with_cifg(int16_t* cell_state, const int16_t* forget_gate, // residue iterations #pragma concurrent #pragma loop_count max = 7 - for (i = 0; i < ((num_elms)&7); i++) { + for (i = 0; i < ((num_elms) & 7); i++) { d_cs_r_0 = p16_cs_r[i]; d_fg_0 = p16_fg_r[i]; d_cg_0 = p16_cg_r[i]; @@ -481,7 +481,7 @@ void xa_nn_elm_mul_16x16_asym8s(int8_t* output, const int16_t* input_1, // residue iterations #pragma concurrent #pragma loop_count max = 7 - for (int j = 0; j < ((num_elms)&7); j++) { + for (int j = 0; j < ((num_elms) & 7); j++) { AE_L16_IP(data_a_0, (ae_int16*)tmp_input_1, 2); AE_L16_IP(data_b_0, (ae_int16*)tmp_input_2, 2); @@ -668,7 +668,7 @@ void calc_cell_state_without_cifg(int16_t* cell_state, // residue iterations #pragma concurrent #pragma loop_count max = 7 - for (i = 0; i < ((num_elms)&7); i++) { + for (i = 0; i < ((num_elms) & 7); i++) { d_cs_r_0 = p16_cs_r[i]; d_fg_0 = p16_fg_r[i]; d_cg_0 = p16_cg_r[i]; @@ -738,7 +738,7 @@ void calc_cell_state_without_cifg(int16_t* cell_state, // residue iterations #pragma concurrent #pragma loop_count max = 7 - for (i = 0; i < ((num_elms)&7); i++) { + for (i = 0; i < ((num_elms) & 7); i++) { d_cs_r_0 = p16_cs_r[i]; d_fg_0 = p16_fg_r[i]; d_cg_0 = p16_cg_r[i]; @@ -851,7 +851,7 @@ void calc_cell_state_with_cifg(int16_t* cell_state, const int16_t* forget_gate, // residue iterations #pragma concurrent #pragma loop_count max = 7 - for (i = 0; i < ((num_elms)&7); i++) { + for (i = 0; i < ((num_elms) & 7); i++) { d_cs_r_0 = p16_cs_r[i]; d_fg_0 = p16_fg_r[i]; d_cg_0 = p16_cg_r[i]; @@ -920,7 +920,7 @@ void calc_cell_state_with_cifg(int16_t* cell_state, const int16_t* forget_gate, // residue iterations #pragma concurrent #pragma loop_count max = 7 - for (i = 0; i < ((num_elms)&7); i++) { + for (i = 0; i < ((num_elms) & 7); i++) { d_cs_r_0 = p16_cs_r[i]; d_fg_0 = p16_fg_r[i]; d_cg_0 = p16_cg_r[i]; @@ -996,7 +996,7 @@ void xa_nn_elm_mul_16x16_asym8s(int8_t* output, const int16_t* input_1, // residue iterations #pragma concurrent #pragma loop_count max = 3 - for (int j = 0; j < ((num_elms)&3); j++) { + for (int j = 0; j < ((num_elms) & 3); j++) { AE_L16_IP(data_a_0, (ae_int16*)tmp_input_1, 2); AE_L16_IP(data_b_0, (ae_int16*)tmp_input_2, 2);