Skip to content

Commit 3986f55

Browse files
[core] Support OpenVINO compilation with cpp20 (openvinotoolkit#22784)
### Details: - Add support for building OpenVINO with C++ standards newer than C++11 (e.g. C++20). ### Tickets: - [CVS-128282](https://jira.devtools.intel.com/browse/CVS-128282) --------- Co-authored-by: Ilya Lavrenov <ilya.lavrenov@intel.com>
1 parent 9b22324 commit 3986f55

File tree

28 files changed

+120
-43
lines changed

28 files changed

+120
-43
lines changed

.github/workflows/linux_conditional_compilation.yml

+1
Original file line numberDiff line numberDiff line change
@@ -152,6 +152,7 @@ jobs:
152152
run: |
153153
cmake \
154154
-G "${{ env.CMAKE_GENERATOR }}" \
155+
-DCMAKE_CXX_STANDARD=20 \
155156
-DBUILD_SHARED_LIBS=OFF \
156157
-DENABLE_TESTS=ON \
157158
-DENABLE_CPPLINT=OFF \

.github/workflows/windows_conditional_compilation.yml

+1
Original file line numberDiff line numberDiff line change
@@ -147,6 +147,7 @@ jobs:
147147
run: |
148148
cmake -G "${{ env.CMAKE_GENERATOR }}" `
149149
-DBUILD_SHARED_LIBS=OFF `
150+
-DCMAKE_CXX_STANDARD=20 `
150151
-DENABLE_TESTS=ON `
151152
-DENABLE_CPPLINT=OFF `
152153
-DENABLE_NCC_STYLE=OFF `

docs/snippets/CMakeLists.txt

+4
Original file line numberDiff line numberDiff line change
@@ -15,6 +15,10 @@ if(UNUSED_BUT_SET_VARIABLE_SUPPORTED)
1515
ov_add_compiler_flags(-Wno-unused-but-set-variable)
1616
endif()
1717

18+
if((CMAKE_COMPILER_IS_GNUCXX OR OV_COMPILER_IS_CLANG) AND CMAKE_CXX_STANDARD GREATER_EQUAL 20)
19+
set(CMAKE_CXX_FLAGS "-Wno-error=deprecated ${CMAKE_CXX_FLAGS}")
20+
endif()
21+
1822
file(GLOB SOURCES "${CMAKE_CURRENT_SOURCE_DIR}/*.cpp"
1923
"${CMAKE_CURRENT_SOURCE_DIR}/src/*.cpp"
2024
"${CMAKE_CURRENT_SOURCE_DIR}/src/*.c")

samples/cpp/hello_classification/main.cpp

+2-1
Original file line numberDiff line numberDiff line change
@@ -28,7 +28,8 @@ int tmain(int argc, tchar* argv[]) {
2828

2929
// -------- Parsing and validation of input arguments --------
3030
if (argc != 4) {
31-
slog::info << "Usage : " << argv[0] << " <path_to_model> <path_to_image> <device_name>" << slog::endl;
31+
slog::info << "Usage : " << TSTRING2STRING(argv[0]) << " <path_to_model> <path_to_image> <device_name>"
32+
<< slog::endl;
3233
return EXIT_FAILURE;
3334
}
3435

src/common/low_precision_transformations/CMakeLists.txt

+3
Original file line numberDiff line numberDiff line change
@@ -16,6 +16,9 @@ source_group("src" FILES ${LIBRARY_SRC})
1616
source_group("include" FILES ${PUBLIC_HEADERS})
1717

1818
# Create library
19+
if((CMAKE_COMPILER_IS_GNUCXX OR OV_COMPILER_IS_CLANG) AND CMAKE_CXX_STANDARD GREATER_EQUAL 20)
20+
set(CMAKE_CXX_FLAGS "-Wno-error=deprecated ${CMAKE_CXX_FLAGS}")
21+
endif()
1922

2023
add_library(${TARGET_NAME}_obj OBJECT
2124
${LIBRARY_SRC}

src/common/snippets/CMakeLists.txt

+3
Original file line numberDiff line numberDiff line change
@@ -16,6 +16,9 @@ source_group("src" FILES ${LIBRARY_SRC})
1616
source_group("include" FILES ${PUBLIC_HEADERS})
1717

1818
# Create static library
19+
if((CMAKE_COMPILER_IS_GNUCXX OR OV_COMPILER_IS_CLANG) AND CMAKE_CXX_STANDARD GREATER_EQUAL 20)
20+
set(CMAKE_CXX_FLAGS "-Wno-error=deprecated ${CMAKE_CXX_FLAGS}")
21+
endif()
1922

2023
add_library(${TARGET_NAME} STATIC
2124
${LIBRARY_SRC}

src/common/transformations/CMakeLists.txt

+3
Original file line numberDiff line numberDiff line change
@@ -16,6 +16,9 @@ source_group("src" FILES ${LIBRARY_SRC})
1616
source_group("include" FILES ${PUBLIC_HEADERS})
1717

1818
# Create library
19+
if((CMAKE_COMPILER_IS_GNUCXX OR OV_COMPILER_IS_CLANG) AND CMAKE_CXX_STANDARD GREATER_EQUAL 20)
20+
set(CMAKE_CXX_FLAGS "-Wno-error=deprecated ${CMAKE_CXX_FLAGS}")
21+
endif()
1922

2023
add_library(${TARGET_NAME}_obj OBJECT ${LIBRARY_SRC} ${PUBLIC_HEADERS})
2124
target_compile_definitions(${TARGET_NAME}_obj PRIVATE IMPLEMENT_OPENVINO_API)

src/common/transformations/include/transformations/rt_info/nms_selected_indices.hpp

+1-1
Original file line numberDiff line numberDiff line change
@@ -21,7 +21,7 @@ TRANSFORMATIONS_API bool has_nms_selected_indices(const Node* node);
2121

2222
TRANSFORMATIONS_API void set_nms_selected_indices(Node* node);
2323

24-
class TRANSFORMATIONS_API NmsSelectedIndices : ov::RuntimeAttribute {
24+
class TRANSFORMATIONS_API NmsSelectedIndices : public ov::RuntimeAttribute {
2525
public:
2626
OPENVINO_RTTI("nms_selected_indices", "0");
2727
NmsSelectedIndices() = default;

src/core/tests/matcher_pass.cpp

+1-1
Original file line numberDiff line numberDiff line change
@@ -25,7 +25,7 @@ class TestMatcherPass : public ov::pass::MatcherPass {
2525
auto m_relu1 = ov::pass::pattern::wrap_type<ov::op::v0::Relu>(pattern::consumers_count(1));
2626
auto m_relu2 = ov::pass::pattern::wrap_type<ov::op::v0::Relu>({m_relu1});
2727

28-
ov::graph_rewrite_callback callback = [=](pattern::Matcher& m) {
28+
ov::graph_rewrite_callback callback = [m_relu1, this](pattern::Matcher& m) {
2929
// Map that helps to connect labels with matched outputs
3030
auto& node_to_output = m.get_pattern_value_map();
3131

src/frontends/paddle/src/CMakeLists.txt

+4
Original file line numberDiff line numberDiff line change
@@ -2,6 +2,10 @@
22
# SPDX-License-Identifier: Apache-2.0
33
#
44

5+
if((CMAKE_COMPILER_IS_GNUCXX OR OV_COMPILER_IS_CLANG) AND CMAKE_CXX_STANDARD GREATER_EQUAL 20)
6+
set(CMAKE_CXX_FLAGS "-Wno-error=deprecated ${CMAKE_CXX_FLAGS}")
7+
endif()
8+
59
ov_add_frontend(NAME paddle
610
LINKABLE_FRONTEND
711
PROTOBUF_REQUIRED

src/frontends/pytorch/src/CMakeLists.txt

+4
Original file line numberDiff line numberDiff line change
@@ -2,6 +2,10 @@
22
# SPDX-License-Identifier: Apache-2.0
33
#
44

5+
if((CMAKE_COMPILER_IS_GNUCXX OR OV_COMPILER_IS_CLANG) AND CMAKE_CXX_STANDARD GREATER_EQUAL 20)
6+
set(CMAKE_CXX_FLAGS "-Wno-error=deprecated ${CMAKE_CXX_FLAGS}")
7+
endif()
8+
59
ov_add_frontend(NAME pytorch
610
LINKABLE_FRONTEND
711
SHUTDOWN_PROTOBUF

src/frontends/tensorflow/src/variables_index.cpp

+2-2
Original file line numberDiff line numberDiff line change
@@ -228,11 +228,11 @@ bool VariablesIndex::read_variables(std::ifstream& vi_stream, const std::wstring
228228
}
229229
if (m_mmap_enabled) {
230230
m_data_files[shard].mmap = load_mmap_object(fullPath);
231-
FRONT_END_GENERAL_CHECK(m_data_files[shard].mmap->data(), L"Variable index data cannot be mapped");
231+
FRONT_END_GENERAL_CHECK(m_data_files[shard].mmap->data(), "Variable index data cannot be mapped");
232232
} else {
233233
m_data_files[shard].stream = std::shared_ptr<std::ifstream>(
234234
new std::ifstream(fullPath.c_str(), std::ifstream::in | std::ifstream::binary));
235-
FRONT_END_GENERAL_CHECK(m_data_files[shard].stream->is_open(), L"Variable index data file does not exist");
235+
FRONT_END_GENERAL_CHECK(m_data_files[shard].stream->is_open(), "Variable index data file does not exist");
236236
}
237237
}
238238

src/inference/tests/functional/caching_test.cpp

+2-4
Original file line numberDiff line numberDiff line change
@@ -2359,9 +2359,7 @@ TEST_P(CachingTest, LoadBATCHWithConfig) {
23592359
EXPECT_CALL(*mockPlugin, get_property(ov::internal::caching_properties.name(), _)).Times(AnyNumber());
23602360
EXPECT_CALL(*mockPlugin, get_property(ov::hint::performance_mode.name(), _))
23612361
.Times(AnyNumber())
2362-
.WillRepeatedly(Return([] {
2363-
return ov::hint::PerformanceMode::THROUGHPUT;
2364-
}));
2362+
.WillRepeatedly(Return(ov::hint::PerformanceMode::THROUGHPUT));
23652363
if (m_remoteContext) {
23662364
return; // skip the remote Context test for Auto plugin
23672365
}
@@ -2490,4 +2488,4 @@ INSTANTIATE_TEST_SUITE_P(CacheTestWithProxyEnabled,
24902488
CacheTestWithProxyEnabled,
24912489
::testing::Combine(::testing::ValuesIn(loadVariants), ::testing::ValuesIn(cacheFolders)),
24922490
getTestCaseName);
2493-
#endif
2491+
#endif

src/plugins/auto_batch/src/sync_infer_request.cpp

+1-1
Original file line numberDiff line numberDiff line change
@@ -160,4 +160,4 @@ std::vector<ov::ProfilingInfo> SyncInferRequest::get_profiling_info() const {
160160
return m_batched_request_wrapper->_infer_request_batched->get_profiling_info();
161161
}
162162
} // namespace autobatch_plugin
163-
} // namespace ov
163+
} // namespace ov

src/plugins/intel_cpu/CMakeLists.txt

+5-1
Original file line numberDiff line numberDiff line change
@@ -8,6 +8,10 @@ endif()
88

99
set(TARGET_NAME "openvino_intel_cpu_plugin")
1010

11+
if((CMAKE_COMPILER_IS_GNUCXX OR OV_COMPILER_IS_CLANG) AND CMAKE_CXX_STANDARD GREATER_EQUAL 20)
12+
set(CMAKE_CXX_FLAGS "-Wno-error=deprecated ${CMAKE_CXX_FLAGS}")
13+
endif()
14+
1115
if(CMAKE_CXX_COMPILER_ID STREQUAL "MSVC")
1216
# C4267, 4244 issues from oneDNN headers conversion from 'XXX' to 'YYY', possible loss of data
1317
ov_add_compiler_flags(/wd4018)
@@ -205,7 +209,7 @@ if(BUILD_SHARED_LIBS)
205209
$<TARGET_PROPERTY:openvino::conditional_compilation,INTERFACE_INCLUDE_DIRECTORIES>)
206210

207211
target_include_directories(${TARGET_NAME}_obj SYSTEM PUBLIC $<TARGET_PROPERTY:dnnl,INCLUDE_DIRECTORIES>)
208-
212+
209213
if(ENABLE_MLAS_FOR_CPU)
210214
target_include_directories(${TARGET_NAME}_obj SYSTEM PUBLIC $<TARGET_PROPERTY:mlas,INCLUDE_DIRECTORIES>)
211215
endif()

src/plugins/intel_cpu/src/cache/multi_cache.h

+8-4
Original file line numberDiff line numberDiff line change
@@ -41,10 +41,14 @@ class MultiCache {
4141
* Also the builder type is used for the ValueType deduction
4242
* @return result of the operation which is a pair of the requested object of ValType and the status of whether the cache hit or miss occurred
4343
*/
44-
45-
template<typename KeyType, typename BuilderType, typename ValueType = typename std::result_of<BuilderType&(const KeyType&)>::type>
46-
typename CacheEntry<KeyType, ValueType>::ResultType
47-
getOrCreate(const KeyType& key, BuilderType builder) {
44+
template <typename KeyType,
45+
typename BuilderType,
46+
#if (defined(_MSVC_LANG) && (_MSVC_LANG > 201703L)) || (defined(__cplusplus) && (__cplusplus > 201703L))
47+
typename ValueType = std::invoke_result_t<BuilderType&, const KeyType&>>
48+
#else
49+
typename ValueType = typename std::result_of<BuilderType&(const KeyType&)>::type>
50+
#endif
51+
typename CacheEntry<KeyType, ValueType>::ResultType getOrCreate(const KeyType& key, BuilderType builder) {
4852
auto entry = getEntry<KeyType, ValueType>();
4953
return entry->getOrCreate(key, std::move(builder));
5054
}

src/plugins/intel_cpu/src/graph.cpp

+17-6
Original file line numberDiff line numberDiff line change
@@ -1093,6 +1093,17 @@ class UpdateNodesSeq : public IUpdateNodes {
10931093
#endif
10941094

10951095
#if (OV_THREAD == OV_THREAD_TBB || OV_THREAD == OV_THREAD_TBB_AUTO || OV_THREAD == OV_THREAD_OMP)
1096+
1097+
# if (defined(_MSVC_LANG) && (_MSVC_LANG > 201703L)) || (defined(__cplusplus) && (__cplusplus > 201703L))
1098+
# define ov_memory_order_release std::memory_order_release
1099+
# define ov_memory_order_relaxed std::memory_order_relaxed
1100+
# define ov_memory_order_acquire std::memory_order_acquire
1101+
# else
1102+
# define ov_memory_order_release std::memory_order::memory_order_release
1103+
# define ov_memory_order_relaxed std::memory_order::memory_order_relaxed
1104+
# define ov_memory_order_acquire std::memory_order::memory_order_acquire
1105+
# endif
1106+
10961107
class UpdateNodesBase : public IUpdateNodes {
10971108
public:
10981109
explicit UpdateNodesBase(std::vector<NodePtr>& executableGraphNodes) : m_executableGraphNodes(executableGraphNodes) {}
@@ -1103,22 +1114,22 @@ class UpdateNodesBase : public IUpdateNodes {
11031114
if (node->isDynamicNode()) {
11041115
node->updateShapes();
11051116
}
1106-
m_prepareCounter.store(i, std::memory_order::memory_order_release);
1117+
m_prepareCounter.store(i, ov_memory_order_release);
11071118
}
11081119
}
11091120
catch(...) {
1110-
m_completion.store(true, std::memory_order::memory_order_relaxed);
1121+
m_completion.store(true, ov_memory_order_relaxed);
11111122
throw;
11121123
}
1113-
m_prepareCounter.store(stop_indx, std::memory_order::memory_order_relaxed);
1114-
m_completion.store(true, std::memory_order::memory_order_release);
1124+
m_prepareCounter.store(stop_indx, ov_memory_order_relaxed);
1125+
m_completion.store(true, ov_memory_order_release);
11151126
}
11161127

11171128
void updateDynParams(size_t node_indx, size_t /*unused*/) {
11181129
size_t local_counter = node_indx;
11191130
while (true) {
1120-
const bool completion = m_completion.load(std::memory_order::memory_order_acquire);
1121-
const size_t prepareCounter = m_prepareCounter.load(std::memory_order::memory_order_relaxed);
1131+
const bool completion = m_completion.load(ov_memory_order_acquire);
1132+
const size_t prepareCounter = m_prepareCounter.load(ov_memory_order_relaxed);
11221133
if (completion && local_counter == prepareCounter) {
11231134
break;
11241135
}

src/plugins/intel_cpu/src/nodes/executors/fullyconnected_implementations.cpp

+4-1
Original file line numberDiff line numberDiff line change
@@ -244,7 +244,10 @@ const std::vector<ExecutorImplementation<FCAttrs>>& getImplementations() {
244244
return true;
245245
},
246246
// create
247-
[](const FCAttrs& attrs, const PostOps& postOps, const MemoryArgs& memory, ExecutorContext::CPtr context) {
247+
[](const FCAttrs& attrs,
248+
const PostOps& postOps,
249+
const MemoryArgs& memory,
250+
ExecutorContext::CPtr context) -> std::shared_ptr<Executor> {
248251
struct ConvolutionInstantiator {
249252
std::shared_ptr<DnnlConvolutionPrimitive> operator()(
250253
const MemoryArgs& memory,

src/plugins/intel_cpu/src/nodes/inverse.cpp

+1-1
Original file line numberDiff line numberDiff line change
@@ -153,7 +153,7 @@ void Inverse::lu_decomposition(const T* data,
153153

154154
// Find maximum value pivot - non-parallel
155155
for (size_t i = (k + 1) * m_side, j = k + 1; i < m_side_squared; i += m_side, ++j) {
156-
if (abs(U[i + k]) > abs(U[pivot_idx + k])) {
156+
if (std::abs(U[i + k]) > std::abs(U[pivot_idx + k])) {
157157
pivot_row = j;
158158
pivot_idx = pivot_row * m_side;
159159
}

src/plugins/intel_cpu/src/nodes/kernels/x64/jit_kernel.hpp

+3
Original file line numberDiff line numberDiff line change
@@ -700,6 +700,9 @@ struct jit_kernel : public dnnl::impl::cpu::x64::jit_generator {
700700
std::unordered_map<size_t, std::unique_ptr<jit_emitter>> _emitters;
701701
};
702702

703+
template <>
704+
const Xbyak::Reg64& jit_kernel::reserve<Xbyak::Reg64>();
705+
703706
template<typename T>
704707
void jit_kernel::copy(const Xbyak::Reg64& dst,
705708
const Xbyak::Reg64& src,

src/plugins/intel_gpu/CMakeLists.txt

+4
Original file line numberDiff line numberDiff line change
@@ -8,6 +8,10 @@ endif()
88

99
set (TARGET_NAME "openvino_intel_gpu_plugin")
1010

11+
if((CMAKE_COMPILER_IS_GNUCXX OR OV_COMPILER_IS_CLANG) AND CMAKE_CXX_STANDARD GREATER_EQUAL 20)
12+
set(CMAKE_CXX_FLAGS "-Wno-error=deprecated ${CMAKE_CXX_FLAGS}")
13+
endif()
14+
1115
if(CMAKE_COMPILER_IS_GNUCXX)
1216
ov_add_compiler_flags(-Wno-strict-aliasing)
1317
endif()

src/plugins/intel_gpu/src/graph/graph_optimizer/reorder_inputs.cpp

+10-10
Original file line numberDiff line numberDiff line change
@@ -689,16 +689,16 @@ void reorder_inputs::run(program& p, layout_optimizer& lo, reorder_factory& rf)
689689
}
690690

691691
GPU_DEBUG_IF(debug_config->verbose >= 2) {
692-
reorder_cnt total_reorder_count = std::accumulate(
693-
p.get_processing_order().begin(),
694-
p.get_processing_order().end(),
695-
reorder_cnt{ 0, 0 },
696-
[&](reorder_cnt& total, program_node* node) {
697-
if (fmt_map.count(node) == 0 || fmt_map.at(node) == format::any)
698-
return total;
699-
auto count = count_reorders(fmt_map, lo, node);
700-
return reorder_cnt{ total.number + count.number, total.total_sizes + count.total_sizes };
701-
});
692+
reorder_cnt total_reorder_count =
693+
std::accumulate(p.get_processing_order().begin(),
694+
p.get_processing_order().end(),
695+
reorder_cnt{0, 0},
696+
[&](reorder_cnt total, program_node* node) {
697+
if (fmt_map.count(node) == 0 || fmt_map.at(node) == format::any)
698+
return total;
699+
auto count = count_reorders(fmt_map, lo, node);
700+
return reorder_cnt{total.number + count.number, total.total_sizes + count.total_sizes};
701+
});
702702
// Divide results by two as above function will each reorder from both sides
703703
GPU_DEBUG_LOG_PASS << "Total number of reorders: " << total_reorder_count.number / 2 << std::endl;
704704
GPU_DEBUG_LOG_PASS << "Total elements count of all reorders: " << total_reorder_count.total_sizes / 2 << std::endl;

src/plugins/intel_gpu/src/kernel_selector/auto_tuner.cpp

+21
Original file line numberDiff line numberDiff line change
@@ -36,6 +36,27 @@
3636
#include <dlfcn.h>
3737
#endif
3838

39+
#if __cplusplus > 201703L
40+
41+
// Add operators `==` and `!=` for rapidjson::GenericMemberIterator for non const iterator when build with C++20,
42+
// is more strict regarding type checks.
43+
namespace rapidjson {
44+
45+
template <typename Encoding, typename Allocator>
46+
inline bool operator==(GenericMemberIterator<false, Encoding, Allocator> lhs,
47+
GenericMemberIterator<false, Encoding, Allocator> rhs) {
48+
return static_cast<GenericMemberIterator<true, Encoding, Allocator>>(lhs) ==
49+
static_cast<GenericMemberIterator<true, Encoding, Allocator>>(rhs);
50+
}
51+
52+
template <typename Encoding, typename Allocator>
53+
inline bool operator!=(GenericMemberIterator<false, Encoding, Allocator> lhs,
54+
GenericMemberIterator<false, Encoding, Allocator> rhs) {
55+
return !(lhs == rhs);
56+
}
57+
} // namespace rapidjson
58+
#endif
59+
3960
namespace kernel_selector {
4061

4162
class TuningCache::Impl {

src/plugins/intel_gpu/src/kernel_selector/kernel_selector_common.cpp

-2
Original file line numberDiff line numberDiff line change
@@ -612,10 +612,8 @@ std::string toString_v2(const DataTensor& tensor) {
612612
std::stringstream s;
613613
s << toString(tensor.GetDType()) << "_";
614614
s << toString(tensor.GetLayout());
615-
int i = 0;
616615
for (auto dim : tensor.GetDims()) {
617616
s << "_v" << dim.v << "_p" << dim.pad.before << "_" << dim.pad.after;
618-
i++;
619617
}
620618
return s.str();
621619
}

src/plugins/intel_gpu/tests/unit/module_tests/primitive_comparison_test.cpp

+7
Original file line numberDiff line numberDiff line change
@@ -11,6 +11,13 @@
1111
#include <intel_gpu/primitives/gather.hpp>
1212
#include <intel_gpu/primitives/permute.hpp>
1313

14+
namespace cldnn {
15+
// For gtest NE compare, class defines only `==` operator. Required when building using C++20
16+
inline bool operator!=(const range& lhs, const fully_connected& rhs) {
17+
return !(lhs.operator==(rhs));
18+
}
19+
} // namespace cldnn
20+
1421
using namespace cldnn;
1522
using namespace ::tests;
1623

src/tests/test_utils/common_test_utils/src/file_utils.cpp

+2-2
Original file line numberDiff line numberDiff line change
@@ -192,7 +192,7 @@ std::string getRelativePath(const std::string& from, const std::string& to) {
192192
output += std::accumulate(mismatch_it.first,
193193
from_vec.end(),
194194
std::string{},
195-
[&separator](std::string& a, const std::string&) -> std::string {
195+
[&separator](std::string a, const std::string&) -> std::string {
196196
return a += ".." + separator;
197197
});
198198
}
@@ -203,7 +203,7 @@ std::string getRelativePath(const std::string& from, const std::string& to) {
203203
output += std::accumulate(mismatch_it.second,
204204
to_vec.end(),
205205
std::string{},
206-
[&separator](std::string& a, const std::string& b) -> std::string {
206+
[&separator](std::string a, const std::string& b) -> std::string {
207207
return a.empty() ? a += b : a += separator + b;
208208
});
209209
return output;

thirdparty/itt_collector/sea_itt_lib/sea_itt_lib.cpp

+5-5
Original file line numberDiff line numberDiff line change
@@ -327,14 +327,14 @@ SEA_EXPORT int NotifyEvent(iJIT_JVM_EVENT event_type, void* EventSpecificData) {
327327

328328
switch (event_type) {
329329
case iJVM_EVENT_TYPE_METHOD_LOAD_FINISHED: {
330-
sea::WriteJit(&(uint32_t)methodData->method_id, sizeof(uint32_t));
330+
sea::WriteJit(&methodData->method_id, sizeof(uint32_t));
331331
sea::WriteJit(&methodData->method_load_address, sizeof(void*));
332-
sea::WriteJit(&(uint32_t)methodData->method_size, sizeof(uint32_t));
333-
sea::WriteJit(&(uint32_t)methodData->line_number_size, sizeof(uint32_t));
332+
sea::WriteJit(&methodData->method_size, sizeof(uint32_t));
333+
sea::WriteJit(&methodData->line_number_size, sizeof(uint32_t));
334334
for (unsigned int i = 0; i < methodData->line_number_size; ++i) {
335335
const LineNumberInfo& lni = methodData->line_number_table[i];
336-
sea::WriteJit(&(uint32_t)lni.Offset, sizeof(uint32_t));
337-
sea::WriteJit(&(uint32_t)lni.LineNumber, sizeof(uint32_t));
336+
sea::WriteJit(&lni.Offset, sizeof(uint32_t));
337+
sea::WriteJit(&lni.LineNumber, sizeof(uint32_t));
338338
}
339339

340340
const char* strings[] = {methodData->method_name, methodData->class_file_name, methodData->source_file_name};

0 commit comments

Comments
 (0)