
Commit c902c4d

ggml-qnn: refine code format to make potential collaborators happy
1 parent a47bbf3 commit c902c4d

File tree

1 file changed (+38, -37 lines)


ggml/src/ggml-qnn/ggml-qnn.cpp

Lines changed: 38 additions & 37 deletions
@@ -1743,39 +1743,38 @@ class qnn_instance {
     static constexpr const int _required_num_providers = 1;

 private:
-    std::string _lib_path;
-    std::string _backend_name;
-    std::string _model_name; // name of prebuilt QNN model, might be used in the future
-    BackendIdType _backend_id;
+    std::string _lib_path;
+    std::string _backend_name;
+    std::string _model_name; // name of prebuilt QNN model, might be used in the future
+    BackendIdType _backend_id;

     bool _debug_tensor = false; // flag to indicate if requested graph is to be run in debug mode
     bool _do_node_validations = true; // flag to indicate whether all add_node calls need to be validated
     QnnLog_Level_t _qnn_log_level = QNN_LOG_LEVEL_DEBUG;

     ggml_qnn_profile_level _profile_level = ggml_qnn_profile_level::profile_detail;

-    qnn_interface _qnn_interface;
-
-    void * _system_lib_handle = nullptr;
+    void * _system_lib_handle = nullptr;

-    Qnn_GraphHandle_t _qnn_graph_handle = nullptr;
+    Qnn_GraphHandle_t _qnn_graph_handle = nullptr;

-    Qnn_LogHandle_t _qnn_log_handle = nullptr;
+    Qnn_LogHandle_t _qnn_log_handle = nullptr;

     Qnn_ProfileHandle_t _qnn_profile_handle = nullptr;

-    Qnn_DeviceHandle_t _qnn_device_handle = nullptr;
+    Qnn_DeviceHandle_t _qnn_device_handle = nullptr;

     Qnn_BackendHandle_t _qnn_backend_handle = nullptr;

     Qnn_ContextHandle_t _qnn_context_handle = nullptr;

     QnnSystemContext_Handle_t _qnn_system_handle = nullptr;

-    QnnHtpDevice_PerfInfrastructure_t *_qnn_htp_perfinfra = nullptr;
-    uint32_t _qnn_power_configid = 1;
-    uint32_t _qnn_rpc_pollingtime = 9999; // 0-10000 us for high performing
+    QnnHtpDevice_PerfInfrastructure_t * _qnn_htp_perfinfra = nullptr;
+    uint32_t _qnn_power_configid = 1;
+    uint32_t _qnn_rpc_pollingtime = 9999; // 0-10000 us for high performing

+    qnn_interface _qnn_interface;
     QNN_INTERFACE_VER_TYPE _qnn_raw_interface;
     QNN_SYSTEM_INTERFACE_VER_TYPE _qnn_raw_system_interface;

@@ -1787,7 +1786,6 @@ class qnn_instance {
     static std::unordered_map<std::string, BackendIdType> _lib_path_to_backend_id;
     static std::unordered_map<BackendIdType, const QnnInterface_t *> _loaded_backend;

-    void * _rpc_lib_handle = nullptr;
     std::atomic_bool _rpcmem_initialized{false};
     pfn_rpc_mem_alloc _pfn_rpc_mem_alloc;
     pfn_rpc_mem_free _pfn_rpc_mem_free;
@@ -1796,12 +1794,13 @@ class qnn_instance {
     pfn_rpc_mem_deinit _pfn_rpc_mem_deinit;
     std::unordered_map<void *, void *> _rpcmem_store_map;
     std::unordered_map<void *, size_t> _rpcmem_usage_map;
-    size_t _rpcmem_capacity = 512; // mempool size in Mbytes
     size_t _rpcmem_usage = 0; // mempool usage in Mbytes
+    size_t _rpcmem_capacity = 512; // mempool size in Mbytes

     std::string _graph_name;
     QNNBackend _device_id;
-    bool _enable_qnn_rpc = false; //TODO:unknown issue with QNN RPC feature
+    void * _rpc_lib_handle = nullptr;
+    bool _enable_qnn_rpc = false; //TODO:unknown issue with QNN RPC feature

     DISABLE_COPY(qnn_instance);
     DISABLE_MOVE(qnn_instance);
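
The DISABLE_COPY / DISABLE_MOVE macros at the end of this hunk follow the usual deleted-special-member idiom. A minimal sketch of what such macros conventionally expand to (the actual definitions live elsewhere in ggml-qnn.cpp and may differ):

```cpp
// Conventional deleted-special-member idiom; illustrative only, the real
// DISABLE_COPY / DISABLE_MOVE definitions in ggml-qnn.cpp may differ.
#define DISABLE_COPY(class_name)                            \
    class_name(const class_name &)             = delete;    \
    class_name & operator=(const class_name &) = delete

#define DISABLE_MOVE(class_name)                            \
    class_name(class_name &&)             = delete;         \
    class_name & operator=(class_name &&) = delete
```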
@@ -1925,13 +1924,13 @@ int qnn_instance::register_rpcmem(void * p_data, Qnn_Tensor_t * p_tensor) {

     if (is_rpcmem_registered((QNN_VER_PTR(*p_tensor)->memHandle))) {
         GGMLQNN_LOG_WARN("tensor %s has been registered shared memory\n", (QNN_VER_PTR(*p_tensor)->name));
-        return 4;
+        return 3;
     }

     int32_t mem_fd = rpcmem_to_fd(p_data);
     if (-1 == mem_fd) {
         GGMLQNN_LOG_WARN("failed to get file descriptor\n");
-        return 5;
+        return 4;
     }
     GGMLQNN_LOG_DEBUG("mem_fd %d\n", mem_fd);
     Qnn_MemDescriptor_t descriptor = {
@@ -1947,9 +1946,8 @@ int qnn_instance::register_rpcmem(void * p_data, Qnn_Tensor_t * p_tensor) {
                      /*numDescriptors=*/1,
                      &handle);
     if (error != QNN_SUCCESS) {
-        GGMLQNN_LOG_WARN("failed to register shared memory, error %d, %s\n", QNN_GET_ERROR_CODE(error),
-                         strerror(error));
-        return 6;
+        GGMLQNN_LOG_WARN("failed to register shared memory, error %d, %s\n", QNN_GET_ERROR_CODE(error), strerror(error));
+        return 5;
     } else {
         GGMLQNN_LOG_INFO("tensor %s successfully register shared memory\n", (QNN_VER_PTR(*p_tensor)->name));
     }
@@ -1988,8 +1986,7 @@ Qnn_MemHandle_t qnn_instance::register_rpcmem(void * p_data, const uint32_t ran
         {{mem_fd}}
     };
     Qnn_MemHandle_t handle = nullptr;
-    auto error = _qnn_interface.qnn_mem_register(_qnn_context_handle, &descriptor,
-                                                 /*numDescriptors=*/1, &handle);
+    auto error = _qnn_interface.qnn_mem_register(_qnn_context_handle, &descriptor, /*numDescriptors=*/1, &handle);
     if (error != QNN_SUCCESS) {
         GGMLQNN_LOG_WARN("failed to register shared memory, error %d, %s", QNN_GET_ERROR_CODE(error), strerror(error));
         return nullptr;
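
Both register_rpcmem overloads build the same Qnn_MemDescriptor_t and hand it to qnn_mem_register. A minimal standalone sketch of that flow, assuming the QNN SDK's QnnMem.h types; the helper name and the ctx / buf_fd / dims parameters are illustrative, not from the patch:

```cpp
// Sketch of QNN shared-memory (ION/rpcmem) registration, mirroring the
// descriptor shape visible in the diff; names below are illustrative.
static Qnn_MemHandle_t register_shared_buffer(qnn_interface & iface,
                                              Qnn_ContextHandle_t ctx,
                                              int32_t buf_fd,   // from rpcmem_to_fd()
                                              uint32_t rank,
                                              uint32_t * dims,
                                              Qnn_DataType_t dtype) {
    Qnn_MemDescriptor_t descriptor = {
        {rank, dims, nullptr},  // memShape: rank, dimensions, shape config
        dtype,                  // dataType
        QNN_MEM_TYPE_ION,       // memType
        {{buf_fd}}              // ionInfo.fd, as in the diff's {{mem_fd}}
    };
    Qnn_MemHandle_t handle = nullptr;
    auto error = iface.qnn_mem_register(ctx, &descriptor, /*numDescriptors=*/1, &handle);
    if (error != QNN_SUCCESS) {
        return nullptr;  // caller falls back to regular tensor memory
    }
    return handle;
}
```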
@@ -2407,7 +2404,7 @@ int qnn_instance::qnn_init(const QnnSaver_Config_t ** saver_config) {
         if (QNN_PROFILE_NO_ERROR != _qnn_raw_interface.profileCreate(
                 _qnn_backend_handle, QNN_PROFILE_LEVEL_BASIC, &_qnn_profile_handle)) {
             GGMLQNN_LOG_WARN("unable to create profile handle in the backend\n");
-            return 7;
+            return 6;
         } else {
             GGMLQNN_LOG_DEBUG("initialize qnn profile successfully\n");
         }
@@ -2433,7 +2430,7 @@ int qnn_instance::qnn_init(const QnnSaver_Config_t ** saver_config) {
 #endif
     if (nullptr == _rpc_lib_handle) {
         GGMLQNN_LOG_WARN("failed to load qualcomm's rpc lib, error:%s\n", dlerror());
-        return 9;
+        return 8;
     } else {
         GGMLQNN_LOG_DEBUG("load rpcmem lib successfully\n");
         set_rpcmem_initialized(true);
@@ -2447,7 +2444,7 @@ int qnn_instance::qnn_init(const QnnSaver_Config_t ** saver_config) {
         || nullptr == _pfn_rpc_mem_to_fd) {
         GGMLQNN_LOG_WARN("unable to access symbols in QNN RPC lib. dlerror(): %s", dlerror());
         dlclose(_rpc_lib_handle);
-        return 10;
+        return 9;
     }

     if (nullptr != _pfn_rpc_mem_init) // make Qualcomm's SoC based low-end phone happy
@@ -2459,7 +2456,7 @@ int qnn_instance::qnn_init(const QnnSaver_Config_t ** saver_config) {
                                &_qnn_context_handle);
     if (nullptr == _qnn_context_handle) {
         GGMLQNN_LOG_WARN("why failed to initialize qnn context, error:%s\n", strerror(errno));
-        return 8;
+        return 10;
     } else {
         GGMLQNN_LOG_DEBUG("initialize qnn context successfully\n");
     }
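
The return-code edits above make qnn_init report a distinct, sequential code per failed initialization step (profile creation = 6, rpc lib load = 8, rpc symbol lookup = 9, context creation = 10). A hedged caller-side sketch; the factory function and constructor arguments are inferred from the member list in the first hunk, not taken from the patch:

```cpp
// Hypothetical caller: qnn_init() is assumed to return 0 on success and a
// small positive step-specific code on failure, per the returns above.
static qnn_instance * create_qnn_instance(const std::string & lib_path,
                                          const std::string & backend_name,
                                          const std::string & model_name) {
    auto * instance = new qnn_instance(lib_path, backend_name, model_name);
    int result = instance->qnn_init(nullptr);
    if (0 != result) {
        GGMLQNN_LOG_WARN("init qnn subsystem failed, error code %d\n", result);
        delete instance;
        return nullptr;
    }
    return instance;
}
```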
@@ -2751,29 +2748,33 @@ static bool ggml_qnn_can_handle_op(const struct ggml_tensor * tensor) {
         return true;
     }
     if (ggml_is_empty(tensor) || tensor->op == GGML_OP_RESHAPE
-        || tensor->op == GGML_OP_TRANSPOSE || tensor->op == GGML_OP_VIEW
-        || tensor->op == GGML_OP_PERMUTE) {
+        || tensor->op == GGML_OP_TRANSPOSE
+        || tensor->op == GGML_OP_VIEW
+        || tensor->op == GGML_OP_PERMUTE
+       ) {
         return false;
     }

     //TODO: support other op
-    bool supported_op = ((tensor->op == GGML_OP_ADD) || (tensor->op == GGML_OP_MUL_MAT)
-                         || (tensor->op == GGML_OP_MUL));
+    bool supported_op = ((tensor->op == GGML_OP_ADD)
+                         || (tensor->op == GGML_OP_MUL_MAT)
+                         || (tensor->op == GGML_OP_MUL)
+                        );
     if (!supported_op) {
         return false;
     }

     struct ggml_tensor * src0 = tensor->src[0];
     struct ggml_tensor * src1 = tensor->src[1];

-    const int64_t ne00 = tensor->src[0]->ne[0];
-    const int64_t ne01 = tensor->src[0]->ne[1];
+    const int64_t ne00 = tensor->src[0]->ne[0];
+    const int64_t ne01 = tensor->src[0]->ne[1];

-    const int64_t ne10 = tensor->src[1]->ne[0];
-    const int64_t ne11 = tensor->src[1]->ne[1];
+    const int64_t ne10 = tensor->src[1]->ne[0];
+    const int64_t ne11 = tensor->src[1]->ne[1];

-    const int64_t ne0 = tensor->ne[0];
-    const int64_t ne1 = tensor->ne[1];
+    const int64_t ne0 = tensor->ne[0];
+    const int64_t ne1 = tensor->ne[1];

     const uint32_t src0_rank = ggml_get_tensor_rank(src0);
     const uint32_t src1_rank = ggml_get_tensor_rank(src1);
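
Reformatted, the two guards in ggml_qnn_can_handle_op read as a simple policy: empty or view-like tensors are never offloaded, and only ADD, MUL_MAT, and MUL are candidates. A simplified restatement of those gates (the helper names are illustrative; the real function goes on to check ranks and dimensions):

```cpp
// Simplified restatement of the two gates above; helper names are
// illustrative, and the real function continues with rank/shape checks.
static bool op_is_view_like(const struct ggml_tensor * tensor) {
    return ggml_is_empty(tensor)
           || tensor->op == GGML_OP_RESHAPE
           || tensor->op == GGML_OP_TRANSPOSE
           || tensor->op == GGML_OP_VIEW
           || tensor->op == GGML_OP_PERMUTE;
}

static bool op_is_offload_candidate(const struct ggml_tensor * tensor) {
    return tensor->op == GGML_OP_ADD
           || tensor->op == GGML_OP_MUL_MAT
           || tensor->op == GGML_OP_MUL;
}
```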
