
PR: Refine ggml-qnn backend (QNN, Qualcomm Neural Network, aka Qualcomm AI Engine Direct) for latest ggml, whisper.cpp, llama.cpp #12049


Closed — wants to merge 37 commits

Changes from 1 commit

Commits (37)
74029f3
ggml-qnn: add Qualcomm QNN backend for GGML
zhouwg Feb 14, 2025
986a37d
ggml-qnn: santiy check
zhouwg Feb 15, 2025
af604d5
ggml-qnn: update script build-run-android.sh to compare peformance of…
zhouwg Feb 16, 2025
816ebb9
ggml-qnn: fix minor issue in test-backend-ops.cpp
zhouwg Feb 17, 2025
2a8020b
ggml-qnn: merge QNN RPC feature from https://github.com/zhouwg/kantv/…
zhouwg Feb 18, 2025
da4d007
ggml-qnn: sync from branch kantvai-ggmlqnn-npurpc
zhouwg Feb 18, 2025
7cb1a86
ggml-qnn: a concise approach to offload mulmat to QNN backend(sync fr…
zhouwg Feb 19, 2025
c8cf291
ggml-qnn: remove redundant codes
zhouwg Feb 20, 2025
84317c7
ggml-qnn: sync from branch kantvai-ggmlqnn-npurpc
zhouwg Feb 20, 2025
c6a04c6
ggml-qnn: sync from branch kantvai-ggmlqnn-npurpc
zhouwg Feb 20, 2025
59a2fbe
ggml-qnn: sync from branch kantvai-ggmlqnn-npurpc
zhouwg Feb 21, 2025
1e6f4a7
ggml-qnn: add Qualcomm QNN backend for GGML
zhouwg Feb 14, 2025
6974079
ggml-qnn: santiy check
zhouwg Feb 15, 2025
ea970f9
ggml-qnn: update script build-run-android.sh to compare peformance of…
zhouwg Feb 16, 2025
d0c01c0
ggml-qnn: fix minor issue in test-backend-ops.cpp
zhouwg Feb 17, 2025
b48ad85
ggml-qnn: merge QNN RPC feature from https://github.com/zhouwg/kantv/…
zhouwg Feb 18, 2025
5ac113b
ggml-qnn: sync from branch kantvai-ggmlqnn-npurpc
zhouwg Feb 18, 2025
31152be
ggml-qnn: a concise approach to offload mulmat to QNN backend(sync fr…
zhouwg Feb 19, 2025
e16dd3c
ggml-qnn: remove redundant codes
zhouwg Feb 20, 2025
1d56350
ggml-qnn: sync from branch kantvai-ggmlqnn-npurpc
zhouwg Feb 20, 2025
12f4911
ggml-qnn: sync from branch kantvai-ggmlqnn-npurpc
zhouwg Feb 20, 2025
37985f9
ggml-qnn: sync from branch kantvai-ggmlqnn-npurpc
zhouwg Feb 21, 2025
9fa0765
rebase to the latest upstream
zhouwg Feb 21, 2025
60ca941
ggml-qnn: fix a minior typo in internal doc
zhouwg Feb 23, 2025
d5d110d
ggml-qnn: refine function ggml_qnn_create_general_tensor() to avoid c…
zhouwg Feb 23, 2025
c687f26
ggml-qnn: fix a minor typo in source code
zhouwg Feb 24, 2025
d1b9d1b
build: avoid ggml-qnn backend breaking other backend's builds
zhouwg Feb 24, 2025
35a289a
ggml-qnn: remove redundant codes to make PR reviewers happy
zhouwg Feb 25, 2025
71dae47
ggml-qnn: refine code format
zhouwg Feb 25, 2025
d80b289
ggml-qnn: offload quantized type mulmat to QNN backend
zhouwg Feb 26, 2025
eb47de0
ggml-qnn: benchmark of real LLM inference on a Snapdragon 8 Gen3 phone
zhouwg Feb 26, 2025
36b58e3
ggml-qnn: refine source code structure to make code more clearly
zhouwg Feb 27, 2025
302e014
ggml-qnn: refine code
zhouwg Feb 27, 2025
a134884
ggml-qnn: enable release build with necessary logs to make reviewers …
zhouwg Feb 27, 2025
137b347
ggml-qnn: enable all quantize type with 2d mulmat
zhouwg Feb 27, 2025
653bc33
ggml-qnn: enable log output of GGMLQNN_LOG_INFO in command line mode …
zhouwg Feb 28, 2025
9d10e4f
ggml-qnn: Windows port --- step2
zhouwg Feb 28, 2025
ggml-qnn: sync from branch kantvai-ggmlqnn-npurpc
zhouwg committed Feb 21, 2025
commit 5ac113b7f119eff62cadd83675d6cc969a79ec02
133 changes: 60 additions & 73 deletions ggml/src/ggml-qnn/ggml-qnn.cpp
@@ -1903,7 +1903,7 @@ class qnn_instance {
return _qnn_mem_set.count(handle) != 0U;
}

bool enalbe_qnn_rpc() {
bool enable_qnn_rpc() {
return _enable_qnn_rpc;
}

@@ -1989,6 +1989,9 @@ class qnn_instance {
std::string _graph_name;
QNNBackend _device_id;
bool _enable_qnn_rpc = false; //FIXME:unknown issue with QNN RPC feature

DISABLE_COPY(qnn_instance);
DISABLE_MOVE(qnn_instance);
};

std::mutex qnn_instance::_init_mutex;
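
The DISABLE_COPY and DISABLE_MOVE macros added to qnn_instance above are not defined in this hunk; the sketch below is only a guess at the conventional pattern such helpers usually follow (deleting the copy and move special members), not the macros as they actually appear elsewhere in ggml-qnn.cpp.

```cpp
// Hypothetical definitions -- the real DISABLE_COPY / DISABLE_MOVE macros live
// elsewhere in the ggml-qnn sources; this only shows the usual pattern of
// deleting the copy and move special members.
#define DISABLE_COPY(class_name)                                   \
    class_name(const class_name &)             = delete;           \
    class_name & operator=(const class_name &) = delete

#define DISABLE_MOVE(class_name)                                   \
    class_name(class_name &&)             = delete;                \
    class_name & operator=(class_name &&) = delete

// usage mirrors the diff above:
// class qnn_instance { ... DISABLE_COPY(qnn_instance); DISABLE_MOVE(qnn_instance); };
```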
@@ -3106,6 +3109,8 @@ static void ggml_qnn_add(ggml_backend_t backend, ggml_tensor * op) {
uint32_t * tensor_1_dimensions = QNN_VER_PTR(*tensor_1)->dimensions;
uint32_t * tensor_2_dimensions = QNN_VER_PTR(*tensor_2)->dimensions;

bool enable_npu_rpc = instance->enable_qnn_rpc() && ctx->device == QNN_BACKEND_NPU;

if (!graph_initialized) {
graph_name = map_entry;
GGMLQNN_LOG_DEBUG("graph name %s", graph_name.c_str());
@@ -3121,37 +3126,29 @@ static void ggml_qnn_add(ggml_backend_t backend, ggml_tensor * op) {
return;
}

if (instance->enalbe_qnn_rpc()) {
if (ctx->device == QNN_BACKEND_NPU) { // QNN RPC feature only available for NPU backend
QNN_VER_PTR(*tensor_0)->memType = QNN_TENSORMEMTYPE_MEMHANDLE;
QNN_VER_PTR(*tensor_0)->clientBuf = {.data=nullptr, .dataSize=0};
if (enable_npu_rpc) {
QNN_VER_PTR(*tensor_0)->memType = QNN_TENSORMEMTYPE_MEMHANDLE;
QNN_VER_PTR(*tensor_0)->clientBuf = {.data=nullptr, .dataSize=0};

QNN_VER_PTR(*tensor_1)->memType = QNN_TENSORMEMTYPE_MEMHANDLE;
QNN_VER_PTR(*tensor_1)->clientBuf = {.data=nullptr, .dataSize=0};
QNN_VER_PTR(*tensor_1)->memType = QNN_TENSORMEMTYPE_MEMHANDLE;
QNN_VER_PTR(*tensor_1)->clientBuf = {.data=nullptr, .dataSize=0};

QNN_VER_PTR(*tensor_2)->memType = QNN_TENSORMEMTYPE_MEMHANDLE;
QNN_VER_PTR(*tensor_2)->clientBuf = {.data=nullptr, .dataSize=0};
}
QNN_VER_PTR(*tensor_2)->memType = QNN_TENSORMEMTYPE_MEMHANDLE;
QNN_VER_PTR(*tensor_2)->clientBuf = {.data=nullptr, .dataSize=0};
}

error = qnn_raw_interface.tensorCreateGraphTensor(graph_handle, tensor_0);
CHECK_QNN_API(error);
error = qnn_raw_interface.tensorCreateGraphTensor(graph_handle, tensor_1);
CHECK_QNN_API(error);
error = qnn_raw_interface.tensorCreateGraphTensor(graph_handle, tensor_2);
CHECK_QNN_API(error);

if (instance->enalbe_qnn_rpc()) {
if (ctx->device == QNN_BACKEND_NPU) { // QNN RPC feature only available for NPU backend
qnn_rpcbuffer_0 = create_rpc_buffer(instance, src0, tensor_0, true);
qnn_rpcbuffer_1 = create_rpc_buffer(instance, src1, tensor_1, true);
qnn_rpcbuffer_2 = create_rpc_buffer(instance, dst, tensor_2, false);
if (nullptr == qnn_rpcbuffer_0 || nullptr == qnn_rpcbuffer_1 ||
nullptr == qnn_rpcbuffer_2) {
GGMLQNN_LOG_INFO("create rpc buffer failure\n");
//FIXME: potential memory leak althought it shouldn't happen
return;
}
CHECK_QNN_API(error = qnn_raw_interface.tensorCreateGraphTensor(graph_handle, tensor_0));
CHECK_QNN_API(error = qnn_raw_interface.tensorCreateGraphTensor(graph_handle, tensor_1));
CHECK_QNN_API(error = qnn_raw_interface.tensorCreateGraphTensor(graph_handle, tensor_2));

if (enable_npu_rpc) {
qnn_rpcbuffer_0 = create_rpc_buffer(instance, src0, tensor_0, true);
qnn_rpcbuffer_1 = create_rpc_buffer(instance, src1, tensor_1, true);
qnn_rpcbuffer_2 = create_rpc_buffer(instance, dst, tensor_2, false);
if (nullptr == qnn_rpcbuffer_0 || nullptr == qnn_rpcbuffer_1 || nullptr == qnn_rpcbuffer_2) {
GGMLQNN_LOG_INFO("create rpc buffer failure\n");
//FIXME: potential memory leak althought it shouldn't happen
return;
}
} else {
QNN_VER_PTR(*tensor_0)->clientBuf = {src0->data, ggml_get_tensor_data_size(src0)};
@@ -3179,23 +3176,19 @@ static void ggml_qnn_add(ggml_backend_t backend, ggml_tensor * op) {
tensor_outputs
}
};
error = qnn_raw_interface.graphAddNode(graph_handle, op_config);
CHECK_QNN_API(error);
error = qnn_raw_interface.graphFinalize(graph_handle, nullptr, nullptr);
CHECK_QNN_API(error);
CHECK_QNN_API(error = qnn_raw_interface.graphAddNode(graph_handle, op_config));
CHECK_QNN_API(error = qnn_raw_interface.graphFinalize(graph_handle, nullptr, nullptr));
error = qnn_raw_interface.graphExecute(graph_handle,
tensor_inputs, 2,
tensor_outputs, 1,
nullptr, nullptr);
CHECK_QNN_API(error);

if (instance->enalbe_qnn_rpc()) {
if (ctx->device == QNN_BACKEND_NPU) { // QNN RPC feature only available for NPU backend
uint8_t * qnn_rpcbuffer = static_cast<uint8_t *>(instance->get_rpcmem_from_memhandle(QNN_VER_PTR(*tensor_2)->memHandle));
GGMLQNN_LOG_INFO("qnn_rpcbuffer = %p\n", qnn_rpcbuffer);
if (nullptr != qnn_rpcbuffer) {
memcpy(dst->data, qnn_rpcbuffer, ggml_nbytes(dst));
}
if (enable_npu_rpc) {
uint8_t * qnn_rpcbuffer = static_cast<uint8_t *>(instance->get_rpcmem_from_memhandle(QNN_VER_PTR(*tensor_2)->memHandle));
GGMLQNN_LOG_INFO("qnn_rpcbuffer = %p\n", qnn_rpcbuffer);
if (nullptr != qnn_rpcbuffer) {
memcpy(dst->data, qnn_rpcbuffer, ggml_nbytes(dst));
}
}

@@ -3223,25 +3216,23 @@ static void ggml_qnn_add(ggml_backend_t backend, ggml_tensor * op) {
QNN_VER_PTR(*tensor_2)->rank = ggml_get_tensor_rank(dst);
QNN_VER_PTR(*tensor_2)->dataType = dst_qnn_type;

if (instance->enalbe_qnn_rpc()) {
if (ctx->device == QNN_BACKEND_NPU) { // QNN RPC feature only available for NPU backend
//FIXME:why failure with test-backend-ops
uint8_t * qnn_buffer_0 = static_cast<uint8_t *>(instance->get_rpcmem_from_memhandle(QNN_VER_PTR(*tensor_0)->memHandle));
GGMLQNN_LOG_INFO("qnn_rpcbuffer_0 = %p\n", qnn_rpcbuffer_0);
if (nullptr != qnn_buffer_0) {
memcpy(qnn_buffer_0, src0->data, ggml_nbytes(src0));
}
if (enable_npu_rpc) {
//FIXME:why failure with test-backend-ops
uint8_t * qnn_buffer_0 = static_cast<uint8_t *>(instance->get_rpcmem_from_memhandle(QNN_VER_PTR(*tensor_0)->memHandle));
GGMLQNN_LOG_INFO("qnn_rpcbuffer_0 = %p\n", qnn_rpcbuffer_0);
if (nullptr != qnn_buffer_0) {
memcpy(qnn_buffer_0, src0->data, ggml_nbytes(src0));
}

uint8_t * qnn_buffer_1 = static_cast<uint8_t *>(instance->get_rpcmem_from_memhandle(QNN_VER_PTR(*tensor_1)->memHandle));
GGMLQNN_LOG_INFO("qnn_rpcbuffer_1 = %p\n", qnn_rpcbuffer_1);
if (nullptr != qnn_buffer_1) {
memcpy(qnn_buffer_1, src1->data, ggml_nbytes(src1));
}
uint8_t * qnn_buffer_1 = static_cast<uint8_t *>(instance->get_rpcmem_from_memhandle(QNN_VER_PTR(*tensor_1)->memHandle));
GGMLQNN_LOG_INFO("qnn_rpcbuffer_1 = %p\n", qnn_rpcbuffer_1);
if (nullptr != qnn_buffer_1) {
memcpy(qnn_buffer_1, src1->data, ggml_nbytes(src1));
}
} else {
QNN_VER_PTR(*tensor_0)->clientBuf = {src0->data, ggml_get_tensor_data_size(src0)};
QNN_VER_PTR(*tensor_1)->clientBuf = {src1->data, ggml_get_tensor_data_size(src1)};
QNN_VER_PTR(*tensor_2)->clientBuf = {dst->data, ggml_get_tensor_data_size(dst)};
QNN_VER_PTR(*tensor_2)->clientBuf = {dst->data, ggml_get_tensor_data_size(dst)};
}

Qnn_Tensor_t tensor_inputs[] = {
Expand All @@ -3255,16 +3246,13 @@ static void ggml_qnn_add(ggml_backend_t backend, ggml_tensor * op) {
tensor_inputs, 2,
tensor_outputs, 1,
nullptr, nullptr);
if (QNN_SUCCESS != error) {
GGMLQNN_LOG_INFO("error = %d\n", error);
}
CHECK_QNN_API(error);

if (instance->enalbe_qnn_rpc()) {
if (ctx->device == QNN_BACKEND_NPU) { // QNN RPC feature only available for NPU backend
//FIXME:why failure with test-backend-ops
uint8_t * qnn_buffer_2 = static_cast<uint8_t *>(instance->get_rpcmem_from_memhandle(QNN_VER_PTR(*tensor_2)->memHandle));
if (nullptr != qnn_buffer_2)
memcpy(dst->data, qnn_buffer_2, ggml_nbytes(dst));
if (enable_npu_rpc) {
//FIXME:why failure with test-backend-ops
uint8_t * qnn_buffer_2 = static_cast<uint8_t *>(instance->get_rpcmem_from_memhandle(QNN_VER_PTR(*tensor_2)->memHandle));
if (nullptr != qnn_buffer_2) {
memcpy(dst->data, qnn_buffer_2, ggml_nbytes(dst));
}
}
}
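
The ggml_qnn_add hunks above all apply the same two refactors: the nested `if (instance->enalbe_qnn_rpc()) { if (ctx->device == QNN_BACKEND_NPU) { ... } }` checks become a single `enable_npu_rpc` flag computed once per call, and the separate `error = ...; CHECK_QNN_API(error);` pairs are folded into one statement. A minimal, self-contained sketch of that shape, using stand-in types and a stub CHECK_QNN_API (the real macro and QNN types are defined in ggml-qnn.cpp):

```cpp
#include <cstdio>

// Stand-ins for the real QNN error handling (assumptions, not the actual definitions).
using Qnn_ErrorHandle_t = int;
constexpr Qnn_ErrorHandle_t QNN_SUCCESS = 0;
#define CHECK_QNN_API(stmt)                                      \
    do {                                                         \
        if ((stmt) != QNN_SUCCESS) {                             \
            std::printf("qnn call failed: %s\n", #stmt);         \
        }                                                        \
    } while (0)

static Qnn_ErrorHandle_t create_graph_tensor(int /*id*/) { return QNN_SUCCESS; }  // stub

static void build_add_graph(bool qnn_rpc_enabled, bool is_npu_device) {
    // compute the RPC condition once instead of re-testing it before every block
    const bool enable_npu_rpc = qnn_rpc_enabled && is_npu_device;

    Qnn_ErrorHandle_t error = QNN_SUCCESS;
    // fold the assignment into the check, replacing "error = ...; CHECK_QNN_API(error);"
    CHECK_QNN_API(error = create_graph_tensor(0));
    CHECK_QNN_API(error = create_graph_tensor(1));
    CHECK_QNN_API(error = create_graph_tensor(2));

    if (enable_npu_rpc) {
        // NPU-only path: RPC-backed buffers would be created and registered here
    } else {
        // CPU/GPU path: plain client buffers would be attached here
    }
}
```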
Expand Down Expand Up @@ -3358,12 +3346,9 @@ static void ggml_qnn_mul_mat(ggml_backend_t backend, ggml_tensor * op) {
GGMLQNN_LOG_INFO("can't create qnn graph handle with graph name %s, error = %d\n", graph_name.c_str(), error);
return;
}
error = qnn_raw_interface.tensorCreateGraphTensor(graph_handle, tensor_0);
CHECK_QNN_API(error);
error = qnn_raw_interface.tensorCreateGraphTensor(graph_handle, tensor_1);
CHECK_QNN_API(error);
error = qnn_raw_interface.tensorCreateGraphTensor(graph_handle, tensor_2);
CHECK_QNN_API(error);
CHECK_QNN_API(error = qnn_raw_interface.tensorCreateGraphTensor(graph_handle, tensor_0));
CHECK_QNN_API(error = qnn_raw_interface.tensorCreateGraphTensor(graph_handle, tensor_1));
CHECK_QNN_API(error = qnn_raw_interface.tensorCreateGraphTensor(graph_handle, tensor_2));

QNN_VER_PTR(*tensor_0)->clientBuf = {src0->data, ggml_get_tensor_data_size(src0)};
QNN_VER_PTR(*tensor_1)->clientBuf = {src1->data, ggml_get_tensor_data_size(src1)};
@@ -3389,18 +3374,18 @@ static void ggml_qnn_mul_mat(ggml_backend_t backend, ggml_tensor * op) {
tensor_outputs
}
};
error = qnn_raw_interface.graphAddNode(graph_handle, op_config);
CHECK_QNN_API(error);
error = qnn_raw_interface.graphFinalize(graph_handle, nullptr, nullptr);
CHECK_QNN_API(error);
CHECK_QNN_API(error = qnn_raw_interface.graphAddNode(graph_handle, op_config));
CHECK_QNN_API(error = qnn_raw_interface.graphFinalize(graph_handle, nullptr, nullptr));
error = qnn_raw_interface.graphExecute(graph_handle,
tensor_inputs, 2,
tensor_outputs, 1,
nullptr, nullptr);
CHECK_QNN_API(error);
auto graph_item = std::make_tuple(graph_handle, tensor_0, tensor_1, tensor_2);
instance->_qnn_graph_map[map_entry] = graph_item;

} else {

uint32_t dimensions_input_0[] = {(uint32_t) src0->ne[0], (uint32_t) src0->ne[1],
(uint32_t) src0->ne[2], (uint32_t) src0->ne[3]};
uint32_t dimensions_input_1[] = {(uint32_t) src1->ne[0], (uint32_t) src1->ne[1],
@@ -3410,9 +3395,11 @@ static void ggml_qnn_mul_mat(ggml_backend_t backend, ggml_tensor * op) {
QNN_VER_PTR(*tensor_0)->dimensions = dimensions_input_0;
QNN_VER_PTR(*tensor_0)->rank = ggml_get_tensor_rank(src0);
QNN_VER_PTR(*tensor_0)->dataType = src0_qnn_type;

QNN_VER_PTR(*tensor_1)->dimensions = dimensions_input_1;
QNN_VER_PTR(*tensor_1)->rank = ggml_get_tensor_rank(src1);
QNN_VER_PTR(*tensor_1)->dataType = src1_qnn_type;

QNN_VER_PTR(*tensor_2)->dimensions = dimensions_output;
QNN_VER_PTR(*tensor_2)->rank = ggml_get_tensor_rank(dst);
QNN_VER_PTR(*tensor_2)->dataType = dst_qnn_type;
@@ -3656,7 +3643,7 @@ static enum ggml_status ggml_backend_qnn_graph_compute(ggml_backend_t backend, s
ggml_backend_qnn_context * ctx = (ggml_backend_qnn_context *) backend->context;
GGML_UNUSED(ctx);

//GGMLQNN_LOG_DEBUG("cgraph->n_nodes %d", cgraph->n_nodes);
GGMLQNN_LOG_DEBUG("cgraph->n_nodes %d", cgraph->n_nodes);
for (int i = 0; i < cgraph->n_nodes; i++) {
ggml_tensor * node = cgraph->nodes[i];
if (ggml_is_empty(node) || node->op == GGML_OP_RESHAPE
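
The final hunk only re-enables the `GGMLQNN_LOG_DEBUG` line inside `ggml_backend_qnn_graph_compute`, whose loop skips nodes that carry no computation before dispatching the rest. A rough sketch of that loop shape, assuming the usual ggml convention for which ops are treated as no-ops (the actual function continues past the lines shown above and differs in detail):

```cpp
// Hypothetical sketch only -- not the backend's literal implementation.
#include "ggml.h"
#include "ggml-impl.h"   // assumed: exposes the struct ggml_cgraph definition to backend sources

static enum ggml_status qnn_graph_compute_sketch(struct ggml_cgraph * cgraph) {
    for (int i = 0; i < cgraph->n_nodes; i++) {
        struct ggml_tensor * node = cgraph->nodes[i];
        // nodes that carry no computation (views, reshapes, empty tensors) are skipped
        if (ggml_is_empty(node) || node->op == GGML_OP_RESHAPE
            || node->op == GGML_OP_VIEW || node->op == GGML_OP_PERMUTE
            || node->op == GGML_OP_TRANSPOSE || node->op == GGML_OP_NONE) {
            continue;
        }
        // remaining nodes would be routed to the matching QNN op handler here
    }
    return GGML_STATUS_SUCCESS;
}
```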