Skip to content

PR: Refine ggml-qnn backend (QNN, Qualcomm Neural Network, aka Qualcomm AI Engine Direct) for latest ggml, whisper.cpp, llama.cpp #12049

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Closed
wants to merge 37 commits into from
Closed
Show file tree
Hide file tree
Changes from 1 commit
Commits
Show all changes
37 commits
Select commit Hold shift + click to select a range
74029f3
ggml-qnn: add Qualcomm QNN backend for GGML
zhouwg Feb 14, 2025
986a37d
ggml-qnn: sanity check
zhouwg Feb 15, 2025
af604d5
ggml-qnn: update script build-run-android.sh to compare performance of…
zhouwg Feb 16, 2025
816ebb9
ggml-qnn: fix minor issue in test-backend-ops.cpp
zhouwg Feb 17, 2025
2a8020b
ggml-qnn: merge QNN RPC feature from https://github.com/zhouwg/kantv/…
zhouwg Feb 18, 2025
da4d007
ggml-qnn: sync from branch kantvai-ggmlqnn-npurpc
zhouwg Feb 18, 2025
7cb1a86
ggml-qnn: a concise approach to offload mulmat to QNN backend(sync fr…
zhouwg Feb 19, 2025
c8cf291
ggml-qnn: remove redundant codes
zhouwg Feb 20, 2025
84317c7
ggml-qnn: sync from branch kantvai-ggmlqnn-npurpc
zhouwg Feb 20, 2025
c6a04c6
ggml-qnn: sync from branch kantvai-ggmlqnn-npurpc
zhouwg Feb 20, 2025
59a2fbe
ggml-qnn: sync from branch kantvai-ggmlqnn-npurpc
zhouwg Feb 21, 2025
1e6f4a7
ggml-qnn: add Qualcomm QNN backend for GGML
zhouwg Feb 14, 2025
6974079
ggml-qnn: sanity check
zhouwg Feb 15, 2025
ea970f9
ggml-qnn: update script build-run-android.sh to compare performance of…
zhouwg Feb 16, 2025
d0c01c0
ggml-qnn: fix minor issue in test-backend-ops.cpp
zhouwg Feb 17, 2025
b48ad85
ggml-qnn: merge QNN RPC feature from https://github.com/zhouwg/kantv/…
zhouwg Feb 18, 2025
5ac113b
ggml-qnn: sync from branch kantvai-ggmlqnn-npurpc
zhouwg Feb 18, 2025
31152be
ggml-qnn: a concise approach to offload mulmat to QNN backend(sync fr…
zhouwg Feb 19, 2025
e16dd3c
ggml-qnn: remove redundant codes
zhouwg Feb 20, 2025
1d56350
ggml-qnn: sync from branch kantvai-ggmlqnn-npurpc
zhouwg Feb 20, 2025
12f4911
ggml-qnn: sync from branch kantvai-ggmlqnn-npurpc
zhouwg Feb 20, 2025
37985f9
ggml-qnn: sync from branch kantvai-ggmlqnn-npurpc
zhouwg Feb 21, 2025
9fa0765
rebase to the latest upstream
zhouwg Feb 21, 2025
60ca941
ggml-qnn: fix a minor typo in internal doc
zhouwg Feb 23, 2025
d5d110d
ggml-qnn: refine function ggml_qnn_create_general_tensor() to avoid c…
zhouwg Feb 23, 2025
c687f26
ggml-qnn: fix a minor typo in source code
zhouwg Feb 24, 2025
d1b9d1b
build: avoid ggml-qnn backend breaking other backend's builds
zhouwg Feb 24, 2025
35a289a
ggml-qnn: remove redundant codes to make PR reviewers happy
zhouwg Feb 25, 2025
71dae47
ggml-qnn: refine code format
zhouwg Feb 25, 2025
d80b289
ggml-qnn: offload quantized type mulmat to QNN backend
zhouwg Feb 26, 2025
eb47de0
ggml-qnn: benchmark of real LLM inference on a Snapdragon 8 Gen3 phone
zhouwg Feb 26, 2025
36b58e3
ggml-qnn: refine source code structure to make code more clearly
zhouwg Feb 27, 2025
302e014
ggml-qnn: refine code
zhouwg Feb 27, 2025
a134884
ggml-qnn: enable release build with necessary logs to make reviewers …
zhouwg Feb 27, 2025
137b347
ggml-qnn: enable all quantize type with 2d mulmat
zhouwg Feb 27, 2025
653bc33
ggml-qnn: enable log output of GGMLQNN_LOG_INFO in command line mode …
zhouwg Feb 28, 2025
9d10e4f
ggml-qnn: Windows port --- step2
zhouwg Feb 28, 2025
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Prev Previous commit
Next Next commit
ggml-qnn: refine code
  • Loading branch information
zhouwg committed Feb 27, 2025
commit 302e014aec4f3b074ed714a5e9776cce66ac86fa
5 changes: 2 additions & 3 deletions ggml/src/ggml-qnn/ggml-qnn-impl.h
Original file line number Diff line number Diff line change
Expand Up @@ -373,9 +373,9 @@ class qnn_interface {
}

private:
const QnnInterface_t *_qnn_interface = nullptr;
const QnnInterface_t * _qnn_interface = nullptr;

const QnnSystemInterface_t *_qnn_sys_interface = nullptr;
const QnnSystemInterface_t * _qnn_sys_interface = nullptr;
};

class qnn_instance {
Expand Down Expand Up @@ -582,7 +582,6 @@ const char * ggmlqnn_get_error_string(Qnn_ErrorHandle_t qnn_error_code);
Qnn_DataType_t ggmlqnn_datatype_from_ggml_datatype(enum ggml_type ggmltype);
void * ggmlqnn_type_trait(ggml_backend_qnn_context * ctx, ggml_tensor * op);
void ggmlqnn_get_graphkey_from_op(const ggml_tensor * op, std::string & output);
bool ggmlqnn_is_valid_params(ggml_backend_qnn_context * ctx, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst);
uint8_t * ggmlqnn_create_rpc_buffer(qnn_instance * instance, const ggml_tensor * ggml_tensor, Qnn_Tensor_t * qnn_tensor, bool b_copydata);
void ggmlqnn_print_tensors_info(const char * func_name, ggml_backend_qnn_context * ctx, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst);

Expand Down
25 changes: 17 additions & 8 deletions ggml/src/ggml-qnn/ggml-qnn-ops.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -23,6 +23,22 @@
#include "ggml-common.h"
#include "ggml-qnn-ops.h"

// Validate the common parameters passed to every QNN op implementation.
// Checks that the backend context and all three tensors are non-null, and
// that the context owns a live qnn_instance.
// Returns true when all checks pass; logs a warning and returns false otherwise.
// NOTE(review): the original logged the identical message "invalid params" on
// both failure paths, making the two distinct failures indistinguishable in
// logs — the messages are differentiated here to aid debugging.
static inline bool ggmlqnn_is_valid_params(ggml_backend_qnn_context * ctx, const ggml_tensor * src0,
                 const ggml_tensor * src1, ggml_tensor * dst) {
    if ((nullptr == ctx) || (nullptr == src0) || (nullptr == src1) || (nullptr == dst)) {
        GGMLQNN_LOG_WARN("invalid params: null context or tensor\n");
        return false;
    }

    // a usable backend context must hold an initialized QNN instance
    qnn_instance * instance = ctx->instance;
    if (nullptr == instance) {
        GGMLQNN_LOG_WARN("invalid params: null qnn instance\n");
        return false;
    }

    return true;
}

#define GGMLQNN_CHECK_PARAMS(ctx, src0, src1, dst) \
do { \
if (!ggmlqnn_is_valid_params((ctx), (src0), (src1), (dst))) { \
Expand Down Expand Up @@ -491,11 +507,10 @@ void ggml_qnn_mul_mat(ggml_backend_qnn_context * ctx, ggml_tensor * op) {
QNN_VER_PTR(*p_tensor2)->dimensions = tensor_2_dimensions;
op_perf.info();
}

// Placeholder for the GGML repeat op — no QNN implementation yet; intentionally a no-op.
void ggml_qnn_repeat(ggml_backend_qnn_context * ctx, ggml_tensor * dst) {
}

// Placeholder for the GGML add op — no QNN implementation yet; intentionally a no-op.
void ggml_qnn_add(ggml_backend_qnn_context * ctx, ggml_tensor * dst) {
}

// Placeholder for the GGML div op — no QNN implementation yet; intentionally a no-op.
void ggml_qnn_div(ggml_backend_qnn_context * ctx, ggml_tensor * dst) {
}
Expand Down Expand Up @@ -539,12 +554,6 @@ void ggml_qnn_upsample_nearest2d(ggml_backend_qnn_context * ctx, ggml_tensor * d
// Placeholder for the GGML pad op — no QNN implementation yet; intentionally a no-op.
void ggml_qnn_pad(ggml_backend_qnn_context * ctx, ggml_tensor * dst) {
}

static void ggml_qnn_avg_pool2d(ggml_backend_qnn_context * ctx, ggml_tensor * dst) {
}

static void ggml_qnn_max_pool2d(ggml_backend_qnn_context * ctx, ggml_tensor * dst) {
}

// Placeholder for the GGML pool2d op — no QNN implementation yet; intentionally a no-op.
void ggml_qnn_pool2d(ggml_backend_qnn_context * ctx, ggml_tensor * dst) {
}

Expand Down
2 changes: 1 addition & 1 deletion ggml/src/ggml-qnn/ggml-qnn-ops.h
Original file line number Diff line number Diff line change
Expand Up @@ -24,8 +24,8 @@
#include "ggml-qnn-impl.h"
void ggml_qnn_general_node(ggml_backend_qnn_context * ctx, ggml_tensor * dst);
void ggml_qnn_mul_mat(ggml_backend_qnn_context * ctx, ggml_tensor * dst);

void ggml_qnn_repeat(ggml_backend_qnn_context * ctx, ggml_tensor * dst);
void ggml_qnn_add(ggml_backend_qnn_context * ctx, ggml_tensor * dst);
void ggml_qnn_div(ggml_backend_qnn_context * ctx, ggml_tensor * dst);
void ggml_qnn_leaky_relu(ggml_backend_qnn_context * ctx, ggml_tensor * dst);
void ggml_qnn_concat(ggml_backend_qnn_context * ctx, ggml_tensor * dst);
Expand Down
16 changes: 0 additions & 16 deletions ggml/src/ggml-qnn/ggml-qnn.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -1139,22 +1139,6 @@ void ggmlqnn_get_graphkey_from_op(const ggml_tensor * op, std::string & output)
}
}

bool ggmlqnn_is_valid_params(ggml_backend_qnn_context * ctx, const ggml_tensor * src0,
const ggml_tensor * src1, ggml_tensor * dst) {
if ((nullptr == ctx) || (nullptr == src0) || (nullptr == src1) || (nullptr == dst)) {
GGMLQNN_LOG_WARN("invalid params\n");
return false;
}

qnn_instance * instance = ctx->instance;
if (nullptr == instance) {
GGMLQNN_LOG_WARN("invalid params\n");
return false;
}

return true;
}

template<typename Fn>
Fn load_qnn_functionpointers(void * handle, const char * function_name) {
#if defined(__ANDROID__) || defined(__linux__)
Expand Down