Skip to content

Qualcomm AI Engine Direct - Streaming Mimi Enablement #10570

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Open
wants to merge 1 commit into
base: main
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
3 changes: 3 additions & 0 deletions backends/qualcomm/_passes/lift_constant_scalar_operands.py
Original file line number Diff line number Diff line change
Expand Up @@ -40,10 +40,13 @@ class TensorOpInfo:
aten.ne.Scalar: TensorOpInfo(aten.ne.Tensor, False, False),
aten.add.Scalar: TensorOpInfo(aten.add.Tensor, False, False),
aten.add_.Scalar: TensorOpInfo(aten.add_.Tensor, False, False),
# For below cases, refer to LiftAddTensor Model in UT for sample
aten.add.Tensor: TensorOpInfo(aten.add.Tensor, False, False),
aten.div.Scalar: TensorOpInfo(aten.div.Tensor, False, False),
aten.mul.Scalar: TensorOpInfo(aten.mul.Tensor, False, False),
aten.rsub.Scalar: TensorOpInfo(aten.rsub.Tensor, False, False),
aten.sub.Scalar: TensorOpInfo(aten.sub.Tensor, False, False),
aten.sub.Tensor: TensorOpInfo(aten.sub.Tensor, False, False),
aten.pow.Tensor_Scalar: TensorOpInfo(aten.pow.Tensor_Tensor, False, False),
# The scalar number arg[1] is missing when using default. Result in a corner case to deal
aten.leaky_relu.default: TensorOpInfo(aten.prelu.default, True, False),
Expand Down
1 change: 1 addition & 0 deletions backends/qualcomm/_passes/remove_redundancy.py
Original file line number Diff line number Diff line change
Expand Up @@ -22,6 +22,7 @@ def __init__(self):
exir_ops.edge.aten.clone.default: self._default_condition,
torch.ops.aten.alias.default: self._default_condition,
exir_ops.edge.aten.alias.default: self._default_condition,
exir_ops.edge.aten.alias_copy.default: self._default_condition,
exir_ops.edge.aten.lift_fresh_copy.default: self._default_condition,
# remove this target if '_skip_dim_order' is set to False
exir_ops.edge.dim_order_ops._to_dim_order_copy.default: self._dim_order_op_condition,
Expand Down
9 changes: 9 additions & 0 deletions backends/qualcomm/tests/models.py
Original file line number Diff line number Diff line change
Expand Up @@ -939,6 +939,15 @@ def forward(self, x):
return self.constant < x


class LiftAddTensor(torch.nn.Module):
    """Toy module whose forward adds a Python-computed scalar to the input.

    The constant is produced by an arithmetic expression inside ``forward``
    so that export sees an ``aten.add.Tensor`` with a constant operand —
    this exercises the lift_constant_scalar_operands pass (see the
    ``aten.add.Tensor`` entry referencing this model).
    """

    def __init__(self):
        super().__init__()

    def forward(self, x):
        # Evaluated in Python at trace time; the resulting constant operand
        # is what the lifting pass must handle.
        offset = 2 - 1
        return x + offset


class Linear(torch.nn.Module):
def __init__(self, use_bias: bool = True):
super().__init__()
Expand Down
6 changes: 6 additions & 0 deletions backends/qualcomm/tests/test_qnn_delegate.py
Original file line number Diff line number Diff line change
Expand Up @@ -980,6 +980,12 @@ def test_qnn_backend_einsum_outer_product_relu(self):
)
self.lower_module_and_test_output(module, sample_input)

# TODO: Create a new UT class for passes specific checks
def test_qnn_backend_lift_add_tensor(self):
    """Lower LiftAddTensor and compare backend output against eager mode.

    Exercises the lift_constant_scalar_operands pass on an int32 input.
    """
    module = LiftAddTensor()  # noqa: F405
    sample_input = (torch.tensor([1, 2, 3, 4], dtype=torch.int32),)
    self.lower_module_and_test_output(module, sample_input)

@unittest.skip("Fail because of bad accuracy")
def test_qnn_backend_moe_feed_forward(self):
args = ModelArgs()
Expand Down
3 changes: 3 additions & 0 deletions examples/qualcomm/CMakeLists.txt
Original file line number Diff line number Diff line change
Expand Up @@ -90,6 +90,9 @@ add_subdirectory(${CMAKE_CURRENT_SOURCE_DIR}/executor_runner)
# build qnn_llama_runner for llama
add_subdirectory(${CMAKE_CURRENT_SOURCE_DIR}/oss_scripts/llama)

# build qnn_mimi_decoder_runner (streaming Mimi decoder, oss_scripts/moshi)
add_subdirectory(${CMAKE_CURRENT_SOURCE_DIR}/oss_scripts/moshi)

# build qaihub_llama2_7b_runner and qaihub_llama3_8b_runner
add_subdirectory(${CMAKE_CURRENT_SOURCE_DIR}/qaihub_scripts/llama)

Expand Down
33 changes: 33 additions & 0 deletions examples/qualcomm/oss_scripts/moshi/CMakeLists.txt
Original file line number Diff line number Diff line change
@@ -0,0 +1,33 @@
# Copyright (c) Qualcomm Innovation Center, Inc.
# All rights reserved
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.

# Sources for the Mimi decoder runner executable.
set(_qnn_mimi_decoder_runner__srcs
    ${CMAKE_CURRENT_LIST_DIR}/qnn_mimi_decoder_runner.cpp
    ${CMAKE_CURRENT_LIST_DIR}/runner/runner.cpp
    ${CMAKE_CURRENT_LIST_DIR}/runner/runner.h
)

# build mimi decoder runner
add_executable(qnn_mimi_decoder_runner ${_qnn_mimi_decoder_runner__srcs})
target_include_directories(
  qnn_mimi_decoder_runner PUBLIC ${_common_include_directories}
)
# PRIVATE: this is an executable — nothing consumes its link interface.
target_link_libraries(
  qnn_mimi_decoder_runner
  PRIVATE
  qnn_executorch_backend
  executorch_core
  extension_module
  extension_data_loader
  gflags
)

# BUGFIX(review): the original applied the options to qnn_llama_runner, a
# target defined in a different directory — so qnn_mimi_decoder_runner never
# received ${_common_compile_options}, and configuring this file standalone
# would fail on the unknown target.
target_compile_options(
  qnn_mimi_decoder_runner PUBLIC ${_common_compile_options}
)

# Look up shared libraries next to the binary at runtime.
set_target_properties(
  qnn_mimi_decoder_runner PROPERTIES LINK_FLAGS "-Wl,-rpath='$ORIGIN'"
)
Loading
Loading