From a84810644a200a70fd3d61e8c8441af8bfa5809c Mon Sep 17 00:00:00 2001
From: Fang Xiaolong
Date: Wed, 8 Jan 2025 18:51:13 +0800
Subject: [PATCH] Add Ascend NPU support.

---
 python-package/README.md                          | 2 +-
 python-package/insightface/model_zoo/model_zoo.py | 8 +++++++-
 2 files changed, 8 insertions(+), 2 deletions(-)

diff --git a/python-package/README.md b/python-package/README.md
index 69c199b56..063adf903 100644
--- a/python-package/README.md
+++ b/python-package/README.md
@@ -14,7 +14,7 @@
 For ``insightface<=0.1.5``, we use MXNet as inference backend.
 
 Starting from insightface>=0.2, we use onnxruntime as inference backend.
 
-You have to install ``onnxruntime-gpu`` manually to enable GPU inference, or install ``onnxruntime`` to use CPU only inference.
+You have to install ``onnxruntime-gpu`` manually to enable GPU inference, ``onnxruntime-cann`` manually to enable Ascend NPU inference, or ``onnxruntime`` for CPU-only inference.
 
 ## Change Log

diff --git a/python-package/insightface/model_zoo/model_zoo.py b/python-package/insightface/model_zoo/model_zoo.py
index fc6283114..11938ec9f 100644
--- a/python-package/insightface/model_zoo/model_zoo.py
+++ b/python-package/insightface/model_zoo/model_zoo.py
@@ -8,6 +8,7 @@
 import os.path as osp
 import glob
 import onnxruntime
+import importlib.util
 from .arcface_onnx import *
 from .retinaface import *
 #from .scrfd import *
@@ -68,7 +69,12 @@ def find_onnx_file(dir_path):
     return paths[-1]
 
 def get_default_providers():
-    return ['CUDAExecutionProvider', 'CPUExecutionProvider']
+    # `acl` is a base module on Ascend NPUs; if it is importable, we are running on an Ascend device.
+    if importlib.util.find_spec('acl') is not None:
+        providers = ['CANNExecutionProvider', 'CPUExecutionProvider']
+    else:
+        providers = ['CUDAExecutionProvider', 'CPUExecutionProvider']
+    return providers
 
 def get_default_provider_options():
     return None
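
For reviewers, a quick way to sanity-check the provider selection after this
patch (a minimal sketch, assuming ``onnxruntime-cann`` is installed on the
Ascend host; ``buffalo_l`` is just the default insightface model pack, not
something this patch introduces):

    import onnxruntime
    from insightface.app import FaceAnalysis

    # On an Ascend host with onnxruntime-cann installed, CANNExecutionProvider
    # should be listed here; otherwise only CPU (and CUDA, if available) appear.
    print(onnxruntime.get_available_providers())

    # When no explicit `providers` argument is passed, the model zoo falls back
    # to get_default_providers(), so the CANN provider should be picked up
    # automatically on Ascend devices.
    app = FaceAnalysis(name='buffalo_l')
    app.prepare(ctx_id=0, det_size=(640, 640))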