Limit combinations of backends and targets in demos and benchmark (#145)
* limit backend and target combination in demos and benchmark
* simpler version checking
Files changed:

- demo.py (+33 -30)
- facial_fer_model.py (+3 -5)
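Before the diffs, a minimal sketch of the pattern this PR applies across demos and the benchmark: a single CLI index selects from a table of valid (backend, target) pairs, so users can no longer pass an unsupported combination. The `resolve` helper below is illustrative only, not part of the PR:

```python
import cv2 as cv

# Valid (backend, target) combinations, indexed by a single CLI integer.
backend_target_pairs = [
    [cv.dnn.DNN_BACKEND_OPENCV, cv.dnn.DNN_TARGET_CPU],   # 0: default
    [cv.dnn.DNN_BACKEND_CUDA,   cv.dnn.DNN_TARGET_CUDA],  # 1: CUDA GPU
]

def resolve(index):
    # Illustrative helper (not in the PR): an out-of-range index fails fast
    # instead of silently configuring an unsupported backend/target pair.
    if not 0 <= index < len(backend_target_pairs):
        raise ValueError('invalid backend-target index: {}'.format(index))
    return backend_target_pairs[index]

backend_id, target_id = resolve(0)
```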
    	
demo.py CHANGED

```diff
@@ -11,38 +11,38 @@ from facial_fer_model import FacialExpressionRecog
 sys.path.append('../face_detection_yunet')
 from yunet import YuNet
 
-def str2bool(v):
-    if v.lower() in ['on', 'yes', 'true', 'y', 't']:
-        return True
-    elif v.lower() in ['off', 'no', 'false', 'n', 'f']:
-        return False
-    else:
-        raise NotImplementedError
-
-backends = [cv.dnn.DNN_BACKEND_OPENCV, cv.dnn.DNN_BACKEND_CUDA]
-targets = [cv.dnn.DNN_TARGET_CPU, cv.dnn.DNN_TARGET_CUDA, cv.dnn.DNN_TARGET_CUDA_FP16]
-help_msg_backends = "Choose one of the computation backends: {:d}: OpenCV implementation (default); {:d}: CUDA"
-help_msg_targets = "Chose one of the target computation devices: {:d}: CPU (default); {:d}: CUDA; {:d}: CUDA fp16"
-try:
-    backends += [cv.dnn.DNN_BACKEND_TIMVX]
-    targets += [cv.dnn.DNN_TARGET_NPU]
-    help_msg_backends += "; {:d}: TIMVX"
-    help_msg_targets += "; {:d}: NPU"
-except:
-    print('This version of OpenCV does not support TIM-VX and NPU. Visit https://github.com/opencv/opencv/wiki/TIM-VX-Backend-For-Running-OpenCV-On-NPU for more information.')
+# Check OpenCV version
+assert cv.__version__ >= "4.7.0", \
+       "Please install latest opencv-python to try this demo: python3 -m pip install --upgrade opencv-python"
+
+# Valid combinations of backends and targets
+backend_target_pairs = [
+    [cv.dnn.DNN_BACKEND_OPENCV, cv.dnn.DNN_TARGET_CPU],
+    [cv.dnn.DNN_BACKEND_CUDA,   cv.dnn.DNN_TARGET_CUDA],
+    [cv.dnn.DNN_BACKEND_CUDA,   cv.dnn.DNN_TARGET_CUDA_FP16],
+    [cv.dnn.DNN_BACKEND_TIMVX,  cv.dnn.DNN_TARGET_NPU],
+    [cv.dnn.DNN_BACKEND_CANN,   cv.dnn.DNN_TARGET_NPU]
+]
 
 parser = argparse.ArgumentParser(description='Facial Expression Recognition')
-parser.add_argument('--input', '-i', type=str, help='Path to the input image. Omit for using default camera.')
-parser.add_argument('--model', '-m', type=str, default='./facial_expression_recognition_mobilefacenet_2022july.onnx', help='Path to the facial expression recognition model.')
-parser.add_argument('--backend', '-b', type=int, default=backends[0], help=help_msg_backends.format(*backends))
-parser.add_argument('--target', '-t', type=int, default=targets[0], help=help_msg_targets.format(*targets))
-parser.add_argument('--save', '-s', type=str2bool, default=False, help='Set true to save results. This flag is invalid when using camera.')
-parser.add_argument('--vis', '-v', type=str2bool, default=True, help='Set true to open a window for result visualization. This flag is invalid when using camera.')
+parser.add_argument('--input', '-i', type=str,
+                    help='Path to the input image. Omit for using default camera.')
+parser.add_argument('--model', '-m', type=str, default='./facial_expression_recognition_mobilefacenet_2022july.onnx',
+                    help='Path to the facial expression recognition model.')
+parser.add_argument('--backend_target', '-bt', type=int, default=0,
+                    help='''Choose one of the backend-target pair to run this demo:
+                        {:d}: (default) OpenCV implementation + CPU,
+                        {:d}: CUDA + GPU (CUDA),
+                        {:d}: CUDA + GPU (CUDA FP16),
+                        {:d}: TIM-VX + NPU,
+                        {:d}: CANN + NPU
+                    '''.format(*[x for x in range(len(backend_target_pairs))]))
+parser.add_argument('--save', '-s', action='store_true',
+                    help='Specify to save results. This flag is invalid when using camera.')
+parser.add_argument('--vis', '-v', action='store_true',
+                    help='Specify to open a window for result visualization. This flag is invalid when using camera.')
 args = parser.parse_args()
 
-
 def visualize(image, det_res, fer_res, box_color=(0, 255, 0), text_color=(0, 0, 255)):
 
     print('%s %3d faces detected.' % (datetime.datetime.now(), len(det_res)))
@@ -83,11 +83,14 @@ def process(detect_model, fer_model, frame):
 
 
 if __name__ == '__main__':
+    backend_id = backend_target_pairs[args.backend_target][0]
+    target_id = backend_target_pairs[args.backend_target][1]
+
     detect_model = YuNet(modelPath='../face_detection_yunet/face_detection_yunet_2022mar.onnx')
 
     fer_model = FacialExpressionRecog(modelPath=args.model,
-                                      backendId=args.backend,
-                                      targetId=args.target)
+                                      backendId=backend_id,
+                                      targetId=target_id)
 
     # If input is an image
     if args.input is not None:
```
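With the new flag, device selection is a single index. A quick way to exercise it without a camera, assuming the `parser` and `backend_target_pairs` defined in the diff above, might be:

```python
# Parse as if invoked `python demo.py --backend_target 1` (CUDA + GPU).
args = parser.parse_args(['--backend_target', '1'])
backend_id, target_id = backend_target_pairs[args.backend_target]
```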
    	
facial_fer_model.py CHANGED

```diff
@@ -29,12 +29,10 @@ class FacialExpressionRecog:
     def name(self):
         return self.__class__.__name__
 
-    def setBackend(self, backend_id):
-        self._backendId = backend_id
+    def setBackendAndTarget(self, backendId, targetId):
+        self._backendId = backendId
+        self._targetId = targetId
         self._model.setPreferableBackend(self._backendId)
-
-    def setTarget(self, target_id):
-        self._targetId = target_id
         self._model.setPreferableTarget(self._targetId)
 
     def _preprocess(self, image, bbox):
```
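The merged setter keeps backend and target in sync for callers that reconfigure an existing model, presumably including the benchmark named in the PR title. A minimal sketch, assuming the model file sits in the working directory and that the constructor's backend/target arguments have CPU defaults:

```python
import cv2 as cv
from facial_fer_model import FacialExpressionRecog

# Assumes backendId/targetId default to the OpenCV/CPU pair in the constructor.
model = FacialExpressionRecog(modelPath='./facial_expression_recognition_mobilefacenet_2022july.onnx')
# Switch both the preferable backend and target in one call.
model.setBackendAndTarget(cv.dnn.DNN_BACKEND_CUDA, cv.dnn.DNN_TARGET_CUDA)
```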
