Commit 4c77d30 · 1 Parent(s): f823f10
ytfeng committed

Add options for demo scripts to select backend & targets (#43)

* add options for selecting backend & targets

* add eol

Files changed (2):
  1. demo.py +17 -2
  2. pphumanseg.py +12 -4
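
For reference, a couple of sketch invocations of the new options (the integer IDs are the values of OpenCV's DNN enum constants and can differ between builds; rely on the list printed by --help rather than on the numbers shown here):

    # Default OpenCV backend on CPU (same behaviour as before this commit)
    python demo.py --input /path/to/image.jpg

    # CUDA backend and CUDA target, assuming a CUDA-enabled OpenCV build in which
    # DNN_BACKEND_CUDA == 5 and DNN_TARGET_CUDA == 6
    python demo.py --input /path/to/image.jpg --backend 5 --target 6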
demo.py CHANGED
@@ -19,9 +19,23 @@ def str2bool(v):
     else:
         raise NotImplementedError
 
+backends = [cv.dnn.DNN_BACKEND_OPENCV, cv.dnn.DNN_BACKEND_CUDA]
+targets = [cv.dnn.DNN_TARGET_CPU, cv.dnn.DNN_TARGET_CUDA, cv.dnn.DNN_TARGET_CUDA_FP16]
+help_msg_backends = "Choose one of the computation backends: {:d}: OpenCV implementation (default); {:d}: CUDA"
+help_msg_targets = "Choose one of the target computation devices: {:d}: CPU (default); {:d}: CUDA; {:d}: CUDA fp16"
+try:
+    backends += [cv.dnn.DNN_BACKEND_TIMVX]
+    targets += [cv.dnn.DNN_TARGET_NPU]
+    help_msg_backends += "; {:d}: TIMVX"
+    help_msg_targets += "; {:d}: NPU"
+except:
+    print('This version of OpenCV does not support TIM-VX and NPU. Visit https://gist.github.com/fengyuentau/5a7a5ba36328f2b763aea026c43fa45f for more information.')
+
 parser = argparse.ArgumentParser(description='PPHumanSeg (https://github.com/PaddlePaddle/PaddleSeg/tree/release/2.2/contrib/PP-HumanSeg)')
 parser.add_argument('--input', '-i', type=str, help='Path to the input image. Omit for using default camera.')
 parser.add_argument('--model', '-m', type=str, default='human_segmentation_pphumanseg_2021oct.onnx', help='Path to the model.')
+parser.add_argument('--backend', '-b', type=int, default=backends[0], help=help_msg_backends.format(*backends))
+parser.add_argument('--target', '-t', type=int, default=targets[0], help=help_msg_targets.format(*targets))
 parser.add_argument('--save', '-s', type=str, default=False, help='Set true to save results. This flag is invalid when using camera.')
 parser.add_argument('--vis', '-v', type=str2bool, default=True, help='Set true to open a window for result visualization. This flag is invalid when using camera.')
 args = parser.parse_args()
@@ -84,7 +98,7 @@ def visualize(image, result, weight=0.6, fps=None):
 
 if __name__ == '__main__':
     # Instantiate PPHumanSeg
-    model = PPHumanSeg(modelPath=args.model)
+    model = PPHumanSeg(modelPath=args.model, backendId=args.backend, targetId=args.target)
 
     if args.input is not None:
         # Read image and resize to 192x192
@@ -138,4 +152,5 @@ if __name__ == '__main__':
             # Visualize results in a new window
             cv.imshow('PPHumanSeg Demo', frame)
 
-            tm.reset()
+            tm.reset()
+
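
The try/except block added above is a capability probe: on OpenCV releases that predate TIM-VX/NPU support the constants simply do not exist on cv.dnn, so the first access raises AttributeError and the extra options are skipped. A minimal sketch of the same idea, using hasattr to make the probe explicit (an illustration, not code from this commit):

    import cv2 as cv

    # Backends/targets exposed by any recent OpenCV 4.x build of the DNN module.
    backends = [cv.dnn.DNN_BACKEND_OPENCV, cv.dnn.DNN_BACKEND_CUDA]
    targets = [cv.dnn.DNN_TARGET_CPU, cv.dnn.DNN_TARGET_CUDA, cv.dnn.DNN_TARGET_CUDA_FP16]

    # The TIM-VX/NPU constants were only added in newer OpenCV releases,
    # so check for them before advertising the extra options to the user.
    if hasattr(cv.dnn, 'DNN_BACKEND_TIMVX') and hasattr(cv.dnn, 'DNN_TARGET_NPU'):
        backends.append(cv.dnn.DNN_BACKEND_TIMVX)
        targets.append(cv.dnn.DNN_TARGET_NPU)

    print(backends)  # the printed integer IDs are what --backend expects
    print(targets)   # the printed integer IDs are what --target expects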
pphumanseg.py CHANGED
@@ -8,9 +8,14 @@ import numpy as np
 import cv2 as cv
 
 class PPHumanSeg:
-    def __init__(self, modelPath):
+    def __init__(self, modelPath, backendId=0, targetId=0):
         self._modelPath = modelPath
+        self._backendId = backendId
+        self._targetId = targetId
+
         self._model = cv.dnn.readNet(self._modelPath)
+        self._model.setPreferableBackend(self._backendId)
+        self._model.setPreferableTarget(self._targetId)
 
         self._inputNames = ''
         self._outputNames = ['save_infer_model/scale_0.tmp_1']
@@ -23,10 +28,12 @@ class PPHumanSeg:
         return self.__class__.__name__
 
     def setBackend(self, backend_id):
-        self._model.setPreferableBackend(backend_id)
+        self._backendId = backend_id
+        self._model.setPreferableBackend(self._backendId)
 
     def setTarget(self, target_id):
-        self._model.setPreferableTarget(target_id)
+        self._targetId = target_id
+        self._model.setPreferableTarget(self._targetId)
 
     def _preprocess(self, image):
         image = image.astype(np.float32, copy=False) / 255.0
@@ -52,4 +59,5 @@ class PPHumanSeg:
 
     def _postprocess(self, outputBlob):
         result = np.argmax(outputBlob[0], axis=1).astype(np.uint8)
-        return result
+        return result
+
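
With these changes PPHumanSeg can be pinned to a backend/target either at construction time or afterwards through the setters, which now also remember the chosen IDs. A minimal usage sketch, assuming the model file sits next to the script and that infer() is the entry point demo.py calls (the default IDs of 0 correspond to cv.dnn.DNN_BACKEND_DEFAULT and cv.dnn.DNN_TARGET_CPU):

    import cv2 as cv
    from pphumanseg import PPHumanSeg

    # Select the backend/target up front ...
    model = PPHumanSeg(modelPath='human_segmentation_pphumanseg_2021oct.onnx',
                       backendId=cv.dnn.DNN_BACKEND_OPENCV,
                       targetId=cv.dnn.DNN_TARGET_CPU)

    # ... or switch later; both setters now cache the ID and reconfigure the net.
    # (Requires a CUDA-enabled OpenCV build to actually run on CUDA.)
    model.setBackend(cv.dnn.DNN_BACKEND_CUDA)
    model.setTarget(cv.dnn.DNN_TARGET_CUDA)

    image = cv.imread('input.jpg')        # hypothetical input image
    image = cv.resize(image, (192, 192))  # the demo resizes inputs to 192x192
    result = model.infer(image)           # per-pixel class labels (see demo.py for full preprocessing)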