Mirror of https://github.com/openmv/openmv.git (synced 2025-11-04 14:49:50 +08:00)
scripts/libraries: Vectorize NMS using numpy.
The new numpy-based NMS algorithm can now handle a large number of bounding box candidates without slowing to a crawl. Note that the FOMO postprocessor had to be rewritten to be compatible with the new NMS algorithm.
This commit is contained in:
parent 9958d8ee62
commit fafa8271ec
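For context, a minimal usage sketch of the new postprocessing API, mirroring the updated example in the diff below (the model path and threshold come from that example; the print loop is illustrative only, not part of the commit):

import sensor
import time
import ml
from ml.postprocessing import fomo_postprocess

sensor.reset()
sensor.set_pixformat(sensor.RGB565)
sensor.set_framesize(sensor.QVGA)
sensor.set_windowing((240, 240))
sensor.skip_frames(time=2000)

model = ml.Model("/rom/fomo_face_detection.tflite")
fomo = fomo_postprocess(threshold=0.4)   # replaces the hand-written NMS callback

clock = time.clock()
while True:
    clock.tick()
    img = sensor.snapshot()
    # The callback returns one list per class of ((x, y, w, h), score) tuples.
    for i, detection_list in enumerate(model.predict([img], callback=fomo)):
        if i == 0:
            continue  # class 0 is the background class
        for (x, y, w, h), score in detection_list:
            print("class %d: %.2f at x=%d y=%d w=%d h=%d" % (i, score, x, y, w, h))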
@@ -9,9 +9,8 @@
import sensor
import time
import ml
from ml.utils import NMS
from ml.postprocessing import fomo_postprocess
import math
import image

sensor.reset()  # Reset and initialize the sensor.
sensor.set_pixformat(sensor.RGB565)  # Set pixel format to RGB565 (or GRAYSCALE)
@@ -19,9 +18,6 @@ sensor.set_framesize(sensor.QVGA)  # Set frame size to QVGA (320x240)
sensor.set_windowing((240, 240))  # Set 240x240 window.
sensor.skip_frames(time=2000)  # Let the camera adjust.

min_confidence = 0.4
threshold_list = [(math.ceil(min_confidence * 255), 255)]

# Load built-in FOMO face detection model
model = ml.Model("/rom/fomo_face_detection.tflite")
print(model)
@@ -40,31 +36,7 @@ colors = [  # Add more colors if you are detecting more than 7 types of classes
    (255, 255, 255),
]


# FOMO outputs an image per class where each pixel in the image is the centroid of the trained
# object. So, we will get those output images and then run find_blobs() on them to extract the
# centroids. We will also run get_stats() on the detected blobs to determine their score.
# The Non-Max-Supression (NMS) object then filters out overlapping detections and maps their
# position in the output image back to the original input image. The function then returns a
# list per class which each contain a list of (rect, score) tuples representing the detected
# objects.
def fomo_post_process(model, inputs, outputs):
    n, oh, ow, oc = model.output_shape[0]
    nms = NMS(ow, oh, inputs[0].roi)
    for i in range(oc):
        img = image.Image(outputs[0][0, :, :, i] * 255)
        blobs = img.find_blobs(
            threshold_list, x_stride=1, area_threshold=1, pixels_threshold=1
        )
        for b in blobs:
            rect = b.rect()
            x, y, w, h = rect
            score = (
                img.get_statistics(thresholds=threshold_list, roi=rect).l_mean() / 255.0
            )
            nms.add_bounding_box(x, y, x + w, y + h, score, i)
    return nms.get_bounding_boxes()

fomo = fomo_postprocess(threshold=0.4)

clock = time.clock()
while True:
@@ -72,7 +44,7 @@ while True:

    img = sensor.snapshot()

    for i, detection_list in enumerate(model.predict([img], callback=fomo_post_process)):
    for i, detection_list in enumerate(model.predict([img], callback=fomo)):
        if i == 0:
            continue  # background class
        if len(detection_list) == 0:
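The drawing part of the example falls outside this hunk. As an illustration only (not part of the commit), the body of the per-class loop shown above can draw each detection with OpenMV's img.draw_rectangle(), reusing the colors table from the earlier hunk:

        # Illustration: plugs into the example's detection loop shown above.
        for (x, y, w, h), score in detection_list:
            img.draw_rectangle((x, y, w, h), color=colors[i], thickness=2)
            print("class %d: score %.2f at (%d, %d, %d, %d)" % (i, score, x, y, w, h))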
@@ -26,14 +26,30 @@
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import math
import image
from ml.utils import NMS
import ml.utils
from micropython import const
from ulab import numpy as np


_NO_DETECTION = const(())
_FOMO_CLASSES = const(1)
_YOLO_V2_TX = const(0)
_YOLO_V2_TY = const(1)
_YOLO_V2_TW = const(2)
_YOLO_V2_TH = const(3)
_YOLO_V2_SCORE = const(4)
_YOLO_V2_CLASSES = const(5)
_YOLO_V5_CX = const(0)
_YOLO_V5_CY = const(1)
_YOLO_V5_CW = const(2)
_YOLO_V5_CH = const(3)
_YOLO_V5_SCORE = const(4)
_YOLO_V5_CLASSES = const(5)
_YOLO_V8_CX = const(0)
_YOLO_V8_CY = const(1)
_YOLO_V8_CW = const(2)
_YOLO_V8_CH = const(3)
_YOLO_V8_CLASSES = const(4)


def dequantize(value, dtype, zero_point, scale):
@@ -42,48 +58,74 @@ def dequantize(value, dtype, zero_point, scale):
    return (value - zero_point) * scale


# FOMO generates an image per class, where each pixel represents the centroid
# of the trained object. These images are processed with `find_blobs()` to
# extract centroids, and `get_stats()` is used to get their scores. Overlapping
# detections are then filtered with NMS and positions are mapped back to the
# original image, and a list of (rect, score) tuples is returned for each class,
# representing detected objects.
def sigmoid(x):
    return 1.0 / (1.0 + np.exp(-x))


def mod(a, b):
    return a - (b * (a // b))


def softmax(x):
    e_x = np.exp(x - np.max(x, axis=1, keepdims=True))
    return e_x / np.sum(e_x, axis=1, keepdims=True)


class fomo_postprocess:
    def __init__(self, threshold=0.4):
        self.threshold_list = [(math.ceil(threshold * 255), 255)]
    def __init__(self, threshold=0.4, w_scale=1.414214, h_scale=1.414214,
                 nms_threshold=0.1, nms_sigma=0.001,
                 scale_aspect=ml.utils.NMS_SCALE_ASPECT_KEEP):
        self.threshold = threshold
        self.w_scale = w_scale
        self.h_scale = h_scale
        self.nms_threshold = nms_threshold
        self.nms_sigma = nms_sigma
        self.scale_aspect = scale_aspect

    def __call__(self, model, inputs, outputs):
        n, oh, ow, oc = model.output_shape[0]
        ob, oh, ow, oc = model.output_shape[0]
        s = model.output_scale[0]
        zp = model.output_zero_point[0]
        dt = model.output_dtype[0]
        nms = NMS(ow, oh, inputs[0].roi)
        for i in range(oc):
            img = image.Image(dequantize(outputs[0][0, :, :, i], dt, zp, s) * 255)
            blobs = img.find_blobs(
                self.threshold_list, x_stride=1, area_threshold=1, pixels_threshold=1
            )
            for b in blobs:
                rect = b.rect()
                x, y, w, h = rect
                score = (
                    img.get_statistics(thresholds=self.threshold_list, roi=rect).l_mean() / 255.0
                )
                nms.add_bounding_box(x, y, x + w, y + h, score, i)
        return nms.get_bounding_boxes()

        # Reshape the output to a 2D array
        row_outputs = outputs[0].reshape((oh * ow, oc))

        # Threshold all the scores
        score_indices = np.max(dequantize(row_outputs[:, _FOMO_CLASSES:], dt, zp, s), axis=1)
        score_indices = np.nonzero(score_indices > self.threshold)[0]
        if not len(score_indices):
            return _NO_DETECTION

        # Get the bounding boxes that have a valid score
        bb = dequantize(np.take(row_outputs, score_indices, axis=0), dt, zp, s)

        # Extract rows and columns
        bb_rows = score_indices // ow
        bb_cols = mod(score_indices, ow)

        # Get the score information
        bb_scores = np.max(bb[:, _FOMO_CLASSES:], axis=1)

        # Get the class information
        bb_classes = np.argmax(bb[:, _FOMO_CLASSES:], axis=1) + _FOMO_CLASSES

        # Compute the bounding box information
        x_center = (bb_cols + 0.5) / ow
        y_center = (bb_rows + 0.5) / oh
        w_rel = np.full(len(bb_cols), self.w_scale / ow)
        h_rel = np.full(len(bb_rows), self.h_scale / oh)

        return ml.utils.box_nms(x_center, y_center, w_rel, h_rel, bb_scores,
                                bb_classes, model.input_shape[0][1:3], inputs[0].roi,
                                self.nms_threshold, self.nms_sigma, self.scale_aspect)
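A note on the rewritten FOMO path above: instead of rendering a heat-map image per class and running find_blobs(), the callback now thresholds the dequantized score grid directly, converts the surviving flat indices back into grid cells, and emits one fixed-size box per cell. A small worked illustration (values are made up; ow, oh, w_scale, and h_scale follow the defaults above):

# Illustration only (not part of the commit).
ow, oh = 8, 8                        # assume an 8x8 FOMO output grid
w_scale = h_scale = 1.414214         # default box size, in output-grid cells

idx = 19                             # flat index into the (oh * ow, oc) score array
row = idx // ow                      # 2
col = idx - (ow * (idx // ow))       # 3, i.e. mod(idx, ow) from the code above
x_center = (col + 0.5) / ow          # 0.4375, normalized to the model input
y_center = (row + 0.5) / oh          # 0.3125
w_rel = w_scale / ow                 # ~0.177: every FOMO detection gets this fixed size
h_rel = h_scale / oh
# These normalized centers/sizes, plus scores and classes, are what the
# callback hands to ml.utils.box_nms above.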

# This is a lightweight version of the tiny yolo v2 object detection algorithm.
# It was optimized to work well on embedded devices with limited computational resources.
class yolo_v2_postprocess:
    _YOLO_V2_TX = const(0)
    _YOLO_V2_TY = const(1)
    _YOLO_V2_TW = const(2)
    _YOLO_V2_TH = const(3)
    _YOLO_V2_SCORE = const(4)
    _YOLO_V2_CLASSES = const(5)

    def __init__(self, threshold=0.6, anchors=None, nms_threshold=0.1, nms_sigma=0.1):
    def __init__(self, threshold=0.6, anchors=None, nms_threshold=0.1, nms_sigma=0.1,
                 scale_aspect=ml.utils.NMS_SCALE_ASPECT_KEEP):
        self.threshold = threshold
        self.anchors = anchors
        if self.anchors is None:
@@ -95,6 +137,7 @@ class yolo_v2_postprocess:
        self.anchors_len = len(self.anchors)
        self.nms_threshold = nms_threshold
        self.nms_sigma = nms_sigma
        self.scale_aspect = scale_aspect

    def __call__(self, model, inputs, outputs):
        ob, oh, ow, oc = model.output_shape[0]
@@ -103,16 +146,6 @@ class yolo_v2_postprocess:
        dt = model.output_dtype[0]
        class_count = (oc // self.anchors_len) - _YOLO_V2_CLASSES

        def sigmoid(x):
            return 1.0 / (1.0 + np.exp(-x))

        def mod(a, b):
            return a - (b * (a // b))

        def softmax(x):
            e_x = np.exp(x - np.max(x, axis=1, keepdims=True))
            return e_x / np.sum(e_x, axis=1, keepdims=True)

        # Reshape the output to a 2D array
        row_outputs = outputs[0].reshape((oh * ow * self.anchors_len,
                                          _YOLO_V2_CLASSES + class_count))
@@ -146,48 +179,32 @@ class yolo_v2_postprocess:
        w_rel = (bb_a_array[:, 0] * np.exp(bb[:, _YOLO_V2_TW])) / ow
        h_rel = (bb_a_array[:, 1] * np.exp(bb[:, _YOLO_V2_TH])) / oh

        # Scale the bounding boxes to have enough integer precision for NMS
        ib, ih, iw, ic = model.input_shape[0]
        x_center = x_center * iw
        y_center = y_center * ih
        w_rel = w_rel * iw
        h_rel = h_rel * ih

        nms = NMS(iw, ih, inputs[0].roi)
        for i in range(bb.shape[0]):
            nms.add_bounding_box(x_center[i] - (w_rel[i] / 2),
                                 y_center[i] - (h_rel[i] / 2),
                                 x_center[i] + (w_rel[i] / 2),
                                 y_center[i] + (h_rel[i] / 2),
                                 bb_scores[i], bb_classes[i])
        return nms.get_bounding_boxes(threshold=self.nms_threshold, sigma=self.nms_sigma)
        return ml.utils.box_nms(x_center, y_center, w_rel, h_rel, bb_scores, bb_classes,
                                model.input_shape[0][1:3], inputs[0].roi,
                                self.nms_threshold, self.nms_sigma, self.scale_aspect)


# This is a lightweight version of the YOLO (You Only Look Once) object detection algorithm.
# It is designed to work well on embedded devices with limited computational resources.
class yolo_lc_postprocess(yolo_v2_postprocess):
    def __init__(self, threshold=0.6, anchors=None, nms_threshold=0.1, nms_sigma=0.1):
    def __init__(self, threshold=0.6, anchors=None, nms_threshold=0.1, nms_sigma=0.1,
                 scale_aspect=ml.utils.NMS_SCALE_ASPECT_KEEP):
        if anchors is None:
            anchors = np.array([[0.076023, 0.258508],
                                [0.163031, 0.413531],
                                [0.234769, 0.702585],
                                [0.427054, 0.715892],
                                [0.748154, 0.857092]])
        super().__init__(threshold, anchors, nms_threshold, nms_sigma)
        super().__init__(threshold, anchors, nms_threshold, nms_sigma, scale_aspect)


class yolo_v5_postprocess:
    _YOLO_V5_CX = const(0)
    _YOLO_V5_CY = const(1)
    _YOLO_V5_CW = const(2)
    _YOLO_V5_CH = const(3)
    _YOLO_V5_SCORE = const(4)
    _YOLO_V5_CLASSES = const(5)

    def __init__(self, threshold=0.6, nms_threshold=0.1, nms_sigma=0.1):
    def __init__(self, threshold=0.6, nms_threshold=0.1, nms_sigma=0.1,
                 scale_aspect=ml.utils.NMS_SCALE_ASPECT_KEEP):
        self.threshold = threshold
        self.nms_threshold = nms_threshold
        self.nms_sigma = nms_sigma
        self.scale_aspect = scale_aspect

    def __call__(self, model, inputs, outputs):
        oh, ow, oc = model.output_shape[0]
@@ -217,34 +234,21 @@ class yolo_v5_postprocess:
        # Compute the bounding box information
        x_center = bb[:, _YOLO_V5_CX]
        y_center = bb[:, _YOLO_V5_CY]
        w_rel = bb[:, _YOLO_V5_CW] * 0.5
        h_rel = bb[:, _YOLO_V5_CH] * 0.5
        w_rel = bb[:, _YOLO_V5_CW]
        h_rel = bb[:, _YOLO_V5_CH]

        # Scale the bounding boxes to have enough integer precision for NMS
        ib, ih, iw, ic = model.input_shape[0]
        xmin = (x_center - w_rel) * iw
        ymin = (y_center - h_rel) * ih
        xmax = (x_center + w_rel) * iw
        ymax = (y_center + h_rel) * ih

        nms = NMS(iw, ih, inputs[0].roi)
        for i in range(bb.shape[0]):
            nms.add_bounding_box(xmin[i], ymin[i], xmax[i], ymax[i],
                                 bb_scores[i], bb_classes[i])
        return nms.get_bounding_boxes(threshold=self.nms_threshold, sigma=self.nms_sigma)
        return ml.utils.box_nms(x_center, y_center, w_rel, h_rel, bb_scores, bb_classes,
                                model.input_shape[0][1:3], inputs[0].roi,
                                self.nms_threshold, self.nms_sigma, self.scale_aspect)


class yolo_v8_postprocess:
    _YOLO_V8_CX = const(0)
    _YOLO_V8_CY = const(1)
    _YOLO_V8_CW = const(2)
    _YOLO_V8_CH = const(3)
    _YOLO_V8_CLASSES = const(4)

    def __init__(self, threshold=0.6, nms_threshold=0.1, nms_sigma=0.1):
    def __init__(self, threshold=0.6, nms_threshold=0.1, nms_sigma=0.1,
                 scale_aspect=ml.utils.NMS_SCALE_ASPECT_KEEP):
        self.threshold = threshold
        self.nms_threshold = nms_threshold
        self.nms_sigma = nms_sigma
        self.scale_aspect = scale_aspect

    def __call__(self, model, inputs, outputs):
        oh, ow, oc = model.output_shape[0]
@@ -274,18 +278,9 @@ class yolo_v8_postprocess:
        # Compute the bounding box information
        x_center = bb[_YOLO_V8_CX, :]
        y_center = bb[_YOLO_V8_CY, :]
        w_rel = bb[_YOLO_V8_CW, :] * 0.5
        h_rel = bb[_YOLO_V8_CH, :] * 0.5
        w_rel = bb[_YOLO_V8_CW, :]
        h_rel = bb[_YOLO_V8_CH, :]

        # Scale the bounding boxes to have enough integer precision for NMS
        ib, ih, iw, ic = model.input_shape[0]
        xmin = (x_center - w_rel) * iw
        ymin = (y_center - h_rel) * ih
        xmax = (x_center + w_rel) * iw
        ymax = (y_center + h_rel) * ih

        nms = NMS(iw, ih, inputs[0].roi)
        for i in range(bb.shape[1]):
            nms.add_bounding_box(xmin[i], ymin[i], xmax[i], ymax[i],
                                 bb_scores[i], bb_classes[i])
        return nms.get_bounding_boxes(threshold=self.nms_threshold, sigma=self.nms_sigma)
        return ml.utils.box_nms(x_center, y_center, w_rel, h_rel, bb_scores, bb_classes,
                                model.input_shape[0][1:3], inputs[0].roi,
                                self.nms_threshold, self.nms_sigma, self.scale_aspect)
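All four YOLO-style postprocessors above now end the same way: decode the raw tensor into normalized centers, sizes, scores, and class indices, then hand everything to ml.utils.box_nms together with the model input shape and the image ROI. A hedged sketch of a custom postprocessor following that pattern (the (1, N, 5 + classes) float output layout and the class name my_postprocess are assumptions for illustration, not something this commit defines):

import ml.utils
from ulab import numpy as np


class my_postprocess:  # sketch only; not part of this commit
    def __init__(self, threshold=0.6, nms_threshold=0.1, nms_sigma=0.1,
                 scale_aspect=ml.utils.NMS_SCALE_ASPECT_KEEP):
        self.threshold = threshold
        self.nms_threshold = nms_threshold
        self.nms_sigma = nms_sigma
        self.scale_aspect = scale_aspect

    def __call__(self, model, inputs, outputs):
        # Assumed float output of shape (1, N, 5 + classes): cx, cy, w, h, obj, class scores.
        ob, n, oc = model.output_shape[0]
        rows = outputs[0].reshape((n, oc))
        keep = np.nonzero(rows[:, 4] > self.threshold)[0]
        if not len(keep):
            return ()
        rows = np.take(rows, keep, axis=0)
        classes = np.argmax(rows[:, 5:], axis=1)   # best class per surviving candidate
        # The shared numpy NMS thresholds, suppresses, and maps boxes back into the ROI.
        return ml.utils.box_nms(rows[:, 0], rows[:, 1], rows[:, 2], rows[:, 3],
                                rows[:, 4], classes,
                                model.input_shape[0][1:3], inputs[0].roi,
                                self.nms_threshold, self.nms_sigma, self.scale_aspect)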

@@ -26,98 +26,124 @@
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import math
from micropython import const
from ulab import numpy as np


class NMS:
    def __init__(
        self,
        window_w,
        window_h,
        roi,
    ):
        self.window_w = window_w
        self.window_h = window_h
        self.roi = roi
        if roi[2] < 1 or roi[3] < 1:
            raise ValueError("Invalid ROI dimensions!")
        self.boxes = []
NMS_SCALE_ASPECT_KEEP = const(0)
NMS_SCALE_ASPECT_EXPAND = const(1)
NMS_SCALE_ASPECT_IGNORE = const(2)

    def add_bounding_box(self, xmin, ymin, xmax, ymax, score, label_index):
        if score >= 0.0 and score <= 1.0:
            xmin = max(0.0, min(xmin, self.window_w))
            ymin = max(0.0, min(ymin, self.window_h))
            xmax = max(0.0, min(xmax, self.window_w))
            ymax = max(0.0, min(ymax, self.window_h))
            w = int(xmax - xmin)
            h = int(ymax - ymin)
            if w > 0 and h > 0:
                self.boxes.append([int(xmin), int(ymin), w, h, score, label_index])

    def get_bounding_boxes(self, threshold=0.1, sigma=0.1):
        sorted_boxes = sorted(self.boxes, key=lambda x: x[4], reverse=True)
        sigma_scale = (-1.0 / sigma) if (sigma > 0.0) else 0.0
def box_nms(
    x_center,  # shape: (N,)
    y_center,  # shape: (N,)
    w_rel,  # shape: (N,)
    h_rel,  # shape: (N,)
    scores,  # shape: (N,)
    classes,  # shape: (N,)
    input_shape,  # (input_h, input_w)
    image_roi,  # (image_x, image_y, image_w, image_h)
    threshold=0.1,  # IoU threshold for NMS
    sigma=0.1,  # Sigma for NMS
    scale_aspect=NMS_SCALE_ASPECT_KEEP  # Scale aspect ratio
):
    N = len(classes)
    assert all(N == len(arr) for arr in [x_center, y_center, w_rel, h_rel, scores]), \
        f"Inconsistent lengths: classes={N}, x_center={len(x_center)}, " \
        f"y_center={len(y_center)}, w_rel={len(w_rel)}, h_rel={len(h_rel)}, " \
        f"scores={len(scores)}"

        def iou(box1, box2):
            x1 = max(box1[0], box2[0])
            y1 = max(box1[1], box2[1])
            x2 = min(box1[0] + box1[2], box2[0] + box2[2])
            y2 = min(box1[1] + box1[3], box2[1] + box2[3])
            w = max(0, x2 - x1)
            h = max(0, y2 - y1)
            intersection = w * h
            union = (box1[2] * box1[3]) + (box2[2] * box2[3]) - intersection
            return float(intersection) / float(union)
    input_h, input_w = input_shape
    roi_x, roi_y, roi_w, roi_h = image_roi

        # Perform Non Max Supression.
    sigma_inv = (-1.0 / sigma) if (sigma > 0.0) else 0.0

        max_index = 0
        output_boxes = []
        max_label_index = 0
    x_scale = roi_w / float(input_w)
    y_scale = roi_h / float(input_h)

        while len(sorted_boxes):
            box = sorted_boxes.pop(max_index)
            output_boxes.append(box)
            max_label_index = max(max_label_index, box[5])

            # Compare and supress the remaining boxes in the list against the max.

            for i in range(len(sorted_boxes)):
                v = iou(box, sorted_boxes[i])
                sorted_boxes[i][4] = sorted_boxes[i][4] * math.exp(sigma_scale * v * v)
                if sorted_boxes[i][4] < threshold:
                    sorted_boxes[i][4] = 0.0

            # Filter out supressed boxes and find the next largest.

            sorted_boxes = list(filter(lambda x: x[4] > 0.0, sorted_boxes))
            if len(sorted_boxes):
                max_index = max(enumerate(sorted_boxes), key=lambda x: x[1][4])[0]

        # Map the output boxes back to the input image.

        x_scale = self.roi[2] / float(self.window_w)
        y_scale = self.roi[3] / float(self.window_h)
    if scale_aspect == NMS_SCALE_ASPECT_KEEP:
        scale = min(x_scale, y_scale)
        x_offset = ((self.roi[2] - (self.window_w * scale)) / 2) + self.roi[0]
        y_offset = ((self.roi[3] - (self.window_h * scale)) / 2) + self.roi[1]
        x_scale = scale
        y_scale = scale
    elif scale_aspect == NMS_SCALE_ASPECT_EXPAND:
        scale = max(x_scale, y_scale)
        x_scale = scale
        y_scale = scale
    elif scale_aspect != NMS_SCALE_ASPECT_IGNORE:
        raise ValueError("Invalid scale_aspect value!")

        for i in range(len(output_boxes)):
            output_boxes[i][0] = int((output_boxes[i][0] * scale) + x_offset)
            output_boxes[i][1] = int((output_boxes[i][1] * scale) + y_offset)
            output_boxes[i][2] = int(output_boxes[i][2] * scale)
            output_boxes[i][3] = int(output_boxes[i][3] * scale)
    x_offset = ((roi_w - (input_w * x_scale)) * 0.5) + roi_x
    y_offset = ((roi_h - (input_h * y_scale)) * 0.5) + roi_y

        # Create a list per class with (rect, score) tuples.
    # Convert boxes to (x1, y1, x2, y2) format.
    w_rel_2 = w_rel * 0.5
    h_rel_2 = h_rel * 0.5
    x1 = (x_center - w_rel_2) * input_w
    y1 = (y_center - h_rel_2) * input_h
    x2 = (x_center + w_rel_2) * input_w
    y2 = (y_center + h_rel_2) * input_h
    areas = (x2 - x1) * (y2 - y1)
    boxes = np.array([x1, y1, x2, y2, areas]).T

        output_list = [[] for i in range(max_label_index + 1)]
    # Allocate output list for each class.
    output = [[] for _ in range(int(np.max(classes)) + 1)]

        for i in range(len(output_boxes)):
            output_list[output_boxes[i][5]].append(
                (output_boxes[i][0:4], output_boxes[i][4])
            )
    # Filter out invalid boxes.
    valid_indices = np.nonzero((areas > 0.0) & (scores > threshold) & (scores <= 1.0))[0]
    if not len(valid_indices):
        return output

        return output_list
    # Sort boxes by scores in descending order.
    valid_scores = np.take(scores, valid_indices, axis=0)
    sorted_valid_score_indices = np.argsort(valid_scores, axis=0)[::-1]
    sorted_valid_indices = np.take(valid_indices, sorted_valid_score_indices, axis=0)

    while True:
        # Grab the box with the highest score.
        i = sorted_valid_indices[0]
        x1i, y1i, x2i, y2i, area = boxes[i]

        # Project and store the box.
        px = round((x1i * x_scale) + x_offset)
        py = round((y1i * y_scale) + y_offset)
        pw = round((x2i - x1i) * x_scale)
        ph = round((y2i - y1i) * y_scale)
        output[classes[i]].append(((px, py, pw, ph), scores[i]))

        # Stop if there's only one box left.
        if len(sorted_valid_indices) == 1:
            break

        # Get the rest of the boxes.
        sorted_valid_indices = sorted_valid_indices[1:]
        boxes = np.take(boxes, sorted_valid_indices, axis=0)
        scores = np.take(scores, sorted_valid_indices, axis=0)
        classes = np.take(classes, sorted_valid_indices, axis=0)

        # Compute IoU of the max box with the rest.
        xx1 = np.maximum(x1i, boxes[:, 0])
        yy1 = np.maximum(y1i, boxes[:, 1])
        xx2 = np.minimum(x2i, boxes[:, 2])
        yy2 = np.minimum(y2i, boxes[:, 3])
        iw = np.maximum(0.0, xx2 - xx1)
        ih = np.maximum(0.0, yy2 - yy1)
        intersection = iw * ih
        union = area + boxes[:, 4] - intersection
        iou = intersection / (union + 1e-6)
        scores *= np.exp((iou ** 2.0) * sigma_inv)

        # Filter out boxes with low scores.
        valid_indices = np.nonzero(scores > threshold)[0]
        if not len(valid_indices):
            break

        # Sort boxes by scores in descending order.
        valid_scores = np.take(scores, valid_indices, axis=0)
        sorted_valid_score_indices = np.argsort(valid_scores, axis=0)[::-1]
        sorted_valid_indices = np.take(valid_indices, sorted_valid_score_indices, axis=0)

    return output
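As an illustration (not part of the commit), here is a direct call to the new ml.utils.box_nms with two heavily overlapping candidates of the same class; the arrays and the 96x96-into-240x240 mapping are made-up values. The weaker box has its score decayed by the soft-NMS rule score *= exp(-(IoU^2) / sigma) and, falling below the threshold, is dropped; survivors come back as ((x, y, w, h), score) tuples already projected into the image ROI.

from ulab import numpy as np
import ml.utils

x_center = np.array([0.50, 0.52])      # centers, normalized to the model input
y_center = np.array([0.50, 0.50])
w_rel = np.array([0.20, 0.20])         # sizes, normalized to the model input
h_rel = np.array([0.20, 0.20])
scores = np.array([0.90, 0.60])
classes = np.array([1, 1], dtype=np.int8)

# Map from a 96x96 model input into a 240x240 ROI at the image origin.
detections = ml.utils.box_nms(x_center, y_center, w_rel, h_rel, scores, classes,
                              (96, 96), (0, 0, 240, 240),
                              threshold=0.1, sigma=0.1,
                              scale_aspect=ml.utils.NMS_SCALE_ASPECT_KEEP)
# detections[1] now holds the ((x, y, w, h), score) tuples for class 1; the
# second candidate overlaps the first at IoU ~0.82, so its decayed score
# drops below the 0.1 threshold and it is suppressed.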


def draw_predictions(