Mirror of https://github.com/openmv/openmv.git (synced 2025-09-26 23:09:13 +08:00)
scripts/examples: Update fomo example to use the new post-processor.
commit 8e72debf11
parent de7a761d3b
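
The change is mechanical: the example's hand-rolled FOMO decoding callback is deleted and the library's post-processor object is passed to predict() instead. Below is a minimal sketch of the call-site change, using only what is visible in the hunks that follow (predict() accepting a callback, fomo_postprocess being instantiated with no arguments); the names model and img are the example's own, and the threshold setup removed in the second hunk presumably moves inside the post-processor, though its parameters are not shown in this diff.

    # before this commit: a helper defined in the example itself
    from ml.utils import NMS
    def fomo_post_process(model, inputs, outputs):
        ...  # find_blobs() + get_statistics() + NMS over each class heatmap
    detections = model.predict([img], callback=fomo_post_process)

    # after this commit: the built-in post-processor from ml.postprocessing
    from ml.postprocessing import fomo_postprocess
    detections = model.predict([img], callback=fomo_postprocess())

In both cases predict() yields one detection list per class, with index 0 reserved for the background class, so the rest of the main loop is unchanged.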
@@ -9,9 +9,8 @@
 import sensor
 import time
 import ml
-from ml.utils import NMS
+from ml.postprocessing import fomo_postprocess
 import math
-import image
 
 sensor.reset()  # Reset and initialize the sensor.
 sensor.set_pixformat(sensor.RGB565)  # Set pixel format to RGB565 (or GRAYSCALE)
@@ -19,9 +18,6 @@ sensor.set_framesize(sensor.QVGA)  # Set frame size to QVGA (320x240)
 sensor.set_windowing((240, 240))  # Set 240x240 window.
 sensor.skip_frames(time=2000)  # Let the camera adjust.
 
-min_confidence = 0.4
-threshold_list = [(math.ceil(min_confidence * 255), 255)]
-
 # Load built-in FOMO face detection model
 model = ml.Model("/rom/fomo_face_detection.tflite")
 print(model)
@@ -40,39 +36,13 @@ colors = [  # Add more colors if you are detecting more than 7 types of classes
     (255, 255, 255),
 ]
 
-
-# FOMO outputs an image per class where each pixel in the image is the centroid of the trained
-# object. So, we will get those output images and then run find_blobs() on them to extract the
-# centroids. We will also run get_stats() on the detected blobs to determine their score.
-# The Non-Max-Supression (NMS) object then filters out overlapping detections and maps their
-# position in the output image back to the original input image. The function then returns a
-# list per class which each contain a list of (rect, score) tuples representing the detected
-# objects.
-def fomo_post_process(model, inputs, outputs):
-    n, oh, ow, oc = model.output_shape[0]
-    nms = NMS(ow, oh, inputs[0].roi)
-    for i in range(oc):
-        img = image.Image(outputs[0][0, :, :, i] * 255)
-        blobs = img.find_blobs(
-            threshold_list, x_stride=1, area_threshold=1, pixels_threshold=1
-        )
-        for b in blobs:
-            rect = b.rect()
-            x, y, w, h = rect
-            score = (
-                img.get_statistics(thresholds=threshold_list, roi=rect).l_mean() / 255.0
-            )
-            nms.add_bounding_box(x, y, x + w, y + h, score, i)
-    return nms.get_bounding_boxes()
-
-
 clock = time.clock()
 while True:
     clock.tick()
 
     img = sensor.snapshot()
 
-    for i, detection_list in enumerate(model.predict([img], callback=fomo_post_process)):
+    for i, detection_list in enumerate(model.predict([img], callback=fomo_postprocess())):
         if i == 0:
             continue  # background class
         if len(detection_list) == 0:
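
The hunk's trailing context ends at the empty-list check above; the remainder of the drawing loop is untouched by this commit and is not shown. For orientation only, here is a hypothetical sketch of how such a loop consumes the per-class (rect, score) detections described in the removed comment block. The centroid math and draw_circle() call are illustrative, reusing the example's own names (model, img, colors, math); this is not a quote of the file.

    for i, detection_list in enumerate(model.predict([img], callback=fomo_postprocess())):
        if i == 0:
            continue  # class 0 is the background heatmap
        if len(detection_list) == 0:
            continue  # nothing detected for this class
        for (x, y, w, h), score in detection_list:
            # each detection is a (rect, score) tuple; mark its centre on the live image
            center_x = math.floor(x + (w / 2))
            center_y = math.floor(y + (h / 2))
            img.draw_circle((center_x, center_y, 12), color=colors[i])
            print("class %d: (%d, %d) score %.2f" % (i, center_x, center_y, score))

A loop like this, using math.floor() for the centre coordinates, would also explain why import math survives the first hunk while import image does not: nothing in the updated example builds image.Image objects from the model output any more.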