mirror of https://github.com/openmv/openmv.git
synced 2025-11-04 14:49:50 +08:00

scripts/examples: Clean up TensorFlow examples.

parent 379e187e07
commit 331bdd67c6
@@ -1,31 +0,0 @@
-# Face Collection
-#
-# Use this script to gather face images for building a TensorFlow dataset. This script automatically
-# zooms in on the largest face in the field of view, which you can then save using the dataset editor.
-
-import sensor, image, time
-
-sensor.reset()
-sensor.set_pixformat(sensor.RGB565)
-sensor.set_framesize(sensor.QVGA)
-sensor.skip_frames(time = 2000)
-
-clock = time.clock()
-
-largest_face = None
-largest_face_timeout = 0
-
-while(True):
-    clock.tick()
-
-    faces = sensor.snapshot().gamma_corr(contrast=1.5).find_features(image.HaarCascade("frontalface"))
-
-    if faces:
-        largest_face = max(faces, key = lambda f: f[2] * f[3])
-        largest_face_timeout = 20
-
-    if largest_face_timeout > 0:
-        sensor.get_fb().crop(roi=largest_face)
-        largest_face_timeout -= 1
-
-    print(clock.fps())
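
Note: the removed collection script relies on OpenMV IDE's Dataset Editor to save the zoomed-in faces. A minimal sketch of writing the cropped frame straight to the microSD card instead; the "faces" directory and file-naming scheme are assumptions, not part of the original example:

import sensor, image

sensor.reset()
sensor.set_pixformat(sensor.RGB565)
sensor.set_framesize(sensor.QVGA)
sensor.skip_frames(time=2000)

count = 0
while(True):
    img = sensor.snapshot().gamma_corr(contrast=1.5)
    faces = img.find_features(image.HaarCascade("frontalface"))
    if faces:
        # Crop to the largest face and write it out for later dataset building.
        img.crop(roi=max(faces, key=lambda f: f[2] * f[3]))
        img.save("faces/%04d.jpg" % count)  # assumes a "faces" directory exists on the card
        count += 1
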
@@ -1,41 +0,0 @@
-# Face Recognition
-#
-# Use this script to run a TensorFlow Lite image classifier on faces detected within an image.
-# The classifier is free to do facial recognition, expression detection, or whatever.
-
-import sensor, image, time, tf
-
-sensor.reset()
-sensor.set_pixformat(sensor.RGB565)
-sensor.set_framesize(sensor.QVGA)
-sensor.skip_frames(time = 2000)
-
-clock = time.clock()
-
-net = tf.load("trained.tflite", load_to_fb=True)
-labels = [l.rstrip('\n') for l in open("labels.txt")]
-
-while(True):
-    clock.tick()
-
-    # Take a picture and brighten things up for the frontal face detector.
-    img = sensor.snapshot().gamma_corr(contrast=1.5)
-
-    # Returns a list of rects (x, y, w, h) where faces are.
-    faces = img.find_features(image.HaarCascade("frontalface"))
-
-    for f in faces:
-
-        # Classify the face and get the list of class scores.
-        scores = net.classify(img, roi=f)[0].output()
-
-        # Find the highest class score and look up the label for it.
-        label = labels[scores.index(max(scores))]
-
-        # Draw a box around the face.
-        img.draw_rectangle(f)
-
-        # Draw the label above the face.
-        img.draw_string(f[0]+3, f[1]-1, label, mono_space=False)
-
-    print(clock.fps())
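
Note: net.classify(img, roi=f)[0].output() returns one score per entry in labels.txt. A small sketch, assuming the same net and labels objects as in the removed script, that also reports the winning confidence so borderline matches are visible:

for f in faces:
    scores = net.classify(img, roi=f)[0].output()
    best = scores.index(max(scores))
    # Print the best label together with its score for this face.
    print("%s = %f" % (labels[best], scores[best]))
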
@@ -48,14 +48,13 @@ while(True):
     # down to min_scale (0-1). For example, if scale_mul is 0.5 the detection window will shrink by 50%.
     # Note that at a lower scale there's even more area to search if x_overlap and y_overlap are small...
 
+    # default settings just do one detection... change them to search the image...
     # Setting x_overlap=-1 forces the window to stay centered in the ROI in the x direction always. If
     # y_overlap is not -1 the method will search in all vertical positions.
 
     # Setting y_overlap=-1 forces the window to stay centered in the ROI in the y direction always. If
     # x_overlap is not -1 the method will search in all horizontal positions.
-
-    # default settings just do one detection... change them to search the image...
-    for obj in tf.classify(mobilenet, img, min_scale=1.0, scale_mul=0.5, x_overlap=-1, y_overlap=-1):
+    for obj in tf.classify(mobilenet, img, min_scale=1.0, scale_mul=0.5, x_overlap=0.0, y_overlap=0.0):
         print("**********\nTop 5 Detections at [x=%d,y=%d,w=%d,h=%d]" % obj.rect())
         img.draw_rectangle(obj.rect())
         # This combines the labels and confidence values into a list of tuples
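
Note: with min_scale=1.0 the detection window never shrinks, so either overlap setting produces a single classification of the ROI. A sketch of an exhaustive multi-scale search using the same tf.classify call; the parameter values here are illustrative only, not from the commit:

# Windows overlap by 50% in x and y and shrink by 50% per pass, down to half size.
for obj in tf.classify(mobilenet, img, min_scale=0.5, scale_mul=0.5, x_overlap=0.5, y_overlap=0.5):
    print("Detections at [x=%d,y=%d,w=%d,h=%d]" % obj.rect())
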
@@ -1,60 +0,0 @@
-# TensorFlow Lite Mobilenet V1 Example
-#
-# Google's Mobilenet V1 detects 1000 classes of objects.
-#
-# WARNING: Mobilenet is trained on ImageNet and isn't meant to classify anything
-# in the real world. It's just designed to score well on the ImageNet dataset.
-# This example just shows off running mobilenet on the OpenMV Cam. However, the
-# default model is not really usable for anything. You have to use transfer
-# learning to apply the model to a target problem by re-training the model.
-#
-# NOTE: This example only works on the OpenMV Cam H7 Pro (that has SDRAM) and better!
-# To get the models please see the CNN Network library in OpenMV IDE under
-# Tools -> Machine Vision. The labels are there too.
-# You should insert a microSD card into your camera and copy-paste the mobilenet_labels.txt
-# file and your chosen model into the root folder for this script to work.
-#
-# In this example we slide the detector window over the image and get a list
-# of activations. Note that using a CNN with a sliding window is extremely computationally
-# expensive, so for an exhaustive search do not expect the CNN to be real-time.
-
-import sensor, image, time, os, tf
-
-sensor.reset()                         # Reset and initialize the sensor.
-sensor.set_pixformat(sensor.RGB565)    # Set pixel format to RGB565 (or GRAYSCALE)
-sensor.set_framesize(sensor.QVGA)      # Set frame size to QVGA (320x240)
-sensor.set_windowing((240, 240))       # Set 240x240 window.
-sensor.skip_frames(time=2000)          # Let the camera adjust.
-
-mobilenet_version = "1" # 1
-mobilenet_width = "0.5" # 1.0, 0.75, 0.50, 0.25
-mobilenet_resolution = "128" # 224, 192, 160, 128
-
-mobilenet = "mobilenet_v%s_%s_%s_quant.tflite" % (mobilenet_version, mobilenet_width, mobilenet_resolution)
-labels = [line.rstrip('\n') for line in open("mobilenet_labels.txt")]
-
-clock = time.clock()
-while(True):
-    clock.tick()
-
-    img = sensor.snapshot()
-
-    # net.classify() will run the network on an roi in the image (or on the whole image if the roi is not
-    # specified). A classification score output vector will be generated for each location. At each scale the
-    # detection window is moved around in the ROI using x_overlap (0-1) and y_overlap (0-1) as a guide.
-    # If you set the overlap to 0.5 then each detection window will overlap the previous one by 50%. Note
-    # that the computational workload goes WAY up with more overlap. Finally, for multi-scale matching after
-    # sliding the network around in the x/y dimensions the detection window will shrink by scale_mul (0-1)
-    # down to min_scale (0-1). For example, if scale_mul is 0.5 the detection window will shrink by 50%.
-    # Note that at a lower scale there's even more area to search if x_overlap and y_overlap are small...
-
-    # default settings just do one detection... change them to search the image...
-    for obj in tf.classify(mobilenet, img, min_scale=1.0, scale_mul=0.5, x_overlap=0.0, y_overlap=0.0):
-        print("**********\nTop 5 Detections at [x=%d,y=%d,w=%d,h=%d]" % obj.rect())
-        img.draw_rectangle(obj.rect())
-        # This combines the labels and confidence values into a list of tuples
-        # and then sorts that list by the confidence values.
-        sorted_list = sorted(zip(labels, obj.output()), key = lambda x: x[1], reverse = True)
-        for i in range(5):
-            print("%s = %f" % (sorted_list[i][0], sorted_list[i][1]))
-    print(clock.fps(), "fps")
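
Note: the min_scale/scale_mul comments above imply a simple geometric scale schedule for the sliding window. A tiny standalone sketch (plain Python, not part of the example) that lists the scales a given pair of values would produce:

def window_scales(min_scale, scale_mul):
    # Start at full size and shrink by scale_mul until the window would drop below min_scale.
    scales, s = [], 1.0
    while s >= min_scale:
        scales.append(s)
        s *= scale_mul
    return scales

print(window_scales(1.0, 0.5))   # [1.0] -> the default "one detection" behavior
print(window_scales(0.25, 0.5))  # [1.0, 0.5, 0.25] -> three passes, much more work
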
@@ -1,9 +1,8 @@
 # TensorFlow Lite Object Detection Example
 #
-# This example shows off object detection. Object detection is much more powerful than
-# object classification. It can locate multiple objects in the image.
+# This example uses the built-in FOMO model to detect faces.
 
-import sensor, image, time, os, tf
+import sensor, image, time, tf, math
 
 sensor.reset()                         # Reset and initialize the sensor.
 sensor.set_pixformat(sensor.RGB565)    # Set pixel format to RGB565 (or GRAYSCALE)
@@ -11,13 +10,14 @@ sensor.set_framesize(sensor.QVGA) # Set frame size to QVGA (320x240)
 sensor.set_windowing((240, 240))       # Set 240x240 window.
 sensor.skip_frames(time=2000)          # Let the camera adjust.
 
-net = tf.load('<object_detection_network>', load_to_fb=True)
-labels = []
+min_confidence = 0.4
 
-try: # Load labels if they exist
-    labels = [line.rstrip('\n') for line in open("labels.txt")]
-except:
-    pass
+# Load the built-in FOMO face detection model.
+labels, net = tf.load_builtin_model("fomo_face_detection")
+
+# Alternatively, models can be loaded from filesystem storage.
+#net = tf.load('<object_detection_network>', load_to_fb=True)
+#labels = [line.rstrip('\n') for line in open("labels.txt")]
 
 colors = [ # Add more colors if you are detecting more than 7 types of classes at once.
     (255, 0, 0),
@@ -35,17 +35,20 @@ while(True):
 
     img = sensor.snapshot()
 
-    # detect() segments an object using the provided segmentation model. This produces multiple
-    # grayscale images per object class that we are trying to detect. detect() then runs
-    # find_blobs() internally on the segmented images to find all blob locations and then returns
-    # the bounding boxes of all blobs found per object class. So, detect() returns a list of lists of
-    # classification objects and the respective confidence levels.
+    # detect() returns all objects found in the image (split out per class already).
+    # We skip class index 0, as that is the background, and then draw circles at the
+    # centers of our objects.
 
-    for i, detection_list in enumerate(net.detect(img, thresholds=[(128, 255)])):
-        if (i < len(labels)):
-            print("********** %s **********" % labels[i])
+    for i, detection_list in enumerate(net.detect(img, thresholds=[(math.ceil(min_confidence * 255), 255)])):
+        if (i == 0): continue # background class
+        if (len(detection_list) == 0): continue # no detections for this class?
+
+        print("********** %s **********" % labels[i])
         for d in detection_list:
-            print(d)
-            img.draw_rectangle(d.rect(), color=colors[i])
+            [x, y, w, h] = d.rect()
+            center_x = math.floor(x + (w / 2))
+            center_y = math.floor(y + (h / 2))
+            print(f"x {center_x}\ty {center_y}")
+            img.draw_circle((center_x, center_y, 12), color=colors[i], thickness=2)
 
-    print(clock.fps(), "fps", end="\n\n")
+    print(clock.fps(), "fps", end="\n")
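
Note: the new thresholds argument maps min_confidence onto the model's 0-255 output range, so with min_confidence = 0.4 only detections scoring at least math.ceil(0.4 * 255) = 102 are kept. The mapping in isolation:

import math

min_confidence = 0.4
threshold = (math.ceil(min_confidence * 255), 255)
print(threshold)  # (102, 255)
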
@@ -1,48 +0,0 @@
-# TensorFlow Lite Person Detection Example
-#
-# Google's Person Detection Model detects if a person is in view.
-#
-# In this example we slide the detector window over the image and get a list
-# of activations. Note that using a CNN with a sliding window is extremely computationally
-# expensive, so for an exhaustive search do not expect the CNN to be real-time.
-
-import sensor, image, time, os, tf
-
-sensor.reset()                         # Reset and initialize the sensor.
-sensor.set_pixformat(sensor.GRAYSCALE) # Set pixel format to GRAYSCALE (or RGB565)
-sensor.set_framesize(sensor.QVGA)      # Set frame size to QVGA (320x240)
-sensor.set_windowing((240, 240))       # Set 240x240 window.
-sensor.skip_frames(time=2000)          # Let the camera adjust.
-
-# Load the built-in person detection network (the network is in your OpenMV Cam's firmware).
-labels, net = tf.load_builtin_model('person_detection')
-
-clock = time.clock()
-while(True):
-    clock.tick()
-
-    img = sensor.snapshot()
-
-    # net.classify() will run the network on an roi in the image (or on the whole image if the roi is not
-    # specified). A classification score output vector will be generated for each location. At each scale the
-    # detection window is moved around in the ROI using x_overlap (0-1) and y_overlap (0-1) as a guide.
-    # If you set the overlap to 0.5 then each detection window will overlap the previous one by 50%. Note
-    # that the computational workload goes WAY up with more overlap. Finally, for multi-scale matching after
-    # sliding the network around in the x/y dimensions the detection window will shrink by scale_mul (0-1)
-    # down to min_scale (0-1). For example, if scale_mul is 0.5 the detection window will shrink by 50%.
-    # Note that at a lower scale there's even more area to search if x_overlap and y_overlap are small...
-
-    # Setting x_overlap=-1 forces the window to stay centered in the ROI in the x direction always. If
-    # y_overlap is not -1 the method will search in all vertical positions.
-
-    # Setting y_overlap=-1 forces the window to stay centered in the ROI in the y direction always. If
-    # x_overlap is not -1 the method will search in all horizontal positions.
-
-    # default settings just do one detection... change them to search the image...
-    for obj in net.classify(img, min_scale=0.5, scale_mul=0.5, x_overlap=-1, y_overlap=-1):
-        print("**********\nDetections at [x=%d,y=%d,w=%d,h=%d]" % obj.rect())
-        for i in range(len(obj.output())):
-            print("%s = %f" % (labels[i], obj.output()[i]))
-        img.draw_rectangle(obj.rect())
-        img.draw_string(obj.x()+3, obj.y()-1, labels[obj.output().index(max(obj.output()))], mono_space = False)
-    print(clock.fps(), "fps")
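
Note: each window returned by net.classify() carries a full score vector. A short sketch, assuming the labels/net pair loaded above, that only annotates windows whose top score clears a confidence cutoff; the 0.6 value is illustrative, not from the original example:

for obj in net.classify(img, min_scale=0.5, scale_mul=0.5, x_overlap=-1, y_overlap=-1):
    scores = obj.output()
    best = scores.index(max(scores))
    if scores[best] > 0.6:  # only draw confident windows
        img.draw_rectangle(obj.rect())
        img.draw_string(obj.x()+3, obj.y()-1, labels[best], mono_space=False)
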
@@ -1,42 +0,0 @@
-# TensorFlow Lite Person Detection Example
-#
-# Google's Person Detection Model detects if a person is in view.
-#
-# In this example we slide the detector window over the image and get a list
-# of activations. Note that using a CNN with a sliding window is extremely computationally
-# expensive, so for an exhaustive search do not expect the CNN to be real-time.
-
-import sensor, image, time, os, tf
-
-sensor.reset()                         # Reset and initialize the sensor.
-sensor.set_pixformat(sensor.GRAYSCALE) # Set pixel format to GRAYSCALE (or RGB565)
-sensor.set_framesize(sensor.QVGA)      # Set frame size to QVGA (320x240)
-sensor.set_windowing((240, 240))       # Set 240x240 window.
-sensor.skip_frames(time=2000)          # Let the camera adjust.
-
-# Load the built-in person detection network (the network is in your OpenMV Cam's firmware).
-labels, net = tf.load_builtin_model('person_detection')
-
-clock = time.clock()
-while(True):
-    clock.tick()
-
-    img = sensor.snapshot()
-
-    # net.classify() will run the network on an roi in the image (or on the whole image if the roi is not
-    # specified). A classification score output vector will be generated for each location. At each scale the
-    # detection window is moved around in the ROI using x_overlap (0-1) and y_overlap (0-1) as a guide.
-    # If you set the overlap to 0.5 then each detection window will overlap the previous one by 50%. Note
-    # that the computational workload goes WAY up with more overlap. Finally, for multi-scale matching after
-    # sliding the network around in the x/y dimensions the detection window will shrink by scale_mul (0-1)
-    # down to min_scale (0-1). For example, if scale_mul is 0.5 the detection window will shrink by 50%.
-    # Note that at a lower scale there's even more area to search if x_overlap and y_overlap are small...
-
-    # default settings just do one detection... change them to search the image...
-    for obj in net.classify(img, min_scale=1.0, scale_mul=0.5, x_overlap=0.0, y_overlap=0.0):
-        print("**********\nDetections at [x=%d,y=%d,w=%d,h=%d]" % obj.rect())
-        for i in range(len(obj.output())):
-            print("%s = %f" % (labels[i], obj.output()[i]))
-        img.draw_rectangle(obj.rect())
-        img.draw_string(obj.x()+3, obj.y()-1, labels[obj.output().index(max(obj.output()))], mono_space = False)
-    print(clock.fps(), "fps")