Add facial recog scripts
This commit is contained in:
parent c6d73c864c
commit 61f1c02e32
31  scripts/examples/25-Machine-Learning/tf_face_collection.py  Normal file
@@ -0,0 +1,31 @@
# Face Collection
#
# Use this script to gather face images for building a TensorFlow dataset. This script automatically
# zooms in on the largest face in the field of view, which you can then save using the data set editor.

import sensor, image, time

sensor.reset()
sensor.set_pixformat(sensor.RGB565)
sensor.set_framesize(sensor.QVGA)
sensor.skip_frames(time = 2000)

clock = time.clock()

largest_face = None
largest_face_timeout = 0

while(True):
    clock.tick()

    faces = sensor.snapshot().gamma_corr(contrast=1.5).find_features(image.HaarCascade("frontalface"))

    if faces:
        largest_face = max(faces, key = lambda f: f[2] * f[3])
        largest_face_timeout = 20

    if largest_face_timeout > 0:
        sensor.get_fb().crop(roi=largest_face)
        largest_face_timeout -= 1

    print(clock.fps())
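For reference, a minimal sketch (not part of this commit) of how the same zoom-and-crop loop could write face crops straight to the camera's filesystem instead of going through OpenMV IDE's data set editor. The "dataset/person_0" folder name, the 50-image cap, and the 200 ms pause are illustrative assumptions; the folder must already exist on the flash or SD card.

import sensor, image, time

sensor.reset()
sensor.set_pixformat(sensor.RGB565)
sensor.set_framesize(sensor.QVGA)
sensor.skip_frames(time=2000)

count = 0
while count < 50:                                     # assumed cap: 50 samples per class
    img = sensor.snapshot().gamma_corr(contrast=1.5)
    faces = img.find_features(image.HaarCascade("frontalface"))
    if faces:
        face = max(faces, key=lambda f: f[2] * f[3])  # largest face by area
        img.crop(roi=face)                            # zoom in on that face
        img.save("dataset/person_0/%04d.jpg" % count) # assumed, pre-created folder
        count += 1
        time.sleep_ms(200)                            # leave time to change pose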
41  scripts/examples/25-Machine-Learning/tf_face_recognition.py  Normal file
@@ -0,0 +1,41 @@
# Face Recognition
#
# Use this script to run a TensorFlow Lite image classifier on faces detected within an image.
# The classifier is free to do facial recognition, expression detection, or whatever.

import sensor, image, time, tf

sensor.reset()
sensor.set_pixformat(sensor.RGB565)
sensor.set_framesize(sensor.QVGA)
sensor.skip_frames(time = 2000)

clock = time.clock()

net = tf.load("trained.tflite", load_to_fb=True)
labels = [l.rstrip('\n') for l in open("labels.txt")]

while(True):
    clock.tick()

    # Take a picture and brighten things up for the frontal face detector.
    img = sensor.snapshot().gamma_corr(contrast=1.5)

    # Returns a list of rects (x, y, w, h) where faces are.
    faces = img.find_features(image.HaarCascade("frontalface"))

    for f in faces:

        # Classify a face and get the class scores list
        scores = net.classify(img, roi=f)[0].output()

        # Find the highest class score and look up the label for it
        label = labels[scores.index(max(scores))]

        # Draw a box around the face
        img.draw_rectangle(f)

        # Draw the label above the face
        img.draw_string(f[0]+3, f[1]-1, label, mono_space=False)

    print(clock.fps())
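And a minimal variant (again not part of the commit) of the recognition loop that only labels a face when the top class score clears a confidence threshold, so unknown faces stay unlabeled. The 0.7 cutoff is an assumed value to tune per model; "trained.tflite" and "labels.txt" are the same file names the script above expects.

import sensor, image, tf

sensor.reset()
sensor.set_pixformat(sensor.RGB565)
sensor.set_framesize(sensor.QVGA)
sensor.skip_frames(time=2000)

net = tf.load("trained.tflite", load_to_fb=True)
labels = [l.rstrip('\n') for l in open("labels.txt")]

while True:
    img = sensor.snapshot().gamma_corr(contrast=1.5)
    for f in img.find_features(image.HaarCascade("frontalface")):
        scores = net.classify(img, roi=f)[0].output()   # per-class scores
        best = max(scores)
        img.draw_rectangle(f)
        if best > 0.7:                                  # assumed confidence threshold
            img.draw_string(f[0] + 3, f[1] - 1,
                            "%s %.2f" % (labels[scores.index(best)], best),
                            mono_space=False)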