From 331bdd67c60cfe2908f795436cd78b29528b3b6d Mon Sep 17 00:00:00 2001 From: iabdalkader Date: Sun, 10 Jul 2022 18:35:31 +0200 Subject: [PATCH] scripts/examples: Clean up TensorFlow examples. --- .../00-TensorFlow/tf_face_collection.py | 31 ---------- .../00-TensorFlow/tf_face_recognition.py | 41 ------------- ...t_center.py => tf_image_classification.py} | 5 +- .../tf_mobilenet_search_whole_window.py | 60 ------------------- .../00-TensorFlow/tf_object_detection.py | 43 ++++++------- .../tf_person_detection_search_just_center.py | 48 --------------- ...tf_person_detection_search_whole_window.py | 42 ------------- 7 files changed, 25 insertions(+), 245 deletions(-) delete mode 100644 scripts/examples/05-Machine-Learning/00-TensorFlow/tf_face_collection.py delete mode 100644 scripts/examples/05-Machine-Learning/00-TensorFlow/tf_face_recognition.py rename scripts/examples/05-Machine-Learning/00-TensorFlow/{tf_mobilenet_search_just_center.py => tf_image_classification.py} (98%) delete mode 100644 scripts/examples/05-Machine-Learning/00-TensorFlow/tf_mobilenet_search_whole_window.py delete mode 100644 scripts/examples/05-Machine-Learning/00-TensorFlow/tf_person_detection_search_just_center.py delete mode 100644 scripts/examples/05-Machine-Learning/00-TensorFlow/tf_person_detection_search_whole_window.py diff --git a/scripts/examples/05-Machine-Learning/00-TensorFlow/tf_face_collection.py b/scripts/examples/05-Machine-Learning/00-TensorFlow/tf_face_collection.py deleted file mode 100644 index b8f7a5d26..000000000 --- a/scripts/examples/05-Machine-Learning/00-TensorFlow/tf_face_collection.py +++ /dev/null @@ -1,31 +0,0 @@ -# Face Collection -# -# Use this script to gather face images for building a TensorFlow dataset. This script automatically -# zooms in the largest face in the field of view which you can then save using the data set editor. - -import sensor, image, time - -sensor.reset() -sensor.set_pixformat(sensor.RGB565) -sensor.set_framesize(sensor.QVGA) -sensor.skip_frames(time = 2000) - -clock = time.clock() - -largest_face = None -largest_face_timeout = 0 - -while(True): - clock.tick() - - faces = sensor.snapshot().gamma_corr(contrast=1.5).find_features(image.HaarCascade("frontalface")) - - if faces: - largest_face = max(faces, key = lambda f: f[2] * f[3]) - largest_face_timeout = 20 - - if largest_face_timeout > 0: - sensor.get_fb().crop(roi=largest_face) - largest_face_timeout -= 1 - - print(clock.fps()) diff --git a/scripts/examples/05-Machine-Learning/00-TensorFlow/tf_face_recognition.py b/scripts/examples/05-Machine-Learning/00-TensorFlow/tf_face_recognition.py deleted file mode 100644 index 6bf348e0e..000000000 --- a/scripts/examples/05-Machine-Learning/00-TensorFlow/tf_face_recognition.py +++ /dev/null @@ -1,41 +0,0 @@ -# Face Recognition -# -# Use this script to run a TensorFlow lite image classifier on faces detected within an image. -# The classifier is free to do facial recognition, expression detection, or whatever. - -import sensor, image, time, tf - -sensor.reset() -sensor.set_pixformat(sensor.RGB565) -sensor.set_framesize(sensor.QVGA) -sensor.skip_frames(time = 2000) - -clock = time.clock() - -net = tf.load("trained.tflite", load_to_fb=True) -labels = [l.rstrip('\n') for l in open("labels.txt")] - -while(True): - clock.tick() - - # Take a picture and brighten things up for the frontal face detector. - img = sensor.snapshot().gamma_corr(contrast=1.5) - - # Returns a list of rects (x, y, w, h) where faces are. 
- faces = img.find_features(image.HaarCascade("frontalface")) - - for f in faces: - - # Classify a face and get the class scores list - scores = net.classify(img, roi=f)[0].output() - - # Find the highest class score and lookup the label for that - label = labels[scores.index(max(scores))] - - # Draw a box around the face - img.draw_rectangle(f) - - # Draw the label above the face - img.draw_string(f[0]+3, f[1]-1, label, mono_space=False) - - print(clock.fps()) diff --git a/scripts/examples/05-Machine-Learning/00-TensorFlow/tf_mobilenet_search_just_center.py b/scripts/examples/05-Machine-Learning/00-TensorFlow/tf_image_classification.py similarity index 98% rename from scripts/examples/05-Machine-Learning/00-TensorFlow/tf_mobilenet_search_just_center.py rename to scripts/examples/05-Machine-Learning/00-TensorFlow/tf_image_classification.py index 0aec25b05..a29379c91 100644 --- a/scripts/examples/05-Machine-Learning/00-TensorFlow/tf_mobilenet_search_just_center.py +++ b/scripts/examples/05-Machine-Learning/00-TensorFlow/tf_image_classification.py @@ -48,14 +48,13 @@ while(True): # down to min_scale (0-1). For example, if scale_mul is 0.5 the detection window will shrink by 50%. # Note that at a lower scale there's even more area to search if x_overlap and y_overlap are small... + # default settings just do one detection... change them to search the image... # Setting x_overlap=-1 forces the window to stay centered in the ROI in the x direction always. If # y_overlap is not -1 the method will search in all vertical positions. - # Setting y_overlap=-1 forces the window to stay centered in the ROI in the y direction always. If # x_overlap is not -1 the method will serach in all horizontal positions. - # default settings just do one detection... change them to search the image... - for obj in tf.classify(mobilenet, img, min_scale=1.0, scale_mul=0.5, x_overlap=-1, y_overlap=-1): + for obj in tf.classify(mobilenet, img, min_scale=1.0, scale_mul=0.5, x_overlap=0.0, y_overlap=0.0): print("**********\nTop 5 Detections at [x=%d,y=%d,w=%d,h=%d]" % obj.rect()) img.draw_rectangle(obj.rect()) # This combines the labels and confidence values into a list of tuples diff --git a/scripts/examples/05-Machine-Learning/00-TensorFlow/tf_mobilenet_search_whole_window.py b/scripts/examples/05-Machine-Learning/00-TensorFlow/tf_mobilenet_search_whole_window.py deleted file mode 100644 index 6b105ebf0..000000000 --- a/scripts/examples/05-Machine-Learning/00-TensorFlow/tf_mobilenet_search_whole_window.py +++ /dev/null @@ -1,60 +0,0 @@ -# TensorFlow Lite Mobilenet V1 Example -# -# Google's Mobilenet V1 detects 1000 classes of objects -# -# WARNING: Mobilenet is trained on ImageNet and isn't meant to classify anything -# in the real world. It's just designed to score well on the ImageNet dataset. -# This example just shows off running mobilenet on the OpenMV Cam. However, the -# default model is not really usable for anything. You have to use transfer -# learning to apply the model to a target problem by re-training the model. -# -# NOTE: This example only works on the OpenMV Cam H7 Pro (that has SDRAM) and better! -# To get the models please see the CNN Network library in OpenMV IDE under -# Tools -> Machine Vision. The labels are there too. -# You should insert a microSD card into your camera and copy-paste the mobilenet_labels.txt -# file and your chosen model into the root folder for ths script to work. -# -# In this example we slide the detector window over the image and get a list -# of activations. 
Note that use a CNN with a sliding window is extremely compute
-# expensive so for an exhaustive search do not expect the CNN to be real-time.
-
-import sensor, image, time, os, tf
-
-sensor.reset()                         # Reset and initialize the sensor.
-sensor.set_pixformat(sensor.RGB565)    # Set pixel format to RGB565 (or GRAYSCALE)
-sensor.set_framesize(sensor.QVGA)      # Set frame size to QVGA (320x240)
-sensor.set_windowing((240, 240))       # Set 240x240 window.
-sensor.skip_frames(time=2000)          # Let the camera adjust.
-
-mobilenet_version = "1" # 1
-mobilenet_width = "0.5" # 1.0, 0.75, 0.50, 0.25
-mobilenet_resolution = "128" # 224, 192, 160, 128
-
-mobilenet = "mobilenet_v%s_%s_%s_quant.tflite" % (mobilenet_version, mobilenet_width, mobilenet_resolution)
-labels = [line.rstrip('\n') for line in open("mobilenet_labels.txt")]
-
-clock = time.clock()
-while(True):
-    clock.tick()
-
-    img = sensor.snapshot()
-
-    # net.classify() will run the network on an roi in the image (or on the whole image if the roi is not
-    # specified). A classification score output vector will be generated for each location. At each scale the
-    # detection window is moved around in the ROI using x_overlap (0-1) and y_overlap (0-1) as a guide.
-    # If you set the overlap to 0.5 then each detection window will overlap the previous one by 50%. Note
-    # the computational work load goes WAY up the more overlap. Finally, for multi-scale matching after
-    # sliding the network around in the x/y dimensions the detection window will shrink by scale_mul (0-1)
-    # down to min_scale (0-1). For example, if scale_mul is 0.5 the detection window will shrink by 50%.
-    # Note that at a lower scale there's even more area to search if x_overlap and y_overlap are small...
-
-    # default settings just do one detection... change them to search the image...
-    for obj in tf.classify(mobilenet, img, min_scale=1.0, scale_mul=0.5, x_overlap=0.0, y_overlap=0.0):
-        print("**********\nTop 5 Detections at [x=%d,y=%d,w=%d,h=%d]" % obj.rect())
-        img.draw_rectangle(obj.rect())
-        # This combines the labels and confidence values into a list of tuples
-        # and then sorts that list by the confidence values.
-        sorted_list = sorted(zip(labels, obj.output()), key = lambda x: x[1], reverse = True)
-        for i in range(5):
-            print("%s = %f" % (sorted_list[i][0], sorted_list[i][1]))
-    print(clock.fps(), "fps")
diff --git a/scripts/examples/05-Machine-Learning/00-TensorFlow/tf_object_detection.py b/scripts/examples/05-Machine-Learning/00-TensorFlow/tf_object_detection.py
index 48a2254ba..cf0625655 100644
--- a/scripts/examples/05-Machine-Learning/00-TensorFlow/tf_object_detection.py
+++ b/scripts/examples/05-Machine-Learning/00-TensorFlow/tf_object_detection.py
@@ -1,9 +1,8 @@
 # TensorFlow Lite Object Detection Example
 #
-# This example shows off object detection. Object detect is much more powerful than
-# object classification. It can locate multiple objects in the image.
+# This example uses the built-in FOMO model to detect faces.
 
-import sensor, image, time, os, tf
+import sensor, image, time, tf, math
 
 sensor.reset()                         # Reset and initialize the sensor.
 sensor.set_pixformat(sensor.RGB565)    # Set pixel format to RGB565 (or GRAYSCALE)
@@ -11,13 +10,14 @@ sensor.set_framesize(sensor.QVGA)      # Set frame size to QVGA (320x240)
 sensor.set_windowing((240, 240))       # Set 240x240 window.
 sensor.skip_frames(time=2000)          # Let the camera adjust. 
-net = tf.load('', load_to_fb=True)
-labels = []
+min_confidence = 0.4
 
-try: # Load labels if they exist
-    labels = [line.rstrip('\n') for line in open("labels.txt")]
-except:
-    pass
+# Load built-in FOMO face detection model
+labels, net = tf.load_builtin_model("fomo_face_detection")
+
+# Alternatively, models can be loaded from the filesystem.
+#net = tf.load('', load_to_fb=True)
+#labels = [line.rstrip('\n') for line in open("labels.txt")]
 
 colors = [ # Add more colors if you are detecting more than 7 types of classes at once.
     (255,   0,   0),
@@ -35,17 +35,20 @@ while(True):
 
     img = sensor.snapshot()
 
-    # detect() segments an object using the provided segmentation model. This produces mutliple
-    # grayscale images per object class that we are trying to detect. detect() then runs
-    # find_blobs() internally on the segmented images to find all blob locations and then returns
-    # the bound boxes of all blobs found per object class. So, detect() returns a list of lists of
-    # classification objects and the respective confidence level.
+    # detect() returns all objects found in the image (split out per class already).
+    # We skip class index 0, as that is the background, and then draw circles at the
+    # centers of the detected objects.
 
-    for i, detection_list in enumerate(net.detect(img, thresholds=[(128, 255)])):
-        if (i < len(labels)):
-            print("********** %s **********" % labels[i])
+    for i, detection_list in enumerate(net.detect(img, thresholds=[(math.ceil(min_confidence * 255), 255)])):
+        if (i == 0): continue # background class
+        if (len(detection_list) == 0): continue # no detections for this class?
+
+        print("********** %s **********" % labels[i])
         for d in detection_list:
-            print(d)
-            img.draw_rectangle(d.rect(), color=colors[i])
+            [x, y, w, h] = d.rect()
+            center_x = math.floor(x + (w / 2))
+            center_y = math.floor(y + (h / 2))
+            print(f"x {center_x}\ty {center_y}")
+            img.draw_circle((center_x, center_y, 12), color=colors[i], thickness=2)
 
-    print(clock.fps(), "fps", end="\n\n")
+    print(clock.fps(), "fps", end="\n")
diff --git a/scripts/examples/05-Machine-Learning/00-TensorFlow/tf_person_detection_search_just_center.py b/scripts/examples/05-Machine-Learning/00-TensorFlow/tf_person_detection_search_just_center.py
deleted file mode 100644
index b1531f58b..000000000
--- a/scripts/examples/05-Machine-Learning/00-TensorFlow/tf_person_detection_search_just_center.py
+++ /dev/null
@@ -1,48 +0,0 @@
-# TensorFlow Lite Person Dection Example
-#
-# Google's Person Detection Model detects if a person is in view.
-#
-# In this example we slide the detector window over the image and get a list
-# of activations. Note that use a CNN with a sliding window is extremely compute
-# expensive so for an exhaustive search do not expect the CNN to be real-time.
-
-import sensor, image, time, os, tf
-
-sensor.reset()                         # Reset and initialize the sensor.
-sensor.set_pixformat(sensor.GRAYSCALE) # Set pixel format to RGB565 (or GRAYSCALE)
-sensor.set_framesize(sensor.QVGA)      # Set frame size to QVGA (320x240)
-sensor.set_windowing((240, 240))       # Set 240x240 window.
-sensor.skip_frames(time=2000)          # Let the camera adjust.
-
-# Load the built-in person detection network (the network is in your OpenMV Cam's firmware).
-labels, net = tf.load_builtin_model('person_detection')
-
-clock = time.clock()
-while(True):
-    clock.tick()
-
-    img = sensor.snapshot()
-
-    # net.classify() will run the network on an roi in the image (or on the whole image if the roi is not
-    # specified). 
A classification score output vector will be generated for each location. At each scale the - # detection window is moved around in the ROI using x_overlap (0-1) and y_overlap (0-1) as a guide. - # If you set the overlap to 0.5 then each detection window will overlap the previous one by 50%. Note - # the computational work load goes WAY up the more overlap. Finally, for multi-scale matching after - # sliding the network around in the x/y dimensions the detection window will shrink by scale_mul (0-1) - # down to min_scale (0-1). For example, if scale_mul is 0.5 the detection window will shrink by 50%. - # Note that at a lower scale there's even more area to search if x_overlap and y_overlap are small... - - # Setting x_overlap=-1 forces the window to stay centered in the ROI in the x direction always. If - # y_overlap is not -1 the method will search in all vertical positions. - - # Setting y_overlap=-1 forces the window to stay centered in the ROI in the y direction always. If - # x_overlap is not -1 the method will serach in all horizontal positions. - - # default settings just do one detection... change them to search the image... - for obj in net.classify(img, min_scale=0.5, scale_mul=0.5, x_overlap=-1, y_overlap=-1): - print("**********\nDetections at [x=%d,y=%d,w=%d,h=%d]" % obj.rect()) - for i in range(len(obj.output())): - print("%s = %f" % (labels[i], obj.output()[i])) - img.draw_rectangle(obj.rect()) - img.draw_string(obj.x()+3, obj.y()-1, labels[obj.output().index(max(obj.output()))], mono_space = False) - print(clock.fps(), "fps") diff --git a/scripts/examples/05-Machine-Learning/00-TensorFlow/tf_person_detection_search_whole_window.py b/scripts/examples/05-Machine-Learning/00-TensorFlow/tf_person_detection_search_whole_window.py deleted file mode 100644 index ff05e03d6..000000000 --- a/scripts/examples/05-Machine-Learning/00-TensorFlow/tf_person_detection_search_whole_window.py +++ /dev/null @@ -1,42 +0,0 @@ -# TensorFlow Lite Person Dection Example -# -# Google's Person Detection Model detects if a person is in view. -# -# In this example we slide the detector window over the image and get a list -# of activations. Note that use a CNN with a sliding window is extremely compute -# expensive so for an exhaustive search do not expect the CNN to be real-time. - -import sensor, image, time, os, tf - -sensor.reset() # Reset and initialize the sensor. -sensor.set_pixformat(sensor.GRAYSCALE) # Set pixel format to RGB565 (or GRAYSCALE) -sensor.set_framesize(sensor.QVGA) # Set frame size to QVGA (320x240) -sensor.set_windowing((240, 240)) # Set 240x240 window. -sensor.skip_frames(time=2000) # Let the camera adjust. - -# Load the built-in person detection network (the network is in your OpenMV Cam's firmware). -labels, net = tf.load_builtin_model('person_detection') - -clock = time.clock() -while(True): - clock.tick() - - img = sensor.snapshot() - - # net.classify() will run the network on an roi in the image (or on the whole image if the roi is not - # specified). A classification score output vector will be generated for each location. At each scale the - # detection window is moved around in the ROI using x_overlap (0-1) and y_overlap (0-1) as a guide. - # If you set the overlap to 0.5 then each detection window will overlap the previous one by 50%. Note - # the computational work load goes WAY up the more overlap. 
Finally, for multi-scale matching after - # sliding the network around in the x/y dimensions the detection window will shrink by scale_mul (0-1) - # down to min_scale (0-1). For example, if scale_mul is 0.5 the detection window will shrink by 50%. - # Note that at a lower scale there's even more area to search if x_overlap and y_overlap are small... - - # default settings just do one detection... change them to search the image... - for obj in net.classify(img, min_scale=1.0, scale_mul=0.5, x_overlap=0.0, y_overlap=0.0): - print("**********\nDetections at [x=%d,y=%d,w=%d,h=%d]" % obj.rect()) - for i in range(len(obj.output())): - print("%s = %f" % (labels[i], obj.output()[i])) - img.draw_rectangle(obj.rect()) - img.draw_string(obj.x()+3, obj.y()-1, labels[obj.output().index(max(obj.output()))], mono_space = False) - print(clock.fps(), "fps")
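
For reviewers who want to see the new behavior in one place, the sketch below assembles the added lines from the tf_object_detection.py hunks above into a single standalone script. It assumes a firmware build whose tf module provides load_builtin_model("fomo_face_detection") and net.detect() as used in this patch, and the colors list here is shortened for brevity (the example file keeps seven entries), so treat it as a reading aid rather than a copy of the file.

# Condensed reading of the updated tf_object_detection.py example (FOMO face detection).
import sensor, image, time, tf, math

sensor.reset()                         # Reset and initialize the sensor.
sensor.set_pixformat(sensor.RGB565)    # Set pixel format to RGB565 (or GRAYSCALE).
sensor.set_framesize(sensor.QVGA)      # Set frame size to QVGA (320x240).
sensor.set_windowing((240, 240))       # Set 240x240 window.
sensor.skip_frames(time=2000)          # Let the camera adjust.

min_confidence = 0.4

# Load the FOMO face detection model shipped with the firmware.
labels, net = tf.load_builtin_model("fomo_face_detection")

# One drawing color per class; the example file defines seven of these.
colors = [(255, 0, 0), (0, 255, 0), (255, 255, 0), (0, 0, 255)]

clock = time.clock()
while(True):
    clock.tick()
    img = sensor.snapshot()

    # min_confidence (0..1) is mapped onto the 0..255 threshold range that detect()
    # expects; detect() returns one detection list per class, class 0 is background.
    for i, detection_list in enumerate(
            net.detect(img, thresholds=[(math.ceil(min_confidence * 255), 255)])):
        if i == 0: continue                   # skip the background class
        if len(detection_list) == 0: continue # nothing found for this class

        print("********** %s **********" % labels[i])
        for d in detection_list:
            # Each detection is reported as a bounding box; mark its center.
            [x, y, w, h] = d.rect()
            center_x = math.floor(x + (w / 2))
            center_y = math.floor(y + (h / 2))
            print(f"x {center_x}\ty {center_y}")
            img.draw_circle((center_x, center_y, 12), color=colors[i], thickness=2)

    print(clock.fps(), "fps")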