diff --git a/usr/examples/09-Feature-Detection/freak.py b/usr/examples/09-Feature-Detection/freak.py
deleted file mode 100644
index 040a82031..000000000
--- a/usr/examples/09-Feature-Detection/freak.py
+++ /dev/null
@@ -1,77 +0,0 @@
-# Freak Example
-#
-# This script shows off keypoint tracking by itself. Put an object in front of
-# your OpenMV Cam without anything else in the image (i.e. camera should be
-# facing a smooth wall) and the camera will learn the keypoints for an track
-# whatever object is in the image. You can save keypoints to disk either via
-# the OpenMV IDE or from in your script.
-#
-# Matching keypoints works by first extracting keypoints from an ROI. Once those
-# are extracted then the OpenMV Cam compares the extracted keypoints against all
-# the keypoints in an image. It tries to find the center matching point between
-# the two sets of keypoints.
-#
-# Keep in mind that keypoint matching with just one training example isn't very
-# robust. If you want professional quality results then stick with getting
-# professionally generated Haar Cascades like the frontalface or eye cascade.
-# That said, if you're in a very controlled enviroment then keypoint tracking
-# allows your OpenMV Cam to learn objects on the fly.
-#
-# If... you want really good keypoint matching results we suggest you gather
-# keypoints from all faces of an object and with multiple rotations and scales.
-# Comparing against all theses sets of keypoints helps versus just one.
-#
-# NOTE: LOTS OF KEYPOINTS MAY CAUSE THE SYSTEM TO RUN OUT OF MEMORY!
-
-import sensor, time, image
-
-# Normalized keypoints are not rotation invariant...
-NORMALIZED=False
-# Keypoint extractor threshold, range from 0 to any number.
-# This threshold is used when extracting keypoints, the lower
-# the threshold the higher the number of keypoints extracted.
-KEYPOINTS_THRESH=30
-# Keypoint-level threshold, range from 0 to 100.
-# This threshold is used when matching two keypoint descriptors, it's the
-# percentage of the distance between two descriptors to the max distance.
-# In other words, the minimum matching percentage between 2 keypoints.
-MATCHING_THRESH=80
-
-# Reset sensor
-sensor.reset()
-
-# Sensor settings
-sensor.set_contrast(1)
-sensor.set_gainceiling(16)
-sensor.set_framesize(sensor.QQVGA)
-sensor.set_pixformat(sensor.GRAYSCALE)
-
-# Skip a few frames to allow the sensor settle down
-# Note: This takes more time when exec from the IDE.
-for i in range(0, 30):
-    img = sensor.snapshot()
-    img.draw_string(0, 0, "Please wait...")
-
-kpts1 = None
-# Uncomment to load keypoints from file
-#kpts1 = image.load_descriptor(image.FREAK, "/desc.freak")
-clock = time.clock()
-
-while (True):
-    clock.tick()
-    img = sensor.snapshot()
-    kpts2 = img.find_keypoints(threshold=KEYPOINTS_THRESH, normalized=NORMALIZED)
-
-    if (kpts1==None):
-        kpts1 = kpts2
-        print(kpts1)
-    elif kpts2:
-        c = image.match_descriptor(image.FREAK, kpts1, kpts2, threshold=MATCHING_THRESH)
-        # C[3] contains the percentage of matching keypoints.
-        # If more than 25% of the keypoints match, draw stuff.
-        if (c[2]>25):
-            img.draw_cross(c[0], c[1], size=15)
-            img.draw_string(0, 10, "Match %d%%"%(c[2]))
-
-    # Draw FPS
-    img.draw_string(0, 0, "FPS:%.2f"%(clock.fps()))
diff --git a/usr/examples/09-Feature-Detection/keypoints.py b/usr/examples/09-Feature-Detection/keypoints.py
new file mode 100644
index 000000000..c91e50a86
--- /dev/null
+++ b/usr/examples/09-Feature-Detection/keypoints.py
@@ -0,0 +1,53 @@
+# Object tracking with keypoints example.
+# Show the camera an object and then run the script. A set of keypoints will be extracted
+# once and then tracked in the following frames. If you want a new set of keypoints, re-run
+# the script. NOTE: see the docs for arguments to tune find_keypoints and match_descriptor.
+import sensor, time, image
+
+# Reset sensor
+sensor.reset()
+
+# Sensor settings
+sensor.set_contrast(1)
+sensor.set_gainceiling(16)
+sensor.set_framesize(sensor.QCIF)
+sensor.set_pixformat(sensor.GRAYSCALE)
+
+sensor.set_auto_gain(False, value=100)
+sensor.skip_frames(30)
+
+def draw_keypoints(img, kpts):
+    print(kpts)
+    img.draw_keypoints(kpts)
+    img = sensor.snapshot()
+    time.sleep(1000)
+
+kpts1 = None
+# NOTE: uncomment to load a keypoints descriptor from file
+#kpts1 = image.load_descriptor(image.ORB, "/desc.orb")
+#img = sensor.snapshot()
+#draw_keypoints(img, kpts1)
+
+clock = time.clock()
+while (True):
+    clock.tick()
+    img = sensor.snapshot()
+    # NOTE: See the docs for other arguments
+    kpts2 = img.find_keypoints(max_keypoints=100, scale_factor=1.2)
+
+    if (kpts2 and kpts1 == None):
+        kpts1 = kpts2
+        draw_keypoints(img, kpts1)
+    elif kpts2:
+        c = image.match_descriptor(image.ORB, kpts1, kpts2, threshold=70)
+        match = c[6] # c[6] contains the number of matches.
+        if (match>2):
+            img.draw_rectangle(c[2:6])
+            img.draw_cross(c[0], c[1], size=10)
+
+        print(kpts2, "matched:%d dt:%d"%(match, c[7]))
+        # NOTE: uncomment if you want to draw the keypoints
+        #img.draw_keypoints(kpts2, size=KEYPOINTS_SIZE, matched=True)
+
+    # Draw FPS
+    img.draw_string(0, 0, "FPS:%.2f"%(clock.fps()))
diff --git a/usr/examples/09-Feature-Detection/keypoints_save.py b/usr/examples/09-Feature-Detection/keypoints_save.py
new file mode 100644
index 000000000..d454e5c93
--- /dev/null
+++ b/usr/examples/09-Feature-Detection/keypoints_save.py
@@ -0,0 +1,31 @@
+# Keypoints descriptor example.
+# This example shows how to save a keypoints descriptor to file. Show the camera an object
+# and then run the script. The script will extract and save a keypoints descriptor and the image.
+# You can use the keypoints_editor.py util to remove unwanted keypoints.
+#
+# NOTE: Please reset the camera after running this script to see the new file.
+import sensor, time, image
+
+# Reset sensor
+sensor.reset()
+
+# Sensor settings
+sensor.set_contrast(1)
+sensor.set_gainceiling(16)
+sensor.set_framesize(sensor.QCIF)
+sensor.set_pixformat(sensor.GRAYSCALE)
+
+sensor.set_auto_gain(False, value=100)
+sensor.skip_frames(30)
+
+FILE_NAME = "desc"
+img = sensor.snapshot()
+# NOTE: See the docs for other arguments
+kpts = img.find_keypoints(scale_factor=1.2)
+
+image.save_descriptor(image.ORB, "/%s.orb"%(FILE_NAME), kpts)
+img.save("/%s.pgm"%(FILE_NAME))
+
+img.draw_keypoints(kpts)
+sensor.snapshot()
+time.sleep(1000)
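For reference, a minimal sketch of how the two new examples can be used together: it assumes keypoints_save.py has already been run with the default FILE_NAME, so an ORB descriptor exists at "/desc.orb" on the camera's filesystem, and it reuses only the sensor setup and the find_keypoints / match_descriptor calls that appear in the scripts above.

# Sketch: track an object using a descriptor saved by keypoints_save.py.
# Assumes "/desc.orb" exists on the camera's filesystem.
import sensor, time, image

sensor.reset()
sensor.set_contrast(1)
sensor.set_gainceiling(16)
sensor.set_framesize(sensor.QCIF)
sensor.set_pixformat(sensor.GRAYSCALE)
sensor.set_auto_gain(False, value=100)
sensor.skip_frames(30)

# Load the saved descriptor instead of learning one from the first frame.
kpts1 = image.load_descriptor(image.ORB, "/desc.orb")

clock = time.clock()
while (True):
    clock.tick()
    img = sensor.snapshot()
    kpts2 = img.find_keypoints(max_keypoints=100, scale_factor=1.2)
    if kpts2:
        c = image.match_descriptor(image.ORB, kpts1, kpts2, threshold=70)
        if (c[6] > 2):  # c[6] is the number of matched keypoints.
            img.draw_rectangle(c[2:6])           # Bounding box of the matched area.
            img.draw_cross(c[0], c[1], size=10)  # Center of the match.
    img.draw_string(0, 0, "FPS:%.2f"%(clock.fps()))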