diff --git a/usr/examples/02-Board-Control/pwm_control.py b/usr/examples/02-Board-Control/pwm_control.py
new file mode 100644
index 000000000..2c69f2ad7
--- /dev/null
+++ b/usr/examples/02-Board-Control/pwm_control.py
@@ -0,0 +1,25 @@
+# PWM Control Example
+#
+# This example shows how to do PWM with your OpenMV Cam.
+#
+# WARNING: PWM control is not easy with MicroPython. You have to use
+# the correct timer with the correct pins and channels - the mapping is
+# fixed by the hardware, so check your board's pinout. If you need to
+# change the pins from the example below try different timer/channel/pin configs.
+
+import pyb, time
+
+tim = pyb.Timer(1, freq=1000)
+
+ch1 = tim.channel(2, pyb.Timer.PWM, pin=pyb.Pin("P0"))
+ch2 = tim.channel(3, pyb.Timer.PWM, pin=pyb.Pin("P1"))
+
+while(True):
+    for i in range(100):
+        ch1.pulse_width_percent(i)
+        ch2.pulse_width_percent(100-i)
+        time.sleep(5)
+    for i in range(100):
+        ch1.pulse_width_percent(100-i)
+        ch2.pulse_width_percent(i)
+        time.sleep(5)
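Any other timer/channel/pin triple from your board's alternate-function table works the same way. A minimal sketch of trying a different config - the Timer 4 / channel 1 / pin "P7" pairing here is a hypothetical placeholder, so verify it against your board's pinout before copying it:

import pyb, time

tim = pyb.Timer(4, freq=1000)  # hypothetical timer number - verify for your board
ch = tim.channel(1, pyb.Timer.PWM, pin=pyb.Pin("P7"))  # hypothetical channel/pin pairing

while(True):
    for pct in (0, 25, 50, 75, 100):  # step the duty cycle up
        ch.pulse_width_percent(pct)
        time.sleep(500)  # sleep() follows these examples' millisecond convention
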
diff --git a/usr/examples/15-Tests/test_binary_2.py b/usr/examples/04-Image-Filters/color_binary_filter.py
similarity index 85%
rename from usr/examples/15-Tests/test_binary_2.py
rename to usr/examples/04-Image-Filters/color_binary_filter.py
index 1e3c0266f..c5989e0c8 100644
--- a/usr/examples/15-Tests/test_binary_2.py
+++ b/usr/examples/04-Image-Filters/color_binary_filter.py
@@ -1,11 +1,20 @@
+# Color Binary Filter Example
+#
+# This script shows off the binary image filter. This script was originally a
+# test script... but it's also useful for showing how to use binary.
+
 import pyb, sensor, image, math
+
 sensor.reset()
 sensor.set_framesize(sensor.QVGA)
 sensor.set_pixformat(sensor.RGB565)
+
 red_threshold = (0,100, 0,127, 0,127) # L A B
 green_threshold = (0,100, -128,0, 0,127) # L A B
 blue_threshold = (0,100, -128,127, -128,0) # L A B
+
 while(True):
+    # Test red threshold
     for i in range(100):
         img = sensor.snapshot()
diff --git a/usr/examples/15-Tests/test_erode_and_dilate.py b/usr/examples/04-Image-Filters/erode_and_dilate.py
similarity index 61%
rename from usr/examples/15-Tests/test_erode_and_dilate.py
rename to usr/examples/04-Image-Filters/erode_and_dilate.py
index 4bcf7da85..06a6fde68 100644
--- a/usr/examples/15-Tests/test_erode_and_dilate.py
+++ b/usr/examples/04-Image-Filters/erode_and_dilate.py
@@ -1,24 +1,35 @@
-import pyb, sensor, image, math
+# Erode and Dilate Example
+#
+# This example shows off the erode and dilate functions which you can run on
+# a binary image to remove noise. This example was originally a test but it's
+# useful for showing off how these functions work.
+
+import pyb, sensor, image
+
 sensor.reset()
 sensor.set_framesize(sensor.QVGA)
+
 grayscale_thres = (170, 255)
 rgb565_thres = (70, 100, -128, 127, -128, 127)
+
 while(True):
+
     sensor.set_pixformat(sensor.GRAYSCALE)
-    for i in range(100):
+    for i in range(20):
         img = sensor.snapshot()
         img.binary([grayscale_thres])
         img.erode(2)
-    for i in range(100):
+    for i in range(20):
         img = sensor.snapshot()
         img.binary([grayscale_thres])
         img.dilate(2)
+
     sensor.set_pixformat(sensor.RGB565)
-    for i in range(100):
+    for i in range(20):
         img = sensor.snapshot()
         img.binary([rgb565_thres])
         img.erode(2)
-    for i in range(100):
+    for i in range(20):
         img = sensor.snapshot()
         img.binary([rgb565_thres])
         img.dilate(2)
diff --git a/usr/examples/15-Tests/test_binary_1.py b/usr/examples/04-Image-Filters/grayscale_binary_filter.py
similarity index 78%
rename from usr/examples/15-Tests/test_binary_1.py
rename to usr/examples/04-Image-Filters/grayscale_binary_filter.py
index cac1fc7bb..2c8b5d535 100644
--- a/usr/examples/15-Tests/test_binary_1.py
+++ b/usr/examples/04-Image-Filters/grayscale_binary_filter.py
@@ -1,9 +1,17 @@
+# Grayscale Binary Filter Example
+#
+# This script shows off the binary image filter. This script was originally a
+# test script... but it's also useful for showing how to use binary.
+
 import pyb, sensor, image, math
+
 sensor.reset()
 sensor.set_framesize(sensor.QVGA)
 sensor.set_pixformat(sensor.GRAYSCALE)
+
 low_threshold = (0, 50)
 high_threshold = (205, 255)
+
 while(True):
     # Test low threshold
     for i in range(100):
diff --git a/usr/examples/04-Image-Filters/grayscale_filter.py b/usr/examples/04-Image-Filters/grayscale_filter.py
new file mode 100644
index 000000000..8f4ee7493
--- /dev/null
+++ b/usr/examples/04-Image-Filters/grayscale_filter.py
@@ -0,0 +1,25 @@
+# Grayscale Filter Example
+#
+# The sensor module can perform some basic image processing while it is reading
+# the image in. This example shows off how to apply grayscale thresholds.
+#
+# WARNING - THIS FEATURE NEEDS TO BE RE-WORKED. THE API MAY CHANGE IN THE
+# FUTURE! Please use the binary function for image segmentation if possible.
+
+import sensor, image, time
+
+sensor.reset() # Initialize the camera sensor.
+sensor.set_pixformat(sensor.RGB565) # or sensor.GRAYSCALE
+sensor.set_framesize(sensor.QVGA) # or sensor.QQVGA (or others)
+sensor.skip_frames(10) # Let new settings take effect.
+clock = time.clock() # Tracks FPS.
+
+# Segment the image by the following thresholds. This segmentation is done while
+# the image is being read in so it does not cost any additional time...
+sensor.set_image_filter(sensor.FILTER_BW, lower=128, upper=255)
+
+while(True):
+    clock.tick() # Track elapsed milliseconds between snapshots().
+    img = sensor.snapshot() # Take a picture and return the image.
+    print(clock.fps()) # Note: Your OpenMV Cam runs about half as fast while
+    # connected to your computer. The FPS should increase once disconnected.
diff --git a/usr/examples/04-Image-Filters/skin_filter.py b/usr/examples/04-Image-Filters/skin_filter.py
new file mode 100644
index 000000000..571c364a7
--- /dev/null
+++ b/usr/examples/04-Image-Filters/skin_filter.py
@@ -0,0 +1,27 @@
+# Skin Filter Example
+#
+# The sensor module can perform some basic image processing while it is reading
+# the image in. This example shows off how to apply skin thresholds.
+#
+# WARNING - THIS FEATURE NEEDS TO BE RE-WORKED. THE API MAY CHANGE IN THE
+# FUTURE! Please use the binary function for image segmentation if possible.
+
+import sensor, image, time
+
+sensor.reset() # Initialize the camera sensor.
+sensor.set_pixformat(sensor.RGB565) # or sensor.GRAYSCALE
+sensor.set_framesize(sensor.QVGA) # or sensor.QQVGA (or others)
+sensor.skip_frames(10) # Let new settings take effect.
+clock = time.clock() # Tracks FPS.
+
+# Segment the image by the following thresholds. This segmentation is done while
+# the image is being read in so it does not cost any additional time...
+sensor.set_image_filter(sensor.FILTER_SKIN)
+# NOTE: The skin filter doesn't really work that well. We do not suggest using
+# it at all.
+
+while(True):
+    clock.tick() # Track elapsed milliseconds between snapshots().
+    img = sensor.snapshot() # Take a picture and return the image.
+    print(clock.fps()) # Note: Your OpenMV Cam runs about half as fast while
+    # connected to your computer. The FPS should increase once disconnected.
diff --git a/usr/examples/07-Face-Detection/face_detection.py b/usr/examples/07-Face-Detection/face_detection.py
index 8158c03ee..a4d84c566 100644
--- a/usr/examples/07-Face-Detection/face_detection.py
+++ b/usr/examples/07-Face-Detection/face_detection.py
@@ -1,3 +1,16 @@
+# Face Detection Example
+#
+# This example shows off the built-in face detection feature of the OpenMV Cam.
+#
+# Face detection works by using the Haar Cascade feature detector on an image. A
+# Haar Cascade is a series of simple area contrast checks. For the built-in
+# frontalface detector there are 25 stages of checks with each stage having
+# hundreds of checks apiece. Haar Cascades run fast because later stages are
+# only evaluated if previous stages pass. Additionally, your OpenMV Cam uses
+# a data structure called the integral image to quickly execute each area
+# contrast check in constant time (the reason for feature detection being
+# grayscale only is because of the space requirement for the integral image).
+
 import sensor, time, image
 
 # Reset sensor
@@ -6,6 +19,7 @@ sensor.reset()
 
 # Sensor settings
 sensor.set_contrast(1)
 sensor.set_gainceiling(16)
+# HQVGA and GRAYSCALE are the best for face tracking.
 sensor.set_framesize(sensor.HQVGA)
 sensor.set_pixformat(sensor.GRAYSCALE)
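The integral image mentioned above is worth a small illustration: each cell stores the sum of all pixels above and to the left of it, so any rectangle sum - and therefore any area contrast check - costs just four lookups. A plain-Python sketch of the idea (an illustration, not the OpenMV internals):

def integral(img):  # img is a list of rows of grayscale pixel values
    h, w = len(img), len(img[0])
    ii = [[0] * (w + 1) for _ in range(h + 1)]
    for y in range(h):
        for x in range(w):
            ii[y+1][x+1] = img[y][x] + ii[y][x+1] + ii[y+1][x] - ii[y][x]
    return ii

def area_sum(ii, x, y, w, h):  # constant time: four lookups
    return ii[y+h][x+w] - ii[y][x+w] - ii[y+h][x] + ii[y][x]
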
diff --git a/usr/examples/07-Face-Detection/face_tracking.py b/usr/examples/07-Face-Detection/face_tracking.py
index fac1aea07..5b7fb1be4 100644
--- a/usr/examples/07-Face-Detection/face_tracking.py
+++ b/usr/examples/07-Face-Detection/face_tracking.py
@@ -1,6 +1,16 @@
+# Face Tracking Example
+#
+# This example shows off using the keypoints feature of your OpenMV Cam to track
+# a face after it has been detected by a Haar Cascade. The first part of this
+# script finds a face in the image using the frontalface Haar Cascade.
+# After that, the script uses the keypoints feature to automatically learn your
+# face and track it. Keypoints can be used to automatically track anything.
+#
+# NOTE: LOTS OF KEYPOINTS MAY CAUSE THE SYSTEM TO RUN OUT OF MEMORY!
+
 import sensor, time, image
 
-# Rotation.
+# Normalized keypoints are not rotation invariant...
 NORMALIZED=False
 # Keypoint extractor threshold, range from 0 to any number.
 # This threshold is used when extracting keypoints, the lower
diff --git a/usr/examples/08-Eye-Tracking/face_eye_detection.py b/usr/examples/08-Eye-Tracking/face_eye_detection.py
index d208d90ab..45aa5bf3d 100644
--- a/usr/examples/08-Eye-Tracking/face_eye_detection.py
+++ b/usr/examples/08-Eye-Tracking/face_eye_detection.py
@@ -1,3 +1,9 @@
+# Face Eye Detection Example
+#
+# This script uses the built-in frontalface detector to find a face and then
+# the eyes within the face. If you want to determine the eye gaze please see the
+# iris_detection script for an example on how to do that.
+
 import sensor, time, image
 
 # Reset sensor
diff --git a/usr/examples/08-Eye-Tracking/iris_detection.py b/usr/examples/08-Eye-Tracking/iris_detection.py
index 0811b0c4f..dabafccad 100644
--- a/usr/examples/08-Eye-Tracking/iris_detection.py
+++ b/usr/examples/08-Eye-Tracking/iris_detection.py
@@ -1,3 +1,10 @@
+# Iris Detection Example
+#
+# This example shows how to find the eye gaze (pupil detection) after finding
+# the eyes in an image. This script uses the find_eyes function which determines
+# the center point of an ROI that should contain a pupil. It does this by
+# finding the center of the darkest area in the eye ROI, which is the pupil center.
+
 import sensor, time, image
 
 # Reset sensor
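The "center of the darkest area" idea reduces to a darkness-weighted centroid. A plain-Python sketch of the approach (an illustration only, not the find_eyes implementation):

def dark_centroid(roi):  # roi is a list of rows of grayscale pixel values
    sx = sy = total = 0
    for y, row in enumerate(roi):
        for x, v in enumerate(row):
            w = 255 - v  # darker pixels pull the centroid harder
            sx += x * w
            sy += y * w
            total += w
    total = max(total, 1)  # guard against an all-white roi
    return (sx // total, sy // total)  # (cx, cy) of the darkest region
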
diff --git a/usr/examples/09-Feature-Detection/freak.py b/usr/examples/09-Feature-Detection/freak.py
index 2bbe58a95..040a82031 100644
--- a/usr/examples/09-Feature-Detection/freak.py
+++ b/usr/examples/09-Feature-Detection/freak.py
@@ -1,11 +1,36 @@
+# FREAK Example
+#
+# This script shows off keypoint tracking by itself. Put an object in front of
+# your OpenMV Cam without anything else in the image (i.e. the camera should be
+# facing a smooth wall) and the camera will learn the keypoints for and track
+# whatever object is in the image. You can save keypoints to disk either via
+# the OpenMV IDE or from within your script.
+#
+# Matching keypoints works by first extracting keypoints from an ROI. Once those
+# are extracted the OpenMV Cam compares the extracted keypoints against all
+# the keypoints in an image. It tries to find the center matching point between
+# the two sets of keypoints.
+#
+# Keep in mind that keypoint matching with just one training example isn't very
+# robust. If you want professional quality results then stick with getting
+# professionally generated Haar Cascades like the frontalface or eye cascade.
+# That said, if you're in a very controlled environment then keypoint tracking
+# allows your OpenMV Cam to learn objects on the fly.
+#
+# If you want really good keypoint matching results we suggest you gather
+# keypoints from all faces of an object and with multiple rotations and scales.
+# Comparing against all these sets of keypoints helps versus just one.
+#
+# NOTE: LOTS OF KEYPOINTS MAY CAUSE THE SYSTEM TO RUN OUT OF MEMORY!
+
 import sensor, time, image
 
-# Rotation.
+# Normalized keypoints are not rotation invariant...
 NORMALIZED=False
 # Keypoint extractor threshold, range from 0 to any number.
 # This threshold is used when extracting keypoints, the lower
 # the threshold the higher the number of keypoints extracted.
-KEYPOINTS_THRESH=20
+KEYPOINTS_THRESH=30
 # Keypoint-level threshold, range from 0 to 100.
 # This threshold is used when matching two keypoint descriptors, it's the
 # percentage of the distance between two descriptors to the max distance.
@@ -45,8 +70,7 @@ while (True):
         # C[3] contains the percentage of matching keypoints.
         # If more than 25% of the keypoints match, draw stuff.
         if (c[2]>25):
-            img.draw_cross(c[0], c[1], size=5)
-            img.draw_keypoints(kpts2, color=255, size=12)
+            img.draw_cross(c[0], c[1], size=15)
             img.draw_string(0, 10, "Match %d%%"%(c[2]))
 
 # Draw FPS
diff --git a/usr/examples/09-Feature-Detection/lbp.py b/usr/examples/09-Feature-Detection/lbp.py
index 73a610d5e..91f97f055 100644
--- a/usr/examples/09-Feature-Detection/lbp.py
+++ b/usr/examples/09-Feature-Detection/lbp.py
@@ -1,3 +1,12 @@
+# Local Binary Patterns (LBP) Example
+#
+# This example shows off how to use the local binary pattern feature descriptor
+# on your OpenMV Cam. LBP descriptors work like FREAK feature descriptors.
+#
+# WARNING: LBP support needs to be reworked! As of right now this feature needs
+# a lot of work to be made into something useful. This script will remain to show
+# that the functionality exists, but, in its current state, it is inadequate.
+
 import sensor, time, image
 
 sensor.reset()
@@ -28,7 +37,7 @@ clock = time.clock()
 while (True):
     clock.tick()
     img = sensor.snapshot()
-
+
     objects = img.find_features(face_cascade, threshold=0.5, scale=1.25)
     if objects:
         face = objects[0]
diff --git a/usr/examples/09-Feature-Detection/template_matching.py b/usr/examples/09-Feature-Detection/template_matching.py
index f4fbc77a3..38f0cb6b8 100644
--- a/usr/examples/09-Feature-Detection/template_matching.py
+++ b/usr/examples/09-Feature-Detection/template_matching.py
@@ -1,3 +1,13 @@
+# Template Matching Example - Normalized Cross Correlation (NCC)
+#
+# This example shows off how to use the NCC feature of your OpenMV Cam to match
+# image patches to parts of an image... except in extremely controlled environments
+# NCC is not all that useful.
+#
+# WARNING: NCC support needs to be reworked! As of right now this feature needs
+# a lot of work to be made into something useful. This script will remain to show
+# that the functionality exists, but, in its current state, it is inadequate.
+
 import time, sensor, image
 
 # Reset sensor
@@ -12,12 +22,12 @@ sensor.set_framesize(sensor.QQVGA)
 sensor.set_pixformat(sensor.GRAYSCALE)
 
 # Load template
-template = image.Image("/template.pgm")
+template = image.Image("/template.bmp") # Image should be like 32x32 grayscale.
 
 # Run template matching
 while (True):
     img = sensor.snapshot()
+
     r = img.find_template(template, 0.75)
-    if r:
+    if r:
         img.draw_rectangle(r)
-        time.sleep(50)
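For reference, the score that find_template thresholds against is standard normalized cross-correlation: the covariance of the patch and template divided by the product of their standard deviations. A plain-Python sketch of the math (not the OpenMV implementation):

import math

def ncc(patch, templ):  # two equal-sized lists of rows of pixel values
    a = [p for row in patch for p in row]
    b = [p for row in templ for p in row]
    ma, mb = sum(a) / len(a), sum(b) / len(b)
    num = sum((x - ma) * (y - mb) for x, y in zip(a, b))
    den = math.sqrt(sum((x - ma) ** 2 for x in a) * sum((y - mb) ** 2 for y in b))
    return num / den if den else 0.0  # 1.0 means a perfect match
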
diff --git a/usr/examples/10-Color-Tracking/blob_detection.py b/usr/examples/10-Color-Tracking/blob_detection.py
index afd87fbb0..6783b417e 100644
--- a/usr/examples/10-Color-Tracking/blob_detection.py
+++ b/usr/examples/10-Color-Tracking/blob_detection.py
@@ -1,25 +1,33 @@
-import sensor, time, pyb
+# Blob Detection Example
+#
+# This example shows off how to use the find_blobs function to find color
+# blobs in the image. This example in particular looks for dark green objects.
 
-sensor.reset()
-sensor.set_framesize(sensor.QVGA)
-sensor.set_pixformat(sensor.RGB565)
+import sensor, image, time
 
-# Finds a red blob.
-COLOR1 = ( 50, 55, 73, 82, 47, 63)
-# Select an aera of the image and click copy color to get
-# new color tracking parameters for something in the image.
+# For color tracking to work really well you should ideally be in a very, very,
+# very controlled environment where the lighting is constant...
+green_threshold = ( 0, 80, -70, -10, 0, 30)
+# You may need to tweak the above settings for tracking green things...
+# Select an area in the Framebuffer to copy the color settings.
 
-clock = time.clock()
-while (True):
-    clock.tick()
-    # Take snapshot
-    image = sensor.snapshot()
+sensor.reset() # Initialize the camera sensor.
+sensor.set_pixformat(sensor.RGB565) # use RGB565.
+sensor.set_framesize(sensor.QQVGA) # use QQVGA for speed.
+sensor.skip_frames(10) # Let new settings take effect.
+sensor.set_whitebal(False) # turn this off.
+clock = time.clock() # Tracks FPS.
 
-    # Detect blobs in image
-    blobs = image.find_blobs([COLOR1])
+while(True):
+    clock.tick() # Track elapsed milliseconds between snapshots().
+    img = sensor.snapshot() # Take a picture and return the image.
 
-    # Draw rectangles around detected blobs
-    for blob in blobs:
-        image.draw_rectangle(blob[0:4])
+    blobs = img.find_blobs([green_threshold])
+    if blobs:
+        for b in blobs:
+            # Draw a rect around the blob.
+            img.draw_rectangle(b[0:4]) # rect
+            img.draw_cross(b[5], b[6]) # cx, cy
 
-    print(clock.fps())
+    print(clock.fps()) # Note: Your OpenMV Cam runs about half as fast while
+    # connected to your computer. The FPS should increase once disconnected.
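All of the color-tracking thresholds in these examples are six-value LAB tuples in a fixed order: L (lightness) min/max, then A (green to red) min/max, then B (blue to yellow) min/max. A tiny helper that just documents the convention - find_blobs takes the raw tuple, so the helper is only for readability:

def lab_threshold(l_min, l_max, a_min, a_max, b_min, b_max):
    # L spans 0 to 100; A and B span -128 to 127, with negative A toward
    # green, positive A toward red, negative B toward blue, and positive
    # B toward yellow.
    return (l_min, l_max, a_min, a_max, b_min, b_max)

green_threshold = lab_threshold(0, 80, -70, -10, 0, 30)
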
diff --git a/usr/examples/10-Color-Tracking/line_following.py b/usr/examples/10-Color-Tracking/line_following.py
new file mode 100644
index 000000000..69c9a3148
--- /dev/null
+++ b/usr/examples/10-Color-Tracking/line_following.py
@@ -0,0 +1,89 @@
+# Line Following Example
+#
+# Making a line following robot requires a lot of effort. This example script
+# shows how to do the computer vision part of the line following robot. You
+# can use the output from this script to drive a differential drive robot to
+# follow a line. This script just generates a single turn value that tells
+# your robot to go left or right.
+#
+# For this script to work properly you should point the camera at a line at a
+# 45 or so degree angle. Please make sure that only the line is within the
+# camera's field of view.
+
+import sensor, image, time, math
+
+# Tracks a white line. Use [(0, 64)] for tracking a black line.
+GRAYSCALE_THRESHOLD = [(128, 255)]
+
+# Each roi is (x, y, w, h). The line detection algorithm will try to find the
+# centroid of the largest blob in each roi. The x position of the centroids
+# will then be averaged with different weights where the most weight is assigned
+# to the roi near the bottom of the image and less to the next roi and so on.
+ROIS = [ # [ROI, weight]
+        (0, 100, 160, 20, 0.7), # You'll need to tweak the weights for your app
+        (0,  50, 160, 20, 0.3), # depending on how your robot is set up.
+        (0,   0, 160, 20, 0.1)
+       ]
+
+# Compute the weight divisor
+weight_sum = 0
+for r in ROIS: weight_sum += r[4]
+
+# Camera setup...
+sensor.reset() # Initialize the camera sensor.
+sensor.set_pixformat(sensor.GRAYSCALE) # use grayscale.
+sensor.set_framesize(sensor.QQVGA) # use QQVGA for speed.
+sensor.skip_frames(10) # Let new settings take effect.
+sensor.set_whitebal(False) # turn this off.
+clock = time.clock() # Tracks FPS.
+
+while(True):
+    clock.tick() # Track elapsed milliseconds between snapshots().
+    img = sensor.snapshot() # Take a picture and return the image.
+
+    centroid_sum = 0
+    for r in ROIS:
+        blobs = img.find_blobs(GRAYSCALE_THRESHOLD, roi=r[0:4]) # r[0:4] is roi tuple.
+        merged_blobs = img.find_markers(blobs) # merge overlapping blobs
+
+        if merged_blobs:
+            # Find the index of the blob with the most pixels.
+            most_pixels = 0
+            largest_blob = 0
+            for i in range(len(merged_blobs)):
+                if merged_blobs[i][4] > most_pixels:
+                    most_pixels = merged_blobs[i][4] # [4] is pixels.
+                    largest_blob = i
+
+            # Draw a rect around the blob.
+            img.draw_rectangle(merged_blobs[largest_blob][0:4]) # rect
+            img.draw_cross(merged_blobs[largest_blob][5], # cx
+                           merged_blobs[largest_blob][6]) # cy
+
+            # [5] of the blob is the x centroid - r[4] is the weight.
+            centroid_sum += merged_blobs[largest_blob][5] * r[4]
+
+    center_pos = (centroid_sum / weight_sum) # Determine center of line.
+
+    # Convert the center_pos to a deflection angle. We're using a non-linear
+    # operation so that the response gets stronger the farther off the line we
+    # are. Non-linear operations are good to use on the output of algorithms
+    # like this to cause a response "trigger".
+    deflection_angle = 0
+    # The 80 is from half the X res, the 60 is from half the Y res. The
+    # equation below is just computing the angle of a triangle where the
+    # opposite side of the triangle is the deviation of the center position
+    # from the center and the adjacent side is half the Y res. This limits
+    # the angle output to around -45 to 45. (It's not quite -45 and 45.)
+    deflection_angle = -math.atan((center_pos-80)/60)
+
+    # Convert angle in radians to degrees.
+    deflection_angle = math.degrees(deflection_angle)
+
+    # Now you have an angle telling you how much to turn the robot, which
+    # incorporates the part of the line nearest to the robot and parts of
+    # the line farther away from the robot for a better prediction.
+    print("Turn Angle: %f" % deflection_angle)
+
+    print(clock.fps()) # Note: Your OpenMV Cam runs about half as fast while
+    # connected to your computer. The FPS should increase once disconnected.
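A quick numeric check of the angle formula above, runnable in desktop Python as well:

import math

# QQVGA is 160x120, so half the X res is 80 and half the Y res is 60.
for center_pos in (0, 40, 80, 120, 160):
    angle = -math.degrees(math.atan((center_pos - 80) / 60))
    print("center_pos=%3d -> turn angle %+.1f deg" % (center_pos, angle))

A centered line (center_pos of 80) yields 0 degrees, and the extremes top out near +/-53 degrees, which is why the comment above says the range is not quite -45 to 45.
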
diff --git a/usr/examples/10-Color-Tracking/marker_tracking.py b/usr/examples/10-Color-Tracking/marker_tracking.py
new file mode 100644
index 000000000..7a0815f2e
--- /dev/null
+++ b/usr/examples/10-Color-Tracking/marker_tracking.py
@@ -0,0 +1,47 @@
+# Marker Tracking Example
+#
+# This example shows how to use the find_markers function to merge blobs of
+# different colors into one blob that represents a marker.
+#
+# Each blob that find_blobs returns has a bit set in a bitmask for the color
+# that produced it, i.e. the color that was passed to find_blobs. E.g. if you
+# pass find_blobs 3 colors then you'll get blobs with a color value of
+# (2^0), (2^1), or (2^2). These color values can be OR'ed together, because
+# they are a single bit each, to represent a multi-colored blob which you
+# can then classify as a marker.
+
+import sensor, image, time
+
+# For color tracking to work really well you should ideally be in a very, very,
+# very controlled environment where the lighting is constant. Additionally, if
+# you want to track more than 2 colors you need to set the boundaries for them
+# very narrowly. If you try to track... generally red, green, and blue then
+# you will end up just tracking everything which you don't want.
+red_threshold = ( 40, 60, 60, 90, 50, 70)
+blue_threshold = ( 0, 20, -10, 30, -60, 10)
+# You may need to tweak the above settings for tracking red and blue things...
+# Select an area in the Framebuffer to copy the color settings.
+
+sensor.reset() # Initialize the camera sensor.
+sensor.set_pixformat(sensor.RGB565) # use RGB565.
+sensor.set_framesize(sensor.QQVGA) # use QQVGA for speed.
+sensor.skip_frames(10) # Let new settings take effect.
+sensor.set_whitebal(False) # turn this off.
+clock = time.clock() # Tracks FPS.
+
+while(True):
+    clock.tick() # Track elapsed milliseconds between snapshots().
+    img = sensor.snapshot() # Take a picture and return the image.
+
+    blobs = img.find_blobs([red_threshold, blue_threshold])
+    merged_blobs = img.find_markers(blobs)
+    if merged_blobs:
+        for b in merged_blobs:
+            # Draw a rect around the blob.
+            img.draw_rectangle(b[0:4]) # rect
+            img.draw_cross(b[5], b[6]) # cx, cy
+            # Draw the color label. b[8] is the color label.
+            img.draw_string(b[0]+2, b[1]+2, "%d" % b[8])
+
+    print(clock.fps()) # Note: Your OpenMV Cam runs about half as fast while
+    # connected to your computer. The FPS should increase once disconnected.
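Because each threshold passed to find_blobs owns one bit of the color value, decoding a merged marker is plain bit testing. A hypothetical sketch assuming the two thresholds above were passed in that order:

def describe_marker(color_value):
    # Bit 0 is red_threshold and bit 1 is blue_threshold - the order in
    # which they were passed to find_blobs. A red+blue marker is 0b11.
    names = []
    if color_value & (1 << 0): names.append("red")
    if color_value & (1 << 1): names.append("blue")
    return "+".join(names) if names else "none"

print(describe_marker(3)) # prints "red+blue"
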
diff --git a/usr/examples/12-Thermopile-Shield/fir.py b/usr/examples/12-Thermopile-Shield/fir.py
index 41138138e..d3c56a49d 100644
--- a/usr/examples/12-Thermopile-Shield/fir.py
+++ b/usr/examples/12-Thermopile-Shield/fir.py
@@ -1,6 +1,7 @@
 # Thermopile Shield Demo
 #
-# Note: To run this example you will need a Thermopile Shield for your OpenMV Cam.
+# Note: To run this example you will need a Thermopile Shield for your OpenMV
+# Cam. Also, please disable JPEG mode in the IDE.
 #
 # The Thermopile Shield allows your OpenMV Cam to see heat!
 
diff --git a/usr/examples/12-Thermopile-Shield/fir_lcd.py b/usr/examples/12-Thermopile-Shield/fir_lcd.py
index ef4a94acc..560c0ebfb 100644
--- a/usr/examples/12-Thermopile-Shield/fir_lcd.py
+++ b/usr/examples/12-Thermopile-Shield/fir_lcd.py
@@ -1,7 +1,9 @@
-# Thermopile Shield Demo 2
+# Thermopile Shield Demo with LCD
 #
 # Note: To run this example you will need a Thermopile Shield for your OpenMV
-# Cam and a LCD Shield.
+# Cam and an LCD Shield. Also, please disable JPEG mode in the IDE.
+#
+# The Thermopile Shield allows your OpenMV Cam to see heat!
 
 import sensor, image, time, fir, lcd
 
diff --git a/usr/examples/14-WiFi-Shield/connect.py b/usr/examples/14-WiFi-Shield/connect.py
index d442b76fa..5341e1757 100644
--- a/usr/examples/14-WiFi-Shield/connect.py
+++ b/usr/examples/14-WiFi-Shield/connect.py
@@ -1,10 +1,15 @@
-# Simple WiFi scan example
-import time, pyb, network
+# Connect Example
+#
+# This example shows how to connect your OpenMV Cam with a WiFi shield to the Internet.
+
+import network
 
 SSID='' # Network SSID
 KEY='' # Network key
 
 # Init wlan module and connect to network
+print("Trying to connect... (may take a while)...")
+
 wlan = network.WINC()
 wlan.connect(SSID, key=KEY, security=wlan.WPA_PSK)
 
diff --git a/usr/examples/14-WiFi-Shield/dns.py b/usr/examples/14-WiFi-Shield/dns.py
index 41978c366..7d5ed6608 100644
--- a/usr/examples/14-WiFi-Shield/dns.py
+++ b/usr/examples/14-WiFi-Shield/dns.py
@@ -1,11 +1,16 @@
-# Simple DNS example
-import time, pyb, network, usocket
+# DNS Example
+#
+# This example shows how to get the IP address for websites via DNS.
+
+import network, usocket
 
 # AP info
 SSID='' # Network SSID
 KEY='' # Network key
 
 # Init wlan module and connect to network
+print("Trying to connect... (may take a while)...")
+
 wlan = network.WINC()
 wlan.connect(SSID, key=KEY, security=wlan.WPA_PSK)
 
diff --git a/usr/examples/14-WiFi-Shield/echo_server.py b/usr/examples/14-WiFi-Shield/echo_server.py
deleted file mode 100644
index 89d7cf673..000000000
--- a/usr/examples/14-WiFi-Shield/echo_server.py
+++ /dev/null
@@ -1,70 +0,0 @@
-'''
-    Simple echo server
-'''
-import wlan
-import socket
-import select
-import led, time
-
-SSID=''         # Network SSID
-KEY=''          # Network key
-HOST = ''       # Use first available interface
-PORT = 8000     # Arbitrary non-privileged port
-
-led.off(led.RED)
-led.off(led.BLUE)
-led.on(led.GREEN)
-
-# Init wlan module and connect to network
-wlan.init()
-wlan.connect(SSID, sec=wlan.WPA2, key=KEY)
-led.off(led.GREEN)
-
-# Wait for connection to be established
-while (True):
-    led.toggle(led.BLUE)
-    time.sleep(250)
-    led.toggle(led.BLUE)
-    time.sleep(250)
-    if wlan.connected():
-        led.on(led.BLUE)
-        break;
-
-# We should have a valid IP now via DHCP
-wlan.ifconfig()
-
-# Create server socket
-s = socket.socket(socket.AF_INET, socket.SOCK_STREAM, socket.IPPROTO_TCP)
-
-# Set socket in blocking mode
-s.setblocking(True)
-
-# Bind and listen
-s.bind((HOST, PORT))
-s.listen(5)
-
-while(True):
-    print ('Waiting for connections..')
-    client, addr = s.accept()
-    print ('Connected to ' + addr[0] + ':' + str(addr[1]))
-
-    # Set client socket non-blocking
-    client.setblocking(False)
-
-    while (True):
-        rfds, wfds, xfds = select.select([client], [], [client], 1.0)
-        if xfds:
-            print("socket exception")
-            break
-        elif rfds:
-            buf = client.recv(1024)
-            if len(buf) == 0: # peer has shutdown
-                print("socket closed")
-                client.close()
-                break
-            print ("recv:"+str(buf))
-            client.send(buf)
-        elif wfds:
-            print ("wfds")
-        else:
-            print ("timeout")
diff --git a/usr/examples/14-WiFi-Shield/fw_update.py b/usr/examples/14-WiFi-Shield/fw_update.py
index 365759007..f83c82a56 100644
--- a/usr/examples/14-WiFi-Shield/fw_update.py
+++ b/usr/examples/14-WiFi-Shield/fw_update.py
@@ -1,13 +1,15 @@
-'''
-    Firmware update examples - Note: copy the WINC1500/firmware folder to uSD
-'''
-import time, network
+# WINC Firmware Update Script
+#
+# To have a successful firmware update, create a "firmware" folder on the
+# uSD card and put a bin file in it. The firmware update code will load that
+# new firmware onto the WINC module.
 
-# Init wlan module in Download mode
+import network
+
+# Init wlan module in Download mode.
 wlan = network.WINC(True)
 #print("Firmware version:", wlan.fw_version())
 
 # Start the firmware update process.
 wlan.fw_update()
-#print("Firmware version:", wlan.fw_version())
\ No newline at end of file
+#print("Firmware version:", wlan.fw_version())
diff --git a/usr/examples/14-WiFi-Shield/mjpeg_streamer.py b/usr/examples/14-WiFi-Shield/mjpeg_streamer.py
index 495f93e32..3a0fc9aad 100644
--- a/usr/examples/14-WiFi-Shield/mjpeg_streamer.py
+++ b/usr/examples/14-WiFi-Shield/mjpeg_streamer.py
@@ -1,16 +1,15 @@
-'''
-    Simple MJPEG streaming server
-'''
-import time, sensor, pyb, network, usocket
+# MJPEG Streaming
+#
+# This example shows off how to do MJPEG streaming to a Firefox web browser
+# (IE and Chrome do not work). Just input your network SSID and KEY and then
+# connect to the IP address/port printed out from ifconfig.
 
-SSID=''         # Network SSID
-KEY=''          # Network key
-HOST = ''       # Use first available interface
-PORT = 8000     # Arbitrary non-privileged port
+import sensor, image, time, network, usocket
 
-led_r = pyb.LED(1)
-led_b = pyb.LED(2)
-led_g = pyb.LED(3)
+SSID=''         # Network SSID
+KEY=''          # Network key
+HOST = ''       # Use first available interface
+PORT = 8000     # Arbitrary non-privileged port
 
 # Reset sensor
 sensor.reset()
@@ -24,6 +23,7 @@ sensor.set_framesize(sensor.QVGA)
 sensor.set_pixformat(sensor.GRAYSCALE)
 
 # Init wlan module and connect to network
+print("Trying to connect... (may take a while)...")
 wlan = network.WINC()
 wlan.connect(SSID, key=KEY, security=wlan.WPA_PSK)
 
@@ -34,7 +34,7 @@ print(wlan.ifconfig())
 s = usocket.socket(usocket.AF_INET, usocket.SOCK_STREAM)
 
 # Bind and listen
-s.bind((HOST, PORT))
+s.bind([HOST, PORT])
 s.listen(5)
 
 # Set timeout to 1s
@@ -50,8 +50,8 @@ data = client.recv(1024)
 # Should parse client request here
 
 # Send multipart header
-client.send("HTTP/1.1 200 OK\r\n" \
-            "Server: OpenMV\r\n" \
+client.send("HTTP/1.1 200 OK\r\n" \
+            "Server: OpenMV\r\n" \
             "Content-Type: multipart/x-mixed-replace;boundary=openmv\r\n" \
             "Cache-Control: no-cache\r\n" \
             "Pragma: no-cache\r\n\r\n")
@@ -62,10 +62,10 @@ clock = time.clock()
 while (True):
     clock.tick() # Track elapsed milliseconds between snapshots().
     frame = sensor.snapshot()
-    client.send("\r\n--openmv\r\n" \
+    cframe = frame.compress(35)
+    client.send("\r\n--openmv\r\n" \
                 "Content-Type: image/jpeg\r\n"\
-                "Content-Length:"+str(frame.size())+"\r\n\r\n")
-    client.send(frame.compress(35))
+                "Content-Length:"+str(cframe.size())+"\r\n\r\n")
+    client.send(cframe)
     print(clock.fps())
-
-client.close()
\ No newline at end of file
+client.close()
diff --git a/usr/examples/14-WiFi-Shield/mjpeg_streamer_fir.py b/usr/examples/14-WiFi-Shield/mjpeg_streamer_fir.py
index 25ccc3a05..6cf824b3a 100644
--- a/usr/examples/14-WiFi-Shield/mjpeg_streamer_fir.py
+++ b/usr/examples/14-WiFi-Shield/mjpeg_streamer_fir.py
@@ -1,16 +1,15 @@
-'''
-    Simple MJPEG streaming server + FIR
-'''
-import time, sensor, pyb, network, usocket, fir
+# MJPEG Streaming with FIR
+#
+# This example shows off how to do MJPEG streaming to a Firefox web browser
+# (IE and Chrome do not work). Just input your network SSID and KEY and then
+# connect to the IP address/port printed out from ifconfig.
 
-SSID=''         # Network SSID
-KEY=''          # Network key
-HOST = ''       # Use first available interface
-PORT = 8000     # Arbitrary non-privileged port
+import sensor, image, network, usocket, fir
 
-led_r = pyb.LED(1)
-led_b = pyb.LED(2)
-led_g = pyb.LED(3)
+SSID=''         # Network SSID
+KEY=''          # Network key
+HOST = ''       # Use first available interface
+PORT = 8000     # Arbitrary non-privileged port
 
 # Reset sensor
 sensor.reset()
@@ -20,13 +19,14 @@ sensor.set_contrast(1)
 sensor.set_brightness(1)
 sensor.set_saturation(1)
 sensor.set_gainceiling(16)
-sensor.set_framesize(sensor.QVGA)
-sensor.set_pixformat(sensor.GRAYSCALE)
+sensor.set_framesize(sensor.QQVGA)
+sensor.set_pixformat(sensor.RGB565)
 
 # Initialize the thermal sensor
 fir.init()
 
 # Init wlan module and connect to network
+print("Trying to connect... (may take a while)...")
 wlan = network.WINC()
 wlan.connect(SSID, key=KEY, security=wlan.WPA_PSK)
 
@@ -62,6 +62,7 @@ client.send("HTTP/1.1 200 OK\r\n" \
 # Start streaming images
 while (True):
     image = sensor.snapshot()
+
     # Capture FIR data
     # ta: Ambient temperature
     # ir: Object temperatures (IR array)
@@ -77,10 +78,10 @@ while (True):
     image.draw_string(0, 8, "To min: %0.2f"%to_min, color = (0xFF, 0x00, 0x00))
     image.draw_string(0, 16, "To max: %0.2f"%to_max, color = (0xFF, 0x00, 0x00))
-
-    client.send("\r\n--openmv\r\n" \
+    cimage = image.compress(90)
+    client.send("\r\n--openmv\r\n" \
                 "Content-Type: image/jpeg\r\n"\
-                "Content-Length:"+str(image.size())+"\r\n\r\n")
-    client.send(image.compress(35))
-
+                "Content-Length:"+str(cimage.size())+"\r\n\r\n")
+    client.send(cimage)
+
 client.close()
diff --git a/usr/examples/14-WiFi-Shield/ntp.py b/usr/examples/14-WiFi-Shield/ntp.py
index 056caa05e..1037d3ff8 100644
--- a/usr/examples/14-WiFi-Shield/ntp.py
+++ b/usr/examples/14-WiFi-Shield/ntp.py
@@ -1,11 +1,17 @@
-# Simple NTP client
-import time, pyb, network, usocket, ustruct, utime
+# NTP Example
+#
+# This example shows how to get the current time using NTP with the WiFi shield.
+
+import network, usocket, ustruct, utime
 
 SSID='' # Network SSID
 KEY='' # Network key
+
 TIMESTAMP = 2208988800+946684800
 
 # Init wlan module and connect to network
+print("Trying to connect... (may take a while)...")
+
 wlan = network.WINC()
 wlan.connect(SSID, key=KEY, security=wlan.WPA_PSK)
 
diff --git a/usr/examples/14-WiFi-Shield/scan.py b/usr/examples/14-WiFi-Shield/scan.py
index 107976c45..273cc4fcb 100644
--- a/usr/examples/14-WiFi-Shield/scan.py
+++ b/usr/examples/14-WiFi-Shield/scan.py
@@ -1,4 +1,7 @@
-# Simple WiFi scan example
+# Scan Example
+#
+# This example shows how to scan for networks with the WiFi shield.
+
 import time, network
 
 wlan = network.WINC()
@@ -9,4 +12,4 @@ while (True):
     for ap in scan_result:
         print("Channel:%d RSSI:%d Auth:%d BSSID:%s SSID:%s"%(ap))
     print()
-    time.sleep(1000)
\ No newline at end of file
+    time.sleep(1000)
diff --git a/usr/examples/14-WiFi-Shield/tcp_client.py b/usr/examples/14-WiFi-Shield/tcp_client.py
index e5fbc7e14..2163cdbad 100644
--- a/usr/examples/14-WiFi-Shield/tcp_client.py
+++ b/usr/examples/14-WiFi-Shield/tcp_client.py
@@ -1,11 +1,16 @@
-# Simple NTP client
-import time, pyb, network, usocket
+# TCP Client Example
+#
+# This example shows how to send and receive TCP traffic with the WiFi shield.
+
+import network, usocket
 
 # AP info
 SSID='' # Network SSID
 KEY='' # Network key
 
 # Init wlan module and connect to network
+print("Trying to connect... (may take a while)...")
+
 wlan = network.WINC()
 wlan.connect(SSID, key=KEY, security=wlan.WPA_PSK)
 
diff --git a/usr/examples/15-Tests/colorbar.py b/usr/examples/15-Tests/colorbar.py
index 472570192..124957b56 100644
--- a/usr/examples/15-Tests/colorbar.py
+++ b/usr/examples/15-Tests/colorbar.py
@@ -1,7 +1,13 @@
+# Colorbar Test Example
+#
+# This example is the color bar test run by each OpenMV Cam before being allowed
+# out of the factory. The OMV sensors can output a color bar image which you
+# can threshold to check that the camera bus is connected correctly.
+
 import sensor, time
 
 sensor.reset()
-# Set sensor settings
+# Set sensor settings
 sensor.set_brightness(0)
 sensor.set_saturation(0)
 sensor.set_gainceiling(8)
@@ -14,11 +20,11 @@ sensor.set_pixformat(sensor.RGB565)
 # Enable colorbar test mode
 sensor.set_colorbar(True)
 
-# Skip a few frames to allow the sensor settle down
-for i in range(0, 30):
+# Skip a few frames to allow the sensor to settle down
+for i in range(0, 100):
     image = sensor.snapshot()
 
-#color bars thresholds
+# Color bars thresholds
 t = [lambda r, g, b: r < 50 and g < 50 and b < 50, # Black
      lambda r, g, b: r < 50 and g < 50 and b > 200, # Blue
      lambda r, g, b: r > 200 and g < 50 and b < 50, # Red
@@ -28,13 +34,13 @@ t = [lambda r, g, b: r < 50 and g < 50 and b < 50, # Black
      lambda r, g, b: r > 200 and g > 200 and b < 50, # Yellow
      lambda r, g, b: r > 200 and g > 200 and b > 200] # White
 
-#320x240 image with 8 color bars each one is approx 40 pixels.
-#we start from the center of the frame buffer, and average the
-#values of 10 sample pixels from the center of each color bar.
+# 320x240 image with 8 color bars each one is approx 40 pixels.
+# we start from the center of the frame buffer, and average the
+# values of 10 sample pixels from the center of each color bar.
 for i in range(0, 8):
     avg = (0, 0, 0)
-    idx = 40*i+20 #center of colorbars
-    for off in range(0, 10): #avg 10 pixels
+    idx = 40*i+20 # center of colorbars
+    for off in range(0, 10): # avg 10 pixels
         rgb = image.get_pixel(idx+off, 120)
         avg = tuple(map(sum, zip(avg, rgb)))
 
diff --git a/usr/examples/15-Tests/selftest.py b/usr/examples/15-Tests/selftest.py
index 7e97e2988..2fd1efeaa 100644
--- a/usr/examples/15-Tests/selftest.py
+++ b/usr/examples/15-Tests/selftest.py
@@ -1,13 +1,19 @@
+# Self Test Example
+#
+# This example shows how your OpenMV Cam tests itself before being allowed out
+# of the factory. Every OpenMV Cam should pass this test.
+
 import sensor, time, pyb
 
 def test_int_adc():
-    adc = pyb.ADCAll(12)
+
+    adc = pyb.ADCAll(12)
     # Test VBAT
     vbat = adc.read_core_vbat()
     vbat_diff = abs(vbat-3.3)
     if (vbat_diff > 0.1):
         raise Exception("INTERNAL ADC TEST FAILED VBAT=%fv"%vbat)
-
+
     # Test VREF
     vref = adc.read_core_vref()
     vref_diff = abs(vref-1.2)
@@ -17,13 +23,14 @@ def test_int_adc():
     print("\nINTERNAL ADC TEST PASSED...")
 
 def test_color_bars():
+
     sensor.reset()
-    # Set sensor settings
+    # Set sensor settings
     sensor.set_brightness(0)
     sensor.set_saturation(0)
     sensor.set_gainceiling(8)
     sensor.set_contrast(2)
-
+
     # Set sensor pixel format
     sensor.set_framesize(sensor.QVGA)
     sensor.set_pixformat(sensor.RGB565)
@@ -33,10 +40,10 @@ def test_color_bars():
     # Skip a few frames to allow the sensor settle down
     # Note: This takes more time when exec from the IDE.
-    for i in range(0, 10):
+    for i in range(0, 100):
         image = sensor.snapshot()
-
-    #color bars thresholds
+
+    # Color bars thresholds
     t = [lambda r, g, b: r < 50 and g < 50 and b < 50, # Black
          lambda r, g, b: r < 50 and g < 50 and b > 200, # Blue
          lambda r, g, b: r > 200 and g < 50 and b < 50, # Red
@@ -45,21 +52,21 @@ def test_color_bars():
          lambda r, g, b: r < 50 and g > 200 and b > 200, # Aqua
          lambda r, g, b: r > 200 and g > 200 and b < 50, # Yellow
          lambda r, g, b: r > 200 and g > 200 and b > 200] # White
-
-    #320x240 image with 8 color bars each one is approx 40 pixels.
-    #we start from the center of the frame buffer, and average the
-    #values of 10 sample pixels from the center of each color bar.
+
+    # 320x240 image with 8 color bars each one is approx 40 pixels.
+ # we start from the center of the frame buffer, and average the + # values of 10 sample pixels from the center of each color bar. for i in range(0, 8): avg = (0, 0, 0) - idx = 40*i+20 #center of colorbars - for off in range(0, 10): #avg 10 pixels + idx = 40*i+20 # center of colorbars + for off in range(0, 10): # avg 10 pixels rgb = image.get_pixel(idx+off, 120) avg = tuple(map(sum, zip(avg, rgb))) - + if not t[i](avg[0]/10, avg[1]/10, avg[2]/10): raise Exception("COLOR BARS TEST FAILED. " "BAR#(%d): RGB(%d,%d,%d)"%(i+1, avg[0]/10, avg[1]/10, avg[2]/10)) - + print("COLOR BARS TEST PASSED...") if __name__ == "__main__": diff --git a/usr/examples/15-Tests/test_save.py b/usr/examples/15-Tests/test_save.py deleted file mode 100644 index 691ee9d97..000000000 --- a/usr/examples/15-Tests/test_save.py +++ /dev/null @@ -1,111 +0,0 @@ -import pyb, sensor, image, os, time -sensor.reset() -sensor.set_framesize(sensor.QVGA) -if not "test" in os.listdir(): os.mkdir("test") -while(True): - sensor.set_pixformat(sensor.GRAYSCALE) - for i in range(2): - img = sensor.snapshot() - num = pyb.rng() - print("Saving %d" % num) - img.save("test/image-%d" % num) - # - img = sensor.snapshot() - num = pyb.rng() - print("Saving %d" % num) - img.save("test/image-%d.bmp" % num) - # - img = sensor.snapshot() - num = pyb.rng() - print("Saving %d" % num) - img.save("test/image-%d.pgm" % num) - # - img = sensor.snapshot() - num = pyb.rng() - print("Saving %d" % num) - img.save("/test/image-%d" % num) - # - img = sensor.snapshot() - num = pyb.rng() - print("Saving %d" % num) - img.save("/test/image-%d.bmp" % num) - # - img = sensor.snapshot() - num = pyb.rng() - print("Saving %d" % num) - img.save("/test/image-%d.pgm" % num) - # - sensor.set_pixformat(sensor.RGB565) - for i in range(2): - img = sensor.snapshot() - num = pyb.rng() - print("Saving %d" % num) - img.save("test/image-%d" % num) - # - img = sensor.snapshot() - num = pyb.rng() - print("Saving %d" % num) - img.save("test/image-%d.bmp" % num) - # - img = sensor.snapshot() - num = pyb.rng() - print("Saving %d" % num) - img.save("test/image-%d.ppm" % num) - # - img = sensor.snapshot() - num = pyb.rng() - print("Saving %d" % num) - img.save("/test/image-%d" % num) - # - img = sensor.snapshot() - num = pyb.rng() - print("Saving %d" % num) - img.save("/test/image-%d.bmp" % num) - # - img = sensor.snapshot() - num = pyb.rng() - print("Saving %d" % num) - img.save("/test/image-%d.ppm" % num) - # - sensor.set_pixformat(sensor.JPEG) - for i in range(2): - img = sensor.snapshot() - num = pyb.rng() - print("Saving %d" % num) - img.save("test/image-%d" % num) - # - img = sensor.snapshot() - num = pyb.rng() - print("Saving %d" % num) - img.save("test/image-%d.jpg" % num) - # - img = sensor.snapshot() - num = pyb.rng() - print("Saving %d" % num) - img.save("test/image-%d.jpeg" % num) - # - img = sensor.snapshot() - num = pyb.rng() - print("Saving %d" % num) - img.save("/test/image-%d" % num) - # - img = sensor.snapshot() - num = pyb.rng() - print("Saving %d" % num) - img.save("/test/image-%d.jpg" % num) - # - img = sensor.snapshot() - num = pyb.rng() - print("Saving %d" % num) - img.save("/test/image-%d.jpeg" % num) - # - print("Sleeping 5...") - time.sleep(1000) - print("Sleeping 4...") - time.sleep(1000) - print("Sleeping 3...") - time.sleep(1000) - print("Sleeping 2...") - time.sleep(1000) - print("Sleeping 1...") - time.sleep(1000)