mirror of
https://github.com/openmv/openmv.git
60 Scripts.
Everything except the DAC script works; that still has to be fixed. Anyway, we have a ton of examples for launch, so hopefully comments asking how to do stuff will be limited. That said, the PYB module is still in a poor state; stuff from it kinda works and kinda doesn't. One day there won't be any fires to put out on this project and I can stop working so hard.
This commit is contained in:
parent
bb9d79d744
commit
4c88c110cf
25 usr/examples/02-Board-Control/pwm_control.py Normal file
@@ -0,0 +1,25 @@
# PWM Control Example
#
# This example shows how to do PWM with your OpenMV Cam.
#
# WARNING: PWM control is... not easy with MicroPython. You have to use
# the correct timer with the correct pins and channels. As for what the
# correct values are - who knows. If you need to change the pins from the
# example below please try out different timer/channel/pin configs.

import pyb, time

t2 = pyb.Timer(1, freq=1000)

ch1 = t2.channel(2, pyb.Timer.PWM, pin=pyb.Pin("P0"))
ch2 = t2.channel(3, pyb.Timer.PWM, pin=pyb.Pin("P1"))

while(True):
    for i in range(100):
        ch1.pulse_width_percent(i)
        ch2.pulse_width_percent(100-i)
        time.sleep(5)
    for i in range(100):
        ch1.pulse_width_percent(100-i)
        ch2.pulse_width_percent(i)
        time.sleep(5)
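A couple of hedged usage notes for the script above (pin/timer mappings are board-specific, so treat these as assumptions to verify against your board's alternate-function table):

# The standard pyb.Timer API lets you adjust the output at runtime:
# t2.freq(500)                 # change the PWM frequency to 500 Hz
# ch1.pulse_width_percent(25)  # hold channel 1 at a fixed 25% duty cycle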
@@ -1,11 +1,20 @@
+# Color Binary Filter Example
+#
+# This script shows off the binary image filter. This script was originally a
+# test script... but, it can be useful for showing how to use binary.
+
import pyb, sensor, image, math

sensor.reset()
sensor.set_framesize(sensor.QVGA)
sensor.set_pixformat(sensor.RGB565)

red_threshold   = (0,100,    0,127,    0,127) # L A B
green_threshold = (0,100, -128,0,      0,127) # L A B
blue_threshold  = (0,100, -128,127, -128,0  ) # L A B

while(True):
    # Test red threshold
    for i in range(100):
        img = sensor.snapshot()
@@ -1,24 +1,35 @@
-import pyb, sensor, image, math
+# Erode and Dilate Example
+#
+# This example shows off the erode and dilate functions which you can run on
+# a binary image to remove noise. This example was originally a test but it's
+# useful for showing off how these functions work.
+
+import pyb, sensor, image

sensor.reset()
sensor.set_framesize(sensor.QVGA)

grayscale_thres = (170, 255)
rgb565_thres = (70, 100, -128, 127, -128, 127)

while(True):
    sensor.set_pixformat(sensor.GRAYSCALE)
-    for i in range(100):
+    for i in range(20):
        img = sensor.snapshot()
        img.binary([grayscale_thres])
        img.erode(2)
-    for i in range(100):
+    for i in range(20):
        img = sensor.snapshot()
        img.binary([grayscale_thres])
        img.dilate(2)

    sensor.set_pixformat(sensor.RGB565)
-    for i in range(100):
+    for i in range(20):
        img = sensor.snapshot()
        img.binary([rgb565_thres])
        img.erode(2)
-    for i in range(100):
+    for i in range(20):
        img = sensor.snapshot()
        img.binary([rgb565_thres])
        img.dilate(2)
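To make the idea concrete, here is a minimal 1-D pure-Python sketch of what erode and dilate do to a binary signal (illustrative only; not the OpenMV implementation, which operates on 2-D images with a kernel-size argument):

row = [0, 1, 1, 1, 0, 1, 0]
# Erode: a pixel survives only if its whole neighborhood is set.
eroded  = [1 if all(row[max(0, i-1):i+2]) else 0 for i in range(len(row))]
# Dilate: a pixel is set if anything in its neighborhood is set.
dilated = [1 if any(row[max(0, i-1):i+2]) else 0 for i in range(len(row))]
print(eroded)   # [0, 0, 1, 0, 0, 0, 0] - isolated noise removed
print(dilated)  # [1, 1, 1, 1, 1, 1, 1] - regions grown

Eroding removes speckle noise; dilating grows the surviving regions back toward their original size.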
@@ -1,9 +1,17 @@
+# Grayscale Binary Filter Example
+#
+# This script shows off the binary image filter. This script was originally a
+# test script... but, it can be useful for showing how to use binary.
+
import pyb, sensor, image, math

sensor.reset()
sensor.set_framesize(sensor.QVGA)
sensor.set_pixformat(sensor.GRAYSCALE)

low_threshold = (0, 50)
high_threshold = (205, 255)

while(True):
    # Test low threshold
    for i in range(100):
25 usr/examples/04-Image-Filters/grayscale_filter.py Normal file
@@ -0,0 +1,25 @@
# Grayscale Filter Example
#
# The sensor module can perform some basic image processing while it is reading
# the image in. This example shows off how to apply grayscale thresholds.
#
# WARNING - THIS FEATURE NEEDS TO BE RE-WORKED. THE API MAY CHANGE IN THE
# FUTURE! Please use the binary function for image segmentation if possible.

import sensor, image, time

sensor.reset() # Initialize the camera sensor.
sensor.set_pixformat(sensor.RGB565) # or sensor.GRAYSCALE
sensor.set_framesize(sensor.QVGA) # or sensor.QQVGA (or others)
sensor.skip_frames(10) # Let new settings take effect.
clock = time.clock() # Tracks FPS.

# Segment the image by the following thresholds. This segmentation is done while
# the image is being read in so it does not cost any additional time...
sensor.set_image_filter(sensor.FILTER_BW, lower=128, upper=255)

while(True):
    clock.tick() # Track elapsed milliseconds between snapshots().
    img = sensor.snapshot() # Take a picture and return the image.
    print(clock.fps()) # Note: Your OpenMV Cam runs about half as fast while
    # connected to your computer. The FPS should increase once disconnected.
27 usr/examples/04-Image-Filters/skin_filter.py Normal file
@@ -0,0 +1,27 @@
# Skin Filter Example
#
# The sensor module can perform some basic image processing while it is reading
# the image in. This example shows off how to apply skin thresholds.
#
# WARNING - THIS FEATURE NEEDS TO BE RE-WORKED. THE API MAY CHANGE IN THE
# FUTURE! Please use the binary function for image segmentation if possible.

import sensor, image, time

sensor.reset() # Initialize the camera sensor.
sensor.set_pixformat(sensor.RGB565) # or sensor.GRAYSCALE
sensor.set_framesize(sensor.QVGA) # or sensor.QQVGA (or others)
sensor.skip_frames(10) # Let new settings take effect.
clock = time.clock() # Tracks FPS.

# Segment the image by the following thresholds. This segmentation is done while
# the image is being read in so it does not cost any additional time...
sensor.set_image_filter(sensor.FILTER_SKIN)
# NOTE: The skin filter doesn't really work that well. We do not suggest using
# it at all.

while(True):
    clock.tick() # Track elapsed milliseconds between snapshots().
    img = sensor.snapshot() # Take a picture and return the image.
    print(clock.fps()) # Note: Your OpenMV Cam runs about half as fast while
    # connected to your computer. The FPS should increase once disconnected.
@@ -1,3 +1,16 @@
+# Face Detection Example
+#
+# This example shows off the built-in face detection feature of the OpenMV Cam.
+#
+# Face detection works by using the Haar Cascade feature detector on an image. A
+# Haar Cascade is a series of simple area contrast checks. For the built-in
+# frontalface detector there are 25 stages of checks, with each stage having
+# hundreds of checks apiece. Haar Cascades run fast because later stages are
+# only evaluated if previous stages pass. Additionally, your OpenMV Cam uses
+# a data structure called the integral image to quickly execute each area
+# contrast check in constant time (the reason for feature detection being
+# grayscale only is the space requirement for the integral image).
+
import sensor, time, image

# Reset sensor
@@ -6,6 +19,7 @@ sensor.reset()
# Sensor settings
sensor.set_contrast(1)
sensor.set_gainceiling(16)
+# HQVGA and GRAYSCALE are the best for face tracking.
sensor.set_framesize(sensor.HQVGA)
sensor.set_pixformat(sensor.GRAYSCALE)
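For reference, a minimal pure-Python sketch of the integral image idea described in the comments above (illustrative only; this is not the OpenMV API):

# Build a summed-area table so any rectangle sum costs four lookups.
def integral_image(img):  # img is a list of rows of grayscale values
    h, w = len(img), len(img[0])
    ii = [[0]*(w+1) for _ in range(h+1)]
    for y in range(h):
        for x in range(w):
            ii[y+1][x+1] = img[y][x] + ii[y][x+1] + ii[y+1][x] - ii[y][x]
    return ii

def area_sum(ii, x, y, w, h):
    # Sum of pixels in the rectangle (x, y, w, h) in constant time.
    return ii[y+h][x+w] - ii[y][x+w] - ii[y+h][x] + ii[y][x]

Each Haar area contrast check is then just a difference of two such area sums, which is why the detector can afford hundreds of checks per stage.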
@@ -1,6 +1,16 @@
+# Face Tracking Example
+#
+# This example shows off using the keypoints feature of your OpenMV Cam to track
+# a face after it has been detected by a Haar Cascade. The first part of this
+# script finds a face in the image using the frontalface Haar Cascade.
+# After that, the script uses the keypoints feature to automatically learn your
+# face and track it. Keypoints can be used to automatically track anything.
+#
+# NOTE: LOTS OF KEYPOINTS MAY CAUSE THE SYSTEM TO RUN OUT OF MEMORY!
+
import sensor, time, image

-# Rotation.
+# Normalized keypoints are not rotation invariant...
NORMALIZED=False
# Keypoint extractor threshold, range from 0 to any number.
# This threshold is used when extracting keypoints, the lower
@@ -1,3 +1,9 @@
+# Face Eye Detection Example
+#
+# This script uses the built-in frontalface detector to find a face and then
+# the eyes within the face. If you want to determine the eye gaze please see the
+# iris_detection script for an example on how to do that.
+
import sensor, time, image

# Reset sensor
@@ -1,3 +1,10 @@
+# Iris Detection Example
+#
+# This example shows how to find the eye gaze (pupil detection) after finding
+# the eyes in an image. This script uses the find_eyes function, which determines
+# the center point of an roi that should contain a pupil. It does this by
+# finding the center of the darkest area in the eye roi, which is the pupil center.
+
import sensor, time, image

# Reset sensor
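To illustrate the darkest-area idea described above, a minimal pure-Python sketch (not the OpenMV implementation; the threshold value is an assumption):

# Find the centroid of the darkest pixels in a small grayscale ROI.
def dark_centroid(roi, thresh=32):  # roi is a list of rows of pixel values
    xs, ys = [], []
    for y, row in enumerate(roi):
        for x, v in enumerate(row):
            if v < thresh:
                xs.append(x)
                ys.append(y)
    if not xs:
        return None
    return (sum(xs) // len(xs), sum(ys) // len(ys))  # approximate pupil center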
@@ -1,11 +1,36 @@
+# Freak Example
+#
+# This script shows off keypoint tracking by itself. Put an object in front of
+# your OpenMV Cam without anything else in the image (i.e. the camera should be
+# facing a smooth wall) and the camera will learn the keypoints for, and track,
+# whatever object is in the image. You can save keypoints to disk either via
+# the OpenMV IDE or from within your script.
+#
+# Matching keypoints works by first extracting keypoints from an ROI. Once those
+# are extracted the OpenMV Cam compares the extracted keypoints against all
+# the keypoints in an image. It tries to find the center matching point between
+# the two sets of keypoints.
+#
+# Keep in mind that keypoint matching with just one training example isn't very
+# robust. If you want professional quality results then stick with getting
+# professionally generated Haar Cascades like the frontalface or eye cascade.
+# That said, if you're in a very controlled environment then keypoint tracking
+# allows your OpenMV Cam to learn objects on the fly.
+#
+# If... you want really good keypoint matching results we suggest you gather
+# keypoints from all faces of an object and with multiple rotations and scales.
+# Comparing against all these sets of keypoints helps versus just one.
+#
+# NOTE: LOTS OF KEYPOINTS MAY CAUSE THE SYSTEM TO RUN OUT OF MEMORY!
+
import sensor, time, image

-# Rotation.
+# Normalized keypoints are not rotation invariant...
NORMALIZED=False
# Keypoint extractor threshold, range from 0 to any number.
# This threshold is used when extracting keypoints, the lower
# the threshold the higher the number of keypoints extracted.
-KEYPOINTS_THRESH=20
+KEYPOINTS_THRESH=30
# Keypoint-level threshold, range from 0 to 100.
# This threshold is used when matching two keypoint descriptors, it's the
# percentage of the distance between two descriptors to the max distance.
@@ -45,8 +70,7 @@ while (True):
        # c[2] contains the percentage of matching keypoints.
        # If more than 25% of the keypoints match, draw stuff.
        if (c[2]>25):
-            img.draw_cross(c[0], c[1], size=5)
+            img.draw_cross(c[0], c[1], size=15)
-            img.draw_keypoints(kpts2, color=255, size=12)
            img.draw_string(0, 10, "Match %d%%"%(c[2]))

    # Draw FPS
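To illustrate the descriptor-matching threshold described above, a minimal pure-Python sketch (not the OpenMV implementation; the 512-bit descriptor length is an assumption for FREAK-style binary descriptors):

def hamming(d1, d2):
    # Count differing bits between two equal-length byte strings.
    return sum(bin(a ^ b).count("1") for a, b in zip(d1, d2))

def is_match(d1, d2, thresh_pct, max_dist=512):
    # Accept a pair of keypoints when their descriptor distance is a small
    # enough percentage of the maximum possible distance.
    return (100 * hamming(d1, d2) / max_dist) <= thresh_pct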
@@ -1,3 +1,12 @@
+# Local Binary Patterns (LBP) Example
+#
+# This example shows off how to use the local binary pattern feature descriptor
+# on your OpenMV Cam. LBP descriptors work like Freak feature descriptors.
+#
+# WARNING: LBP support needs to be reworked! As of right now this feature needs
+# a lot of work to be made into something useful. This script will remain to show
+# that the functionality exists, but, in its current state, it is inadequate.
+
import sensor, time, image
sensor.reset()

@@ -28,7 +37,7 @@ clock = time.clock()
while (True):
    clock.tick()
    img = sensor.snapshot()

    objects = img.find_features(face_cascade, threshold=0.5, scale=1.25)
    if objects:
        face = objects[0]
@@ -1,3 +1,13 @@
+# Template Matching Example - Normalized Cross Correlation (NCC)
+#
+# This example shows off how to use the NCC feature of your OpenMV Cam to match
+# image patches to parts of an image... except for extremely controlled environments
+# NCC is not all that useful.
+#
+# WARNING: NCC support needs to be reworked! As of right now this feature needs
+# a lot of work to be made into something useful. This script will remain to show
+# that the functionality exists, but, in its current state, it is inadequate.
+
import time, sensor, image

# Reset sensor
@@ -12,12 +22,12 @@ sensor.set_framesize(sensor.QQVGA)
sensor.set_pixformat(sensor.GRAYSCALE)

# Load template
-template = image.Image("/template.pgm")
+template = image.Image("/template.bmp") # Image should be like 32x32 grayscale.

# Run template matching
while (True):
    img = sensor.snapshot()

    r = img.find_template(template, 0.75)
    if r:
        img.draw_rectangle(r)
-    time.sleep(50)
@@ -1,25 +1,33 @@
-import sensor, time, pyb
+# Blob Detection Example
+#
+# This example shows off how to use the find_blobs function to find color
+# blobs in the image. This example in particular looks for dark green objects.

-sensor.reset()
-sensor.set_framesize(sensor.QVGA)
-sensor.set_pixformat(sensor.RGB565)
+import sensor, image, time

-# Finds a red blob.
-COLOR1 = ( 50,  55,  73,  82,  47,  63)
-# Select an area of the image and click copy color to get
-# new color tracking parameters for something in the image.
+# For color tracking to work really well you should ideally be in a very, very,
+# very, controlled environment where the lighting is constant...
+green_threshold = (  0,  80, -70, -10,  -0,  30)
+# You may need to tweak the above settings for tracking green things...
+# Select an area in the Framebuffer to copy the color settings.

-clock = time.clock()
-while (True):
-    clock.tick()
-    # Take snapshot
-    image = sensor.snapshot()
+sensor.reset() # Initialize the camera sensor.
+sensor.set_pixformat(sensor.RGB565) # use RGB565.
+sensor.set_framesize(sensor.QQVGA) # use QQVGA for speed.
+sensor.skip_frames(10) # Let new settings take effect.
+sensor.set_whitebal(False) # turn this off.
+clock = time.clock() # Tracks FPS.

-    # Detect blobs in image
-    blobs = image.find_blobs([COLOR1])
+while(True):
+    clock.tick() # Track elapsed milliseconds between snapshots().
+    img = sensor.snapshot() # Take a picture and return the image.

-    # Draw rectangles around detected blobs
-    for blob in blobs:
-        image.draw_rectangle(blob[0:4])
+    blobs = img.find_blobs([green_threshold])
+    if blobs:
+        for b in blobs:
+            # Draw a rect around the blob.
+            img.draw_rectangle(b[0:4]) # rect
+            img.draw_cross(b[5], b[6]) # cx, cy

-    print(clock.fps())
+    print(clock.fps()) # Note: Your OpenMV Cam runs about half as fast while
+    # connected to your computer. The FPS should increase once disconnected.
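For reference, the 6-tuple threshold above follows the same (L min, L max, A min, A max, B min, B max) ordering labeled "# L A B" in the binary filter examples earlier in this commit:

# green_threshold = (  0,  80, -70, -10,  -0,  30)
#                     L min/max | A min/max | B min/max (LAB color space)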
89 usr/examples/10-Color-Tracking/line_following.py Normal file
@@ -0,0 +1,89 @@
# Line Following Example
#
# Making a line following robot requires a lot of effort. This example script
# shows how to do the computer vision part of the line following robot. You
# can use the output from this script to drive a differential drive robot to
# follow a line. This script just generates a single turn value that tells
# your robot to go left or right.
#
# For this script to work properly you should point the camera at a line at a
# 45 or so degree angle. Please make sure that only the line is within the
# camera's field of view.

import sensor, image, time, math

# Tracks a white line. Use [(0, 64)] for tracking a black line.
GRAYSCALE_THRESHOLD = [(128, 255)]

# Each roi is (x, y, w, h). The line detection algorithm will try to find the
# centroid of the largest blob in each roi. The x position of the centroids
# will then be averaged with different weights where the most weight is assigned
# to the roi near the bottom of the image and less to the next roi and so on.
ROIS = [ # [ROI, weight]
        (0, 100, 160, 20, 0.7), # You'll need to tweak the weights for your app
        (0,  50, 160, 20, 0.3), # depending on how your robot is setup.
        (0,   0, 160, 20, 0.1)
       ]

# Compute the weight divisor
weight_sum = 0
for r in ROIS: weight_sum += r[4]

# Camera setup...
sensor.reset() # Initialize the camera sensor.
sensor.set_pixformat(sensor.GRAYSCALE) # use grayscale.
sensor.set_framesize(sensor.QQVGA) # use QQVGA for speed.
sensor.skip_frames(10) # Let new settings take effect.
sensor.set_whitebal(False) # turn this off.
clock = time.clock() # Tracks FPS.

while(True):
    clock.tick() # Track elapsed milliseconds between snapshots().
    img = sensor.snapshot() # Take a picture and return the image.

    centroid_sum = 0
    for r in ROIS:
        blobs = img.find_blobs(GRAYSCALE_THRESHOLD, roi=r[0:4]) # r[0:4] is roi tuple.
        merged_blobs = img.find_markers(blobs) # merge overlapping blobs

        if merged_blobs:
            # Find the index of the blob with the most pixels.
            most_pixels = 0
            largest_blob = 0
            for i in range(len(merged_blobs)):
                if merged_blobs[i][4] > most_pixels:
                    most_pixels = merged_blobs[i][4] # [4] is pixels.
                    largest_blob = i

            # Draw a rect around the blob.
            img.draw_rectangle(merged_blobs[largest_blob][0:4]) # rect
            img.draw_cross(merged_blobs[largest_blob][5], # cx
                           merged_blobs[largest_blob][6]) # cy

            # [5] of the blob is the x centroid - r[4] is the weight.
            centroid_sum += merged_blobs[largest_blob][5] * r[4]

    center_pos = (centroid_sum / weight_sum) # Determine center of line.

    # Convert the center_pos to a deflection angle. We're using a non-linear
    # operation so that the response gets stronger the farther off the line we
    # are. Non-linear operations are good to use on the output of algorithms
    # like this to cause a response "trigger".
    deflection_angle = 0
    # The 80 is from half the X res, the 60 is from half the Y res. The
    # equation below is just computing the angle of a triangle where the
    # opposite side of the triangle is the deviation of the center position
    # from the center and the adjacent side is half the Y res. This limits
    # the angle output to around -45 to 45. (It's not quite -45 and 45.)
    deflection_angle = -math.atan((center_pos-80)/60)

    # Convert angle in radians to degrees.
    deflection_angle = math.degrees(deflection_angle)

    # Now you have an angle telling you how much to turn the robot by which
    # incorporates the part of the line nearest to the robot and parts of
    # the line farther away from the robot for a better prediction.
    print("Turn Angle: %f" % deflection_angle)

    print(clock.fps()) # Note: Your OpenMV Cam runs about half as fast while
    # connected to your computer. The FPS should increase once disconnected.
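A worked instance of the deflection math above (numbers chosen for illustration):

import math
# Suppose the weighted centroid lands at x = 120 on the 160-pixel-wide QQVGA
# image. The offset from the center (80) is 40 pixels, so:
print(math.degrees(-math.atan((120 - 80) / 60)))  # about -33.69 degrees
# A centroid dead-center (x = 80) gives 0 degrees, i.e. drive straight.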
47 usr/examples/10-Color-Tracking/marker_tracking.py Normal file
@@ -0,0 +1,47 @@
# Marker Tracking Example
#
# This example shows how to use the find_markers function to merge blobs for
# different colors into one blob that represents a marker.
#
# Each blob that find_blobs returns has a bit in a bitmask set for the color
# that blob was produced by, as passed to find_blobs. E.g. if you pass
# find_blobs 3 colors then you'll get blobs with possibly a color value of
# (2^0), (2^1), or (2^2). These color values can be OR'ed together, because
# they are a single bit each, to represent a multi-colored blob which you
# can then classify as a marker.

import sensor, image, time

# For color tracking to work really well you should ideally be in a very, very,
# very, controlled environment where the lighting is constant. Additionally, if
# you want to track more than 2 colors you need to set the boundaries for them
# very narrowly. If you try to track... generally red, green, and blue then
# you will end up just tracking everything which you don't want.
red_threshold  = ( 40,  60,  60,  90,  50,  70)
blue_threshold = (  0,  20, -10,  30, -60,  10)
# You may need to tweak the above settings for tracking red and blue things...
# Select an area in the Framebuffer to copy the color settings.

sensor.reset() # Initialize the camera sensor.
sensor.set_pixformat(sensor.RGB565) # use RGB565.
sensor.set_framesize(sensor.QQVGA) # use QQVGA for speed.
sensor.skip_frames(10) # Let new settings take effect.
sensor.set_whitebal(False) # turn this off.
clock = time.clock() # Tracks FPS.

while(True):
    clock.tick() # Track elapsed milliseconds between snapshots().
    img = sensor.snapshot() # Take a picture and return the image.

    blobs = img.find_blobs([red_threshold, blue_threshold])
    merged_blobs = img.find_markers(blobs)
    if merged_blobs:
        for b in merged_blobs:
            # Draw a rect around the blob.
            img.draw_rectangle(b[0:4]) # rect
            img.draw_cross(b[5], b[6]) # cx, cy
            # Draw the color label. b[8] is the color label.
            img.draw_string(b[0]+2, b[1]+2, "%d" % b[8])

    print(clock.fps()) # Note: Your OpenMV Cam runs about half as fast while
    # connected to your computer. The FPS should increase once disconnected.
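To make the bitmask wording above concrete, a tiny illustrative snippet (names are hypothetical; only the bit logic mirrors the description):

# With the two thresholds passed to find_blobs above, red blobs carry color
# bit (1 << 0) and blue blobs carry (1 << 1). A merged marker OR's them.
RED_BIT, BLUE_BIT = (1 << 0), (1 << 1)
marker_color = RED_BIT | BLUE_BIT      # a blob merged from a red and a blue blob
print(bool(marker_color & RED_BIT))    # True -> the marker contains red
print(bool(marker_color & BLUE_BIT))   # True -> the marker contains blue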
@@ -1,6 +1,7 @@
# Thermopile Shield Demo
#
-# Note: To run this example you will need a Thermopile Shield for your OpenMV Cam.
+# Note: To run this example you will need a Thermopile Shield for your OpenMV
+# Cam. Also, please disable JPEG mode in the IDE.
#
# The Thermopile Shield allows your OpenMV Cam to see heat!
@@ -1,7 +1,9 @@
-# Thermopile Shield Demo 2
+# Thermopile Shield Demo with LCD
#
# Note: To run this example you will need a Thermopile Shield for your OpenMV
-# Cam and an LCD Shield.
+# Cam and an LCD Shield. Also, please disable JPEG mode in the IDE.
+#
+# The Thermopile Shield allows your OpenMV Cam to see heat!

import sensor, image, time, fir, lcd
@@ -1,10 +1,15 @@
-# Simple WiFi scan example
-import time, pyb, network
+# Connect Example
+#
+# This example shows how to connect your OpenMV Cam with a WiFi shield to the net.
+
+import network

SSID='' # Network SSID
KEY=''  # Network key

# Init wlan module and connect to network
+print("Trying to connect... (may take a while)...")

wlan = network.WINC()
wlan.connect(SSID, key=KEY, security=wlan.WPA_PSK)
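Once connected, you can confirm the DHCP lease the same way the other WiFi examples in this commit do:

# print(wlan.ifconfig())  # prints the interface configuration (IP address, etc.)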
@@ -1,11 +1,16 @@
-# Simple DNS example
-import time, pyb, network, usocket
+# DNS Example
+#
+# This example shows how to get the IP address for websites via DNS.
+
+import network, usocket

# AP info
SSID='' # Network SSID
KEY=''  # Network key

# Init wlan module and connect to network
+print("Trying to connect... (may take a while)...")

wlan = network.WINC()
wlan.connect(SSID, key=KEY, security=wlan.WPA_PSK)
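The lookup itself falls outside this hunk; a hedged sketch of what it presumably looks like with the standard usocket API (the host name is a placeholder):

# addr = usocket.getaddrinfo("www.openmv.io", 80)[0][4]
# print(addr)  # the resolved address for the host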
@@ -1,70 +0,0 @@
'''
Simple echo server
'''
import wlan
import socket
import select
import led, time

SSID='' # Network SSID
KEY=''  # Network key
HOST = '' # Use first available interface
PORT = 8000 # Arbitrary non-privileged port

led.off(led.RED)
led.off(led.BLUE)
led.on(led.GREEN)

# Init wlan module and connect to network
wlan.init()
wlan.connect(SSID, sec=wlan.WPA2, key=KEY)
led.off(led.GREEN)

# Wait for connection to be established
while (True):
    led.toggle(led.BLUE)
    time.sleep(250)
    led.toggle(led.BLUE)
    time.sleep(250)
    if wlan.connected():
        led.on(led.BLUE)
        break

# We should have a valid IP now via DHCP
wlan.ifconfig()

# Create server socket
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM, socket.IPPROTO_TCP)

# Set socket in blocking mode
s.setblocking(True)

# Bind and listen
s.bind((HOST, PORT))
s.listen(5)

while(True):
    print ('Waiting for connections..')
    client, addr = s.accept()
    print ('Connected to ' + addr[0] + ':' + str(addr[1]))

    # Set client socket non-blocking
    client.setblocking(False)

    while (True):
        rfds, wfds, xfds = select.select([client], [], [client], 1.0)
        if xfds:
            print("socket exception")
            break
        elif rfds:
            buf = client.recv(1024)
            if len(buf) == 0: # peer has shutdown
                print("socket closed")
                client.close()
                break
            print ("recv:"+str(buf))
            client.send(buf)
        elif wfds:
            print ("wfds")
        else:
            print ("timeout")
@@ -1,13 +1,15 @@
-'''
-Firmware update examples
-Note: copy the WINC1500/firmware folder to uSD
-'''
-import time, network
+# WINC Firmware Update Script
+#
+# For a successful firmware update, create a "firmware" folder on the
+# uSD card and put a bin file in it. The firmware update code will load that
+# new firmware onto the WINC module.

-# Init wlan module in Download mode
+import network
+
+# Init wlan module in Download mode.
wlan = network.WINC(True)
#print("Firmware version:", wlan.fw_version())

# Start the firmware update process.
wlan.fw_update()
#print("Firmware version:", wlan.fw_version())
@@ -1,16 +1,15 @@
-'''
-Simple MJPEG streaming server
-'''
-import time, sensor, pyb, network, usocket
+# MJPEG Streaming
+#
+# This example shows off how to do MJPEG streaming to a FIREFOX web browser
+# (IE and Chrome do not work). Just input your network SSID and KEY and then
+# connect to the IP address/port printed out from ifconfig.
+
+import sensor, image, time, network, usocket

SSID='' # Network SSID
KEY=''  # Network key
HOST = '' # Use first available interface
PORT = 8000 # Arbitrary non-privileged port

-led_r = pyb.LED(1)
-led_b = pyb.LED(2)
-led_g = pyb.LED(3)

# Reset sensor
sensor.reset()
@@ -24,6 +23,7 @@ sensor.set_framesize(sensor.QVGA)
sensor.set_pixformat(sensor.GRAYSCALE)

# Init wlan module and connect to network
+print("Trying to connect... (may take a while)...")
wlan = network.WINC()
wlan.connect(SSID, key=KEY, security=wlan.WPA_PSK)
@@ -34,7 +34,7 @@ print(wlan.ifconfig())
s = usocket.socket(usocket.AF_INET, usocket.SOCK_STREAM)

# Bind and listen
-s.bind((HOST, PORT))
+s.bind([HOST, PORT])
s.listen(5)

# Set timeout to 1s
@@ -50,8 +50,8 @@ data = client.recv(1024)
# Should parse client request here

# Send multipart header
client.send("HTTP/1.1 200 OK\r\n" \
            "Server: OpenMV\r\n" \
            "Content-Type: multipart/x-mixed-replace;boundary=openmv\r\n" \
            "Cache-Control: no-cache\r\n" \
            "Pragma: no-cache\r\n\r\n")
@@ -62,10 +62,10 @@ clock = time.clock()
while (True):
    clock.tick() # Track elapsed milliseconds between snapshots().
    frame = sensor.snapshot()
+    cframe = frame.compress(35)
    client.send("\r\n--openmv\r\n" \
                "Content-Type: image/jpeg\r\n"\
-                "Content-Length:"+str(frame.size())+"\r\n\r\n")
+                "Content-Length:"+str(cframe.size())+"\r\n\r\n")
-    client.send(frame.compress(35))
+    client.send(cframe)
    print(clock.fps())
+    client.close()
-client.close()
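For reference, what the browser receives per frame under the multipart header sent above (reconstructed from the code; the byte count varies per frame):

# --openmv
# Content-Type: image/jpeg
# Content-Length: <cframe.size()>
#
# <JPEG bytes of cframe, i.e. frame.compress(35)>

Firefox replaces the displayed image each time a new part arrives, which is what produces the live stream.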
@@ -1,16 +1,15 @@
-'''
-Simple MJPEG streaming server + FIR
-'''
-import time, sensor, pyb, network, usocket, fir
+# MJPEG Streaming with FIR
+#
+# This example shows off how to do MJPEG streaming to a FIREFOX web browser
+# (IE and Chrome do not work). Just input your network SSID and KEY and then
+# connect to the IP address/port printed out from ifconfig.
+
+import sensor, image, network, usocket, fir

SSID='' # Network SSID
KEY=''  # Network key
HOST = '' # Use first available interface
PORT = 8000 # Arbitrary non-privileged port

-led_r = pyb.LED(1)
-led_b = pyb.LED(2)
-led_g = pyb.LED(3)

# Reset sensor
sensor.reset()
@@ -20,13 +19,14 @@ sensor.set_contrast(1)
sensor.set_brightness(1)
sensor.set_saturation(1)
sensor.set_gainceiling(16)
-sensor.set_framesize(sensor.QVGA)
-sensor.set_pixformat(sensor.GRAYSCALE)
+sensor.set_framesize(sensor.QQVGA)
+sensor.set_pixformat(sensor.RGB565)

# Initialize the thermal sensor
fir.init()

# Init wlan module and connect to network
+print("Trying to connect... (may take a while)...")
wlan = network.WINC()
wlan.connect(SSID, key=KEY, security=wlan.WPA_PSK)
@@ -62,6 +62,7 @@ client.send("HTTP/1.1 200 OK\r\n" \
# Start streaming images
while (True):
    image = sensor.snapshot()

    # Capture FIR data
    # ta: Ambient temperature
    # ir: Object temperatures (IR array)
@@ -77,10 +78,10 @@ while (True):
    image.draw_string(0, 8, "To min: %0.2f"%to_min, color = (0xFF, 0x00, 0x00))
    image.draw_string(0, 16, "To max: %0.2f"%to_max, color = (0xFF, 0x00, 0x00))

+    cimage = image.compress(90)
    client.send("\r\n--openmv\r\n" \
                "Content-Type: image/jpeg\r\n"\
-                "Content-Length:"+str(image.size())+"\r\n\r\n")
+                "Content-Length:"+str(cimage.size())+"\r\n\r\n")
-    client.send(image.compress(35))
+    client.send(cimage)

    client.close()
@@ -1,11 +1,17 @@
-# Simple NTP client
-import time, pyb, network, usocket, ustruct, utime
+# NTP Example
+#
+# This example shows how to get the current time using NTP with the WiFi shield.
+
+import network, usocket, ustruct, utime

SSID='' # Network SSID
KEY=''  # Network key

TIMESTAMP = 2208988800+946684800

# Init wlan module and connect to network
+print("Trying to connect... (may take a while)...")

wlan = network.WINC()
wlan.connect(SSID, key=KEY, security=wlan.WPA_PSK)
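A note on the TIMESTAMP constant above: 2208988800 is the number of seconds from the NTP epoch (1900-01-01) to the Unix epoch (1970-01-01), and 946684800 is the number of seconds from the Unix epoch to 2000-01-01, the MicroPython embedded epoch. A hedged sketch of the conversion, assuming ntp_seconds was unpacked from the server reply with ustruct:

# print(utime.localtime(ntp_seconds - TIMESTAMP))  # (year, month, mday, hour, ...)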
@@ -1,4 +1,7 @@
-# Simple WiFi scan example
+# Scan Example
+#
+# This example shows how to scan for networks with the WiFi shield.
+
import time, network

wlan = network.WINC()
@@ -9,4 +12,4 @@ while (True):
    for ap in scan_result:
        print("Channel:%d RSSI:%d Auth:%d BSSID:%s SSID:%s"%(ap))
    print()
    time.sleep(1000)
@@ -1,11 +1,16 @@
-# Simple NTP client
-import time, pyb, network, usocket
+# TCP Client Example
+#
+# This example shows how to send and receive TCP traffic with the WiFi shield.
+
+import network, usocket

# AP info
SSID='' # Network SSID
KEY=''  # Network key

# Init wlan module and connect to network
+print("Trying to connect... (may take a while)...")

wlan = network.WINC()
wlan.connect(SSID, key=KEY, security=wlan.WPA_PSK)
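The TCP exchange itself falls outside this hunk; a hedged sketch using the standard usocket API (host and request are placeholders):

# client = usocket.socket(usocket.AF_INET, usocket.SOCK_STREAM)
# client.connect(usocket.getaddrinfo("www.openmv.io", 80)[0][4])
# client.send(b"GET / HTTP/1.0\r\nHost: www.openmv.io\r\n\r\n")
# print(client.recv(1024))
# client.close()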
@@ -1,7 +1,13 @@
+# Colorbar Test Example
+#
+# This example is the color bar test run by each OpenMV Cam before being allowed
+# out of the factory. The OMV sensors can output a color bar image which you
+# can threshold to check that the camera bus is connected correctly.
+
import sensor, time

sensor.reset()
# Set sensor settings
sensor.set_brightness(0)
sensor.set_saturation(0)
sensor.set_gainceiling(8)
@@ -14,11 +20,11 @@ sensor.set_pixformat(sensor.RGB565)
# Enable colorbar test mode
sensor.set_colorbar(True)

# Skip a few frames to allow the sensor to settle down
-for i in range(0, 30):
+for i in range(0, 100):
    image = sensor.snapshot()

-#color bars thresholds
+# Color bars thresholds
t = [lambda r, g, b: r < 50  and g < 50  and b < 50,   # Black
     lambda r, g, b: r < 50  and g < 50  and b > 200,  # Blue
     lambda r, g, b: r > 200 and g < 50  and b < 50,   # Red
@@ -28,13 +34,13 @@ t = [lambda r, g, b: r < 50 and g < 50 and b < 50, # Black
     lambda r, g, b: r > 200 and g > 200 and b < 50,   # Yellow
     lambda r, g, b: r > 200 and g > 200 and b > 200]  # White

-#320x240 image with 8 color bars each one is approx 40 pixels.
-#we start from the center of the frame buffer, and average the
-#values of 10 sample pixels from the center of each color bar.
+# 320x240 image with 8 color bars; each one is approx 40 pixels wide.
+# We start from the center of the frame buffer, and average the
+# values of 10 sample pixels from the center of each color bar.
for i in range(0, 8):
    avg = (0, 0, 0)
-    idx = 40*i+20 #center of colorbars
-    for off in range(0, 10): #avg 10 pixels
+    idx = 40*i+20 # center of colorbars
+    for off in range(0, 10): # avg 10 pixels
        rgb = image.get_pixel(idx+off, 120)
        avg = tuple(map(sum, zip(avg, rgb)))
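A worked instance of the sampling arithmetic above: for bar i = 2 (Red), idx = 40*2+20 = 100, so the ten samples are image.get_pixel(100, 120) through image.get_pixel(109, 120), i.e. the horizontal middle of the third 40-pixel-wide bar at the vertical center (row 120) of the 320x240 frame.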
@@ -1,13 +1,19 @@
+# Self Test Example
+#
+# This example shows how your OpenMV Cam tests itself before being allowed out
+# of the factory. Every OpenMV Cam should pass this test.
+
import sensor, time, pyb

def test_int_adc():
+
    adc = pyb.ADCAll(12)
    # Test VBAT
    vbat = adc.read_core_vbat()
    vbat_diff = abs(vbat-3.3)
    if (vbat_diff > 0.1):
        raise Exception("INTERNAL ADC TEST FAILED VBAT=%fv"%vbat)

    # Test VREF
    vref = adc.read_core_vref()
    vref_diff = abs(vref-1.2)
@@ -17,13 +23,14 @@ def test_int_adc():
    print("\nINTERNAL ADC TEST PASSED...")

def test_color_bars():
+
    sensor.reset()
    # Set sensor settings
    sensor.set_brightness(0)
    sensor.set_saturation(0)
    sensor.set_gainceiling(8)
    sensor.set_contrast(2)

    # Set sensor pixel format
    sensor.set_framesize(sensor.QVGA)
    sensor.set_pixformat(sensor.RGB565)
@@ -33,10 +40,10 @@ def test_color_bars():

    # Skip a few frames to allow the sensor to settle down
    # Note: This takes more time when exec'd from the IDE.
-    for i in range(0, 10):
+    for i in range(0, 100):
        image = sensor.snapshot()

-    #color bars thresholds
+    # Color bars thresholds
    t = [lambda r, g, b: r < 50  and g < 50  and b < 50,   # Black
         lambda r, g, b: r < 50  and g < 50  and b > 200,  # Blue
         lambda r, g, b: r > 200 and g < 50  and b < 50,   # Red
@@ -45,21 +52,21 @@ def test_color_bars():
         lambda r, g, b: r < 50  and g > 200 and b > 200,  # Aqua
         lambda r, g, b: r > 200 and g > 200 and b < 50,   # Yellow
         lambda r, g, b: r > 200 and g > 200 and b > 200]  # White

-    #320x240 image with 8 color bars each one is approx 40 pixels.
-    #we start from the center of the frame buffer, and average the
-    #values of 10 sample pixels from the center of each color bar.
+    # 320x240 image with 8 color bars; each one is approx 40 pixels wide.
+    # We start from the center of the frame buffer, and average the
+    # values of 10 sample pixels from the center of each color bar.
    for i in range(0, 8):
        avg = (0, 0, 0)
-        idx = 40*i+20 #center of colorbars
-        for off in range(0, 10): #avg 10 pixels
+        idx = 40*i+20 # center of colorbars
+        for off in range(0, 10): # avg 10 pixels
            rgb = image.get_pixel(idx+off, 120)
            avg = tuple(map(sum, zip(avg, rgb)))

        if not t[i](avg[0]/10, avg[1]/10, avg[2]/10):
            raise Exception("COLOR BARS TEST FAILED. "
                "BAR#(%d): RGB(%d,%d,%d)"%(i+1, avg[0]/10, avg[1]/10, avg[2]/10))

    print("COLOR BARS TEST PASSED...")

if __name__ == "__main__":
@@ -1,111 +0,0 @@
import pyb, sensor, image, os, time

sensor.reset()
sensor.set_framesize(sensor.QVGA)

if not "test" in os.listdir(): os.mkdir("test")

while(True):
    sensor.set_pixformat(sensor.GRAYSCALE)
    for i in range(2):
        img = sensor.snapshot()
        num = pyb.rng()
        print("Saving %d" % num)
        img.save("test/image-%d" % num)
        #
        img = sensor.snapshot()
        num = pyb.rng()
        print("Saving %d" % num)
        img.save("test/image-%d.bmp" % num)
        #
        img = sensor.snapshot()
        num = pyb.rng()
        print("Saving %d" % num)
        img.save("test/image-%d.pgm" % num)
        #
        img = sensor.snapshot()
        num = pyb.rng()
        print("Saving %d" % num)
        img.save("/test/image-%d" % num)
        #
        img = sensor.snapshot()
        num = pyb.rng()
        print("Saving %d" % num)
        img.save("/test/image-%d.bmp" % num)
        #
        img = sensor.snapshot()
        num = pyb.rng()
        print("Saving %d" % num)
        img.save("/test/image-%d.pgm" % num)
        #
    sensor.set_pixformat(sensor.RGB565)
    for i in range(2):
        img = sensor.snapshot()
        num = pyb.rng()
        print("Saving %d" % num)
        img.save("test/image-%d" % num)
        #
        img = sensor.snapshot()
        num = pyb.rng()
        print("Saving %d" % num)
        img.save("test/image-%d.bmp" % num)
        #
        img = sensor.snapshot()
        num = pyb.rng()
        print("Saving %d" % num)
        img.save("test/image-%d.ppm" % num)
        #
        img = sensor.snapshot()
        num = pyb.rng()
        print("Saving %d" % num)
        img.save("/test/image-%d" % num)
        #
        img = sensor.snapshot()
        num = pyb.rng()
        print("Saving %d" % num)
        img.save("/test/image-%d.bmp" % num)
        #
        img = sensor.snapshot()
        num = pyb.rng()
        print("Saving %d" % num)
        img.save("/test/image-%d.ppm" % num)
        #
    sensor.set_pixformat(sensor.JPEG)
    for i in range(2):
        img = sensor.snapshot()
        num = pyb.rng()
        print("Saving %d" % num)
        img.save("test/image-%d" % num)
        #
        img = sensor.snapshot()
        num = pyb.rng()
        print("Saving %d" % num)
        img.save("test/image-%d.jpg" % num)
        #
        img = sensor.snapshot()
        num = pyb.rng()
        print("Saving %d" % num)
        img.save("test/image-%d.jpeg" % num)
        #
        img = sensor.snapshot()
        num = pyb.rng()
        print("Saving %d" % num)
        img.save("/test/image-%d" % num)
        #
        img = sensor.snapshot()
        num = pyb.rng()
        print("Saving %d" % num)
        img.save("/test/image-%d.jpg" % num)
        #
        img = sensor.snapshot()
        num = pyb.rng()
        print("Saving %d" % num)
        img.save("/test/image-%d.jpeg" % num)
        #
    print("Sleeping 5...")
    time.sleep(1000)
    print("Sleeping 4...")
    time.sleep(1000)
    print("Sleeping 3...")
    time.sleep(1000)
    print("Sleeping 2...")
    time.sleep(1000)
    print("Sleeping 1...")
    time.sleep(1000)