mirror of https://github.com/openmv/openmv.git (synced 2025-11-04 14:49:50 +08:00)

scripts: Make examples more portable.

commit 96e89664e7 (parent 003177b034)

scripts/examples/00-HelloWorld/blinky.py (new file, 12 lines added)
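Every hunk below applies the same portability pattern: board-specific pyb calls are replaced with their machine, time, and random equivalents (machine.LED, machine.RTC, machine.UART, time.ticks_ms / time.ticks_diff, random.getrandbits / randint), so the examples no longer depend on the STM32-only pyb module. A minimal sketch of the new style, assuming a MicroPython/OpenMV build whose machine.LED accepts the name "LED_BLUE" the way the updated examples use it:

# Sketch of the pyb -> machine/time/random style used throughout this commit.
# Assumes MicroPython's time and random modules and OpenMV's machine.LED("LED_BLUE").
import time
import random
from machine import LED

led = LED("LED_BLUE")  # was: pyb.LED(BLUE_LED_PIN)

start = time.ticks_ms()  # was: start = pyb.millis()
while time.ticks_diff(time.ticks_ms(), start) < 3000:  # was: pyb.elapsed_millis(start) < 3000
    led.toggle()
    time.sleep_ms(100)
led.off()

print("snapshot-%d.jpg" % random.getrandbits(32))  # was: "snapshot-%d.jpg" % pyb.rng()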
@@ -0,0 +1,12 @@
+# Blinky example
+
+import time
+from machine import LED
+
+led = LED("LED_BLUE")
+
+while True:
+    led.on()
+    time.sleep_ms(500)
+    led.off()
+    time.sleep_ms(500)
@@ -1,32 +1,28 @@
 # Emboss Snapshot Example
 #
 # Note: You will need an SD card to run this example.
-#
 # You can use your OpenMV Cam to save modified image files.

 import sensor
-import pyb
+import time
+import machine
-RED_LED_PIN = 1
-BLUE_LED_PIN = 3

 sensor.reset() # Reset and initialize the sensor.
 sensor.set_pixformat(sensor.RGB565) # Set pixel format to RGB565 (or GRAYSCALE)
 sensor.set_framesize(sensor.QVGA) # Set frame size to QVGA (320x240)
 sensor.skip_frames(time=2000) # Wait for settings take effect.

-pyb.LED(RED_LED_PIN).on()
-sensor.skip_frames(time=2000) # Give the user time to get ready.
+led = machine.LED("LED_BLUE")

-pyb.LED(RED_LED_PIN).off()
-pyb.LED(BLUE_LED_PIN).on()
+start = time.ticks_ms()
+while time.ticks_diff(time.ticks_ms(), start) < 3000:
+    sensor.snapshot()
+    led.toggle()

-print("You're on camera!")
+led.off()

 img = sensor.snapshot()

 img.morph(1, [+2, +1, +0, +1, +1, -1, +0, -1, -2]) # Emboss the image.

 img.save("example.jpg") # or "example.bmp" (or others)

-pyb.LED(BLUE_LED_PIN).off()
-print("Done! Reset the camera to see the saved image.")
+raise (Exception("Please reset the camera to see the new file."))
@@ -1,28 +1,27 @@
 # Snapshot Example
 #
 # Note: You will need an SD card to run this example.
-#
 # You can use your OpenMV Cam to save image files.

 import sensor
-import pyb
+import time
+import machine
-RED_LED_PIN = 1
-BLUE_LED_PIN = 3

 sensor.reset() # Reset and initialize the sensor.
 sensor.set_pixformat(sensor.RGB565) # Set pixel format to RGB565 (or GRAYSCALE)
 sensor.set_framesize(sensor.QVGA) # Set frame size to QVGA (320x240)
 sensor.skip_frames(time=2000) # Wait for settings take effect.

-pyb.LED(RED_LED_PIN).on()
-sensor.skip_frames(time=2000) # Give the user time to get ready.
+led = machine.LED("LED_BLUE")

-pyb.LED(RED_LED_PIN).off()
-pyb.LED(BLUE_LED_PIN).on()
+start = time.ticks_ms()
+while time.ticks_diff(time.ticks_ms(), start) < 3000:
+    sensor.snapshot()
+    led.toggle()

-print("You're on camera!")
-sensor.snapshot().save("example.jpg") # or "example.bmp" (or others)
+led.off()

-pyb.LED(BLUE_LED_PIN).off()
-print("Done! Reset the camera to see the saved image.")
+img = sensor.snapshot()
+img.save("example.jpg") # or "example.bmp" (or others)

+raise (Exception("Please reset the camera to see the new file."))
@@ -7,16 +7,16 @@

 import sensor
 import image
-import pyb
+import random
+import machine
-RED_LED_PIN = 1
-BLUE_LED_PIN = 3

 sensor.reset() # Reset and initialize the sensor.
 sensor.set_pixformat(sensor.GRAYSCALE) # Set pixel format to RGB565 (or GRAYSCALE)
-sensor.set_framesize(sensor.HQVGA) # Set frame size to HQVGA
+sensor.set_framesize(sensor.QVGA) # Set frame size to QVGA
 sensor.skip_frames(time=2000) # Wait for settings take effect.

+led = machine.LED("LED_RED")

 # Load up a face detection HaarCascade. This is object that your OpenMV Cam
 # can use to detect faces using the find_features() method below. Your OpenMV
 # Cam has fontalface HaarCascade built-in. By default, all the stages of the
@@ -26,15 +26,12 @@ sensor.skip_frames(time=2000) # Wait for settings take effect.
 face_cascade = image.HaarCascade("frontalface", stages=25)

 while True:
-    pyb.LED(RED_LED_PIN).on()
     print("About to start detecting faces...")
     sensor.skip_frames(time=2000) # Give the user time to get ready.

-    pyb.LED(RED_LED_PIN).off()
     print("Now detecting faces!")
-    pyb.LED(BLUE_LED_PIN).on()

     diff = 10 # We'll say we detected a face after 10 frames.

     while diff:
         img = sensor.snapshot()
         # Threshold can be between 0.0 and 1.0. A higher threshold results in a
@@ -46,7 +43,7 @@ while True:
             diff -= 1
             for r in faces:
                 img.draw_rectangle(r)

-    pyb.LED(BLUE_LED_PIN).off()
+    led.on()
     print("Face detected! Saving image...")
-    sensor.snapshot().save("snapshot-%d.jpg" % pyb.rng()) # Save Pic.
+    sensor.snapshot().save("snapshot-%d.jpg" % random.getrandbits(32)) # Save Pic.
+    led.off()
@@ -6,11 +6,9 @@
 # motion detection. After motion is detected your OpenMV Cam will take picture.

 import sensor
-import pyb
+import random
 import os
+import machine
-RED_LED_PIN = 1
-BLUE_LED_PIN = 3

 sensor.reset() # Reset and initialize the sensor.
 sensor.set_pixformat(sensor.RGB565) # Set pixel format to RGB565 (or GRAYSCALE)
@@ -18,18 +16,17 @@ sensor.set_framesize(sensor.QVGA) # Set frame size to QVGA (320x240)
 sensor.skip_frames(time=2000) # Wait for settings take effect.
 sensor.set_auto_whitebal(False) # Turn off white balance.

+led = machine.LED("LED_RED")

 if not "temp" in os.listdir():
     os.mkdir("temp") # Make a temp directory

 while True:
-    pyb.LED(RED_LED_PIN).on()
     print("About to save background image...")
     sensor.skip_frames(time=2000) # Give the user time to get ready.

-    pyb.LED(RED_LED_PIN).off()
     sensor.snapshot().save("temp/bg.bmp")
     print("Saved background image - Now detecting motion!")
-    pyb.LED(BLUE_LED_PIN).on()

     diff = 10 # We'll say we detected motion after 10 frames of motion.
     while diff:
@@ -42,6 +39,7 @@ while True:
         if stats[5] > 20:
             diff -= 1

-    pyb.LED(BLUE_LED_PIN).off()
+    led.on()
     print("Movement detected! Saving image...")
-    sensor.snapshot().save("temp/snapshot-%d.jpg" % pyb.rng()) # Save Pic.
+    sensor.snapshot().save("temp/snapshot-%d.jpg" % random.getrandbits(32)) # Save Pic.
+    led.off()
@@ -8,21 +8,19 @@
 # pictures it will run the bootloader each time. Please power the camera
 # from something other than USB to not have the bootloader run.

-import pyb
 import machine
 import sensor
 import os

 # Create and init RTC object. This will allow us to set the current time for
 # the RTC and let us set an interrupt to wake up later on.
-rtc = pyb.RTC()
+rtc = machine.RTC()
 newFile = False

 try:
     os.stat("time.txt")
-except (
-    OSError
-): # If the log file doesn't exist then set the RTC and set newFile to True
+except OSError:
+    # If the log file doesn't exist then set the RTC and set newFile to True
     # datetime format: year, month, day, weekday (Monday=1, Sunday=7),
     # hours (24 hour clock), minutes, seconds, subseconds (counts down from 255 to 0)
     rtc.datetime((2018, 3, 9, 5, 13, 0, 0, 0))
@@ -38,9 +36,8 @@ minute = "%02d" % dateTime[5]
 second = "%02d" % dateTime[6]
 subSecond = str(dateTime[7])

-newName = (
-    "I" + year + month + day + hour + minute + second
-) # Image file name based on RTC
+# Image file name based on RTC
+newName = "I" + year + month + day + hour + minute + second

 # Enable RTC interrupts every 10 seconds, camera will RESET after wakeup from deepsleep Mode.
 rtc.wakeup(10000)
@@ -52,65 +49,31 @@ sensor.set_pixformat(sensor.GRAYSCALE)
 sensor.set_framesize(sensor.VGA)
 sensor.skip_frames(time=1000) # Let new settings take affect.

-# Let folks know we are about to take a picture.
-pyb.LED(BLUE_LED_PIN).on()
+led = machine.LED("LED_BLUE")

-if newFile: # If log file does not exist then create it.
-    with open(
-        "time.txt", "a"
-    ) as timeFile: # Write text file to keep track of date, time and image number.
+if newFile:
+    # If log file does not exist then create it.
+    with open("time.txt", "a") as timeFile:
+        # Write text file to keep track of date, time and image number.
+        timeFile.write("name, year, month, day, hours, minutes, seconds, subseconds\n")
         timeFile.write(
-            "Date and time format: year, month, day, hours, minutes, seconds, subseconds"
-            + "\n"
-        )
-        timeFile.write(
-            newName
-            + ","
-            + year
-            + ","
-            + month
-            + ","
-            + day
-            + ","
-            + hour
-            + ","
-            + minute
-            + ","
-            + second
-            + ","
-            + subSecond
-            + "\n"
+            f"{newName},{year},{month},{day},{hour},{minute},{second},{subSecond}\n"
         )
 else:
-    with open(
-        "time.txt", "a"
-    ) as timeFile: # Append to date, time and image number to text file.
+    with open("time.txt", "a") as timeFile:
+        # Append to date, time and image number to text file.
         timeFile.write(
-            newName
-            + ","
-            + year
-            + ","
-            + month
-            + ","
-            + day
-            + ","
-            + hour
-            + ","
-            + minute
-            + ","
-            + second
-            + ","
-            + subSecond
-            + "\n"
+            f"{newName},{year},{month},{day},{hour},{minute},{second},{subSecond}\n"
         )

 if not "images" in os.listdir():
     os.mkdir("images") # Make a temp directory

 # Take photo and save to SD card
+led.on()
 img = sensor.snapshot()
 img.save("images/" + newName, quality=90)
-pyb.LED(BLUE_LED_PIN).off()
+led.off()

 # Enter Deepsleep Mode (i.e. the OpenMV Cam effectively turns itself off except for the RTC).
 machine.deepsleep()
@@ -9,26 +9,19 @@
 import sensor
 import time
 import gif
-import pyb
+import machine

-RED_LED_PIN = 1
-BLUE_LED_PIN = 3

 sensor.reset() # Reset and initialize the sensor.
 sensor.set_pixformat(sensor.RGB565) # Set pixel format to RGB565 (or GRAYSCALE)
 sensor.set_framesize(sensor.QQVGA) # Set frame size to QQVGA (160x120)
 sensor.skip_frames(time=2000) # Wait for settings take effect.
-clock = time.clock() # Create a clock object to track the FPS.

-pyb.LED(RED_LED_PIN).on()
-sensor.skip_frames(time=2000) # Give the user time to get ready.
+led = machine.LED("LED_RED")

-pyb.LED(RED_LED_PIN).off()
-pyb.LED(BLUE_LED_PIN).on()

+led.on()
 g = gif.Gif("example.gif", loop=True)

-print("You're on camera!")
+clock = time.clock() # Create a clock object to track the FPS.
 for i in range(100):
     clock.tick()
     # clock.avg() returns the milliseconds between frames - gif delay is in
@@ -36,5 +29,6 @@ for i in range(100):
     print(clock.fps())

 g.close()
-pyb.LED(BLUE_LED_PIN).off()
-print("Done! Reset the camera to see the saved recording.")
+led.off()
+
+raise (Exception("Please reset the camera to see the new file."))
@@ -13,16 +13,16 @@ import sensor
 import image
 import time
 import gif
-import pyb
+import machine
+import random
-RED_LED_PIN = 1
-BLUE_LED_PIN = 3

 sensor.reset() # Initialize the camera sensor.
 sensor.set_pixformat(sensor.GRAYSCALE) # Set pixel format to RGB565 (or GRAYSCALE)
-sensor.set_framesize(sensor.QQVGA) # Set frame size to QQVGA (160x120)
+sensor.set_framesize(sensor.QVGA) # Set frame size to QVGA
 sensor.skip_frames(time=2000) # Wait for settings take effect.

+led = machine.LED("LED_RED")

 # Load up a face detection HaarCascade. This is object that your OpenMV Cam
 # can use to detect faces using the find_features() method below. Your OpenMV
 # Cam has fontalface HaarCascade built-in. By default, all the stages of the
@@ -32,13 +32,10 @@ sensor.skip_frames(time=2000) # Wait for settings take effect.
 face_cascade = image.HaarCascade("frontalface", stages=25)

 while True:
-    pyb.LED(RED_LED_PIN).on()
     print("About to start detecting faces...")
     sensor.skip_frames(time=2000) # Give the user time to get ready.

-    pyb.LED(RED_LED_PIN).off()
     print("Now detecting faces!")
-    pyb.LED(BLUE_LED_PIN).on()

     diff = 10 # We'll say we detected a face after 10 frames.
     while diff:
@@ -53,10 +50,10 @@ while True:
             for r in faces:
                 img.draw_rectangle(r)

-    g = gif.Gif("example-%d.gif" % pyb.rng(), loop=True)
+    led.on()
+    g = gif.Gif("example-%d.gif" % random.getrandbits(32), loop=True)

     clock = time.clock() # Tracks FPS.
-    print("You're on camera!")
     for i in range(100):
         clock.tick()
         # clock.avg() returns the milliseconds between frames - gif delay is in
@@ -64,5 +61,5 @@ while True:
         print(clock.fps())

     g.close()
-    pyb.LED(BLUE_LED_PIN).off()
+    led.off()
     print("Restarting...")
@@ -12,11 +12,9 @@
 import sensor
 import time
 import gif
-import pyb
 import os
+import machine
+import random
-RED_LED_PIN = 1
-BLUE_LED_PIN = 3

 sensor.reset() # Reset and initialize the sensor.
 sensor.set_pixformat(sensor.RGB565) # Set pixel format to RGB565 (or GRAYSCALE)
@@ -24,18 +22,17 @@ sensor.set_framesize(sensor.QQVGA) # Set frame size to QQVGA (160x120)
 sensor.skip_frames(time=2000) # Wait for settings take effect.
 sensor.set_auto_whitebal(False) # Turn off white balance.

+led = machine.LED("LED_RED")

 if not "temp" in os.listdir():
     os.mkdir("temp") # Make a temp directory

 while True:
-    pyb.LED(RED_LED_PIN).on()
     print("About to save background image...")
     sensor.skip_frames(time=2000) # Give the user time to get ready.

-    pyb.LED(RED_LED_PIN).off()
     sensor.snapshot().save("temp/bg.bmp")
     print("Saved background image - Now detecting motion!")
-    pyb.LED(BLUE_LED_PIN).on()

     diff = 10 # We'll say we detected motion after 10 frames of motion.
     while diff:
@@ -48,10 +45,10 @@ while True:
         if stats[5] > 20:
             diff -= 1

-    g = gif.Gif("example-%d.gif" % pyb.rng(), loop=True)
+    led.on()
+    g = gif.Gif("example-%d.gif" % random.getrandbits(32), loop=True)

     clock = time.clock() # Tracks FPS.
-    print("You're on camera!")
     for i in range(100):
         clock.tick()
         # clock.avg() returns the milliseconds between frames - gif delay is in
@@ -59,5 +56,5 @@ while True:
         print(clock.fps())

     g.close()
-    pyb.LED(BLUE_LED_PIN).off()
+    led.off()
     print("Restarting...")
@@ -1,10 +1,10 @@
 # Image Reader Example
 #
-# USE THIS EXAMPLE WITH A USD CARD!
+# NOTE: This example requires an SD card.
 #
 # This example shows how to use the Image Reader object to replay snapshots of what your
 # OpenMV Cam saw saved by the Image Writer object for testing machine vision algorithms.
+#
 # Altered to allow full speed reading from SD card for extraction of sequences to the network etc.
 # Set the new pause parameter to false

@@ -1,6 +1,6 @@
 # Image Writer Example
 #
-# USE THIS EXAMPLE WITH A USD CARD! Reset the camera after recording to see the file.
+# NOTE: This example requires an SD card.
 #
 # This example shows how to use the Image Writer object to record snapshots of what your
 # OpenMV Cam sees for later analysis using the Image Reader object. Images written to disk
@@ -8,8 +8,8 @@

 import sensor
 import image
-import pyb
 import time
+import machine

 record_time = 10000 # 10 seconds in milliseconds

@@ -19,13 +19,14 @@ sensor.set_framesize(sensor.QQVGA) # Set frame size to QQVGA (160x120)
 sensor.skip_frames(time=2000) # Wait for settings take effect.
 clock = time.clock() # Create a clock object to track the FPS.

+led = machine.LED("LED_RED")
 stream = image.ImageIO("/stream.bin", "w")

 # Red LED on means we are capturing frames.
-pyb.LED(1).on()
+led.on()

-start = pyb.millis()
-while pyb.elapsed_millis(start) < record_time:
+start = time.ticks_ms()
+while time.ticks_diff(time.ticks_ms(), start) < record_time:
     clock.tick()
     img = sensor.snapshot()
     # Modify the image if you feel like here...
@@ -33,7 +34,6 @@ while pyb.elapsed_millis(start) < record_time:
     print(clock.fps())

 stream.close()
+led.off()

-# Blue LED on means we are done.
-pyb.LED(1).off()
-pyb.LED(3).on()
+raise (Exception("Please reset the camera to see the new file."))
@@ -10,31 +10,25 @@
 import sensor
 import time
 import mjpeg
-import pyb
+import machine

-RED_LED_PIN = 1
-BLUE_LED_PIN = 3

 sensor.reset() # Reset and initialize the sensor.
 sensor.set_pixformat(sensor.RGB565) # Set pixel format to RGB565 (or GRAYSCALE)
 sensor.set_framesize(sensor.QVGA) # Set frame size to QVGA (320x240)
 sensor.skip_frames(time=2000) # Wait for settings take effect.
-clock = time.clock() # Create a clock object to track the FPS.

-pyb.LED(RED_LED_PIN).on()
-sensor.skip_frames(time=2000) # Give the user time to get ready.
+led = machine.LED("LED_RED")

-pyb.LED(RED_LED_PIN).off()
-pyb.LED(BLUE_LED_PIN).on()

+led.on()
 m = mjpeg.Mjpeg("example.mjpeg")

-print("You're on camera!")
+clock = time.clock() # Create a clock object to track the FPS.
 for i in range(200):
     clock.tick()
     m.add_frame(sensor.snapshot())
     print(clock.fps())

 m.close(clock.fps())
-pyb.LED(BLUE_LED_PIN).off()
-print("Done! Reset the camera to see the saved recording.")
+led.off()
+
+raise (Exception("Please reset the camera to see the new file."))
@@ -14,16 +14,15 @@ import sensor
 import image
 import time
 import mjpeg
-import pyb
+import random

-RED_LED_PIN = 1
-BLUE_LED_PIN = 3

 sensor.reset() # Reset and initialize the sensor.
 sensor.set_pixformat(sensor.GRAYSCALE) # Set pixel format to RGB565 (or GRAYSCALE)
 sensor.set_framesize(sensor.QQVGA) # Set frame size to QQVGA (160x120)
 sensor.skip_frames(time=2000) # Wait for settings take effect.

+led = machine.LED("LED_RED")

 # Load up a face detection HaarCascade. This is object that your OpenMV Cam
 # can use to detect faces using the find_features() method below. Your OpenMV
 # Cam has fontalface HaarCascade built-in. By default, all the stages of the
@@ -33,13 +32,10 @@ sensor.skip_frames(time=2000) # Wait for settings take effect.
 face_cascade = image.HaarCascade("frontalface", stages=25)

 while True:
-    pyb.LED(RED_LED_PIN).on()
     print("About to start detecting faces...")
     sensor.skip_frames(time=2000) # Give the user time to get ready.

-    pyb.LED(RED_LED_PIN).off()
     print("Now detecting faces!")
-    pyb.LED(BLUE_LED_PIN).on()

     diff = 10 # We'll say we detected a face after 10 frames.
     while diff:
@@ -54,15 +50,15 @@ while True:
             for r in faces:
                 img.draw_rectangle(r)

-    m = mjpeg.Mjpeg("example-%d.mjpeg" % pyb.rng())
+    led.on()
+    m = mjpeg.Mjpeg("example-%d.mjpeg" % random.getrandbits(32))

     clock = time.clock() # Tracks FPS.
-    print("You're on camera!")
     for i in range(200):
         clock.tick()
         m.add_frame(sensor.snapshot())
         print(clock.fps())

     m.close(clock.fps())
-    pyb.LED(BLUE_LED_PIN).off()
+    led.off()
     print("Restarting...")
@@ -13,11 +13,9 @@
 import sensor
 import time
 import mjpeg
-import pyb
 import os
+import machine
+import random
-RED_LED_PIN = 1
-BLUE_LED_PIN = 3

 sensor.reset() # Reset and initialize the sensor.
 sensor.set_pixformat(sensor.RGB565) # Set pixel format to RGB565 (or GRAYSCALE)
@@ -25,18 +23,17 @@ sensor.set_framesize(sensor.QVGA) # Set frame size to QVGA (320x240)
 sensor.skip_frames(time=2000) # Wait for settings take effect.
 sensor.set_auto_whitebal(False) # Turn off white balance.

+led = machine.LED("LED_RED")

 if not "temp" in os.listdir():
     os.mkdir("temp") # Make a temp directory

 while True:
-    pyb.LED(RED_LED_PIN).on()
     print("About to save background image...")
     sensor.skip_frames(time=2000) # Give the user time to get ready.

-    pyb.LED(RED_LED_PIN).off()
     sensor.snapshot().save("temp/bg.bmp")
     print("Saved background image - Now detecting motion!")
-    pyb.LED(BLUE_LED_PIN).on()

     diff = 10 # We'll say we detected motion after 10 frames of motion.
     while diff:
@@ -49,15 +46,15 @@ while True:
         if stats[5] > 20:
             diff -= 1

-    m = mjpeg.Mjpeg("example-%d.mjpeg" % pyb.rng())
+    led.on()
+    m = mjpeg.Mjpeg("example-%d.mjpeg" % random.getrandbits(32))

     clock = time.clock() # Tracks FPS.
-    print("You're on camera!")
     for i in range(200):
         clock.tick()
         m.add_frame(sensor.snapshot())
         print(clock.fps())

     m.close(clock.fps())
-    pyb.LED(BLUE_LED_PIN).off()
+    led.off()
     print("Restarting...")
@@ -4,7 +4,7 @@

 import sensor
 import time
-import pyb
+from random import randint

 sensor.reset()
 sensor.set_pixformat(sensor.RGB565) # or GRAYSCALE...
@@ -18,13 +18,14 @@ while True:
     img = sensor.snapshot()

     for i in range(10):
-        x0 = (pyb.rng() % (2 * img.width())) - (img.width() // 2)
-        y0 = (pyb.rng() % (2 * img.height())) - (img.height() // 2)
-        x1 = (pyb.rng() % (2 * img.width())) - (img.width() // 2)
-        y1 = (pyb.rng() % (2 * img.height())) - (img.height() // 2)
-        r = (pyb.rng() % 127) + 128
-        g = (pyb.rng() % 127) + 128
-        b = (pyb.rng() % 127) + 128
+        x0 = randint(0, 2 * img.width()) - img.width() // 2
+        y0 = randint(0, 2 * img.height()) - img.height() // 2
+        x1 = randint(0, 2 * img.width()) - img.width() // 2
+        y1 = randint(0, 2 * img.height()) - img.height() // 2
+        r = randint(0, 127) + 128
+        g = randint(0, 127) + 128
+        b = randint(0, 127) + 128

         # If the first argument is a scaler then this method expects
         # to see x0, y0, x1, and y1. Otherwise, it expects a (x0,y0,x1,y1) tuple.
@@ -4,7 +4,7 @@

 import sensor
 import time
-import pyb
+from random import randint

 sensor.reset()
 sensor.set_pixformat(sensor.RGB565) # or GRAYSCALE...
@@ -18,13 +18,13 @@ while True:
     img = sensor.snapshot()

     for i in range(10):
-        x = (pyb.rng() % (2 * img.width())) - (img.width() // 2)
-        y = (pyb.rng() % (2 * img.height())) - (img.height() // 2)
-        radius = pyb.rng() % (max(img.height(), img.width()) // 2)
+        x = randint(0, 2 * img.width()) - img.width() // 2
+        y = randint(0, 2 * img.height()) - img.height() // 2
+        radius = randint(0, max(img.height(), img.width()) // 2)

-        r = (pyb.rng() % 127) + 128
-        g = (pyb.rng() % 127) + 128
-        b = (pyb.rng() % 127) + 128
+        r = randint(0, 127) + 128
+        g = randint(0, 127) + 128
+        b = randint(0, 127) + 128

         # If the first argument is a scaler then this method expects
         # to see x, y, and radius. Otherwise, it expects a (x,y,radius) tuple.
@@ -4,7 +4,7 @@

 import sensor
 import time
-import pyb
+from random import randint

 sensor.reset()
 sensor.set_pixformat(sensor.RGB565) # or GRAYSCALE...
@@ -18,11 +18,12 @@ while True:
     img = sensor.snapshot()

     for i in range(10):
-        x = (pyb.rng() % (2 * img.width())) - (img.width() // 2)
-        y = (pyb.rng() % (2 * img.height())) - (img.height() // 2)
-        r = (pyb.rng() % 127) + 128
-        g = (pyb.rng() % 127) + 128
-        b = (pyb.rng() % 127) + 128
+        x = randint(0, 2 * img.width()) - img.width() // 2
+        y = randint(0, 2 * img.height()) - img.height() // 2
+        r = randint(0, 127) + 128
+        g = randint(0, 127) + 128
+        b = randint(0, 127) + 128

         # If the first argument is a scaler then this method expects
         # to see x and y. Otherwise, it expects a (x,y) tuple.
@@ -4,7 +4,7 @@

 import sensor
 import time
-import pyb
+from random import randint

 sensor.reset()
 sensor.set_pixformat(sensor.RGB565) # or GRAYSCALE...
@@ -18,21 +18,21 @@ while True:
     img = sensor.snapshot()

     for i in range(10):
-        x = (pyb.rng() % (2 * img.width())) - (img.width() // 2)
-        y = (pyb.rng() % (2 * img.height())) - (img.height() // 2)
-        radius_x = pyb.rng() % (max(img.height(), img.width()) // 2)
-        radius_y = pyb.rng() % (max(img.height(), img.width()) // 2)
-        rot = pyb.rng()
+        x = randint(0, 2 * img.width()) - img.width() // 2
+        y = randint(0, 2 * img.height()) - img.height() // 2
+        rx = randint(0, max(img.height(), img.width()) // 2)
+        ry = randint(0, max(img.height(), img.width()) // 2)
+        rot = randint(0, 360)

-        r = (pyb.rng() % 127) + 128
-        g = (pyb.rng() % 127) + 128
-        b = (pyb.rng() % 127) + 128
+        r = randint(0, 127) + 128
+        g = randint(0, 127) + 128
+        b = randint(0, 127) + 128

         # If the first argument is a scaler then this method expects
         # to see x, y, radius x, and radius y.
-        # Otherwise, it expects a (x,y,radius_x,radius_y) tuple.
+        # Otherwise, it expects a (x,y,rx,ry) tuple.
         img.draw_ellipse(
-            x, y, radius_x, radius_y, rot, color=(r, g, b), thickness=2, fill=False
+            x, y, rx, ry, rot, color=(r, g, b), thickness=2, fill=False
         )

     print(clock.fps())
@@ -5,7 +5,7 @@

 import sensor
 import time
-import pyb
+from random import randint

 sensor.reset()
 sensor.set_pixformat(sensor.RGB565) # or GRAYSCALE...
@@ -19,13 +19,13 @@ while True:
     img = sensor.snapshot()

     for i in range(20):
-        x = (pyb.rng() % (2 * img.width())) - (img.width() // 2)
-        y = (pyb.rng() % (2 * img.height())) - (img.height() // 2)
-        rot = pyb.rng() % 360
+        x = randint(0, 2 * img.width()) - img.width() // 2
+        y = randint(0, 2 * img.height()) - img.height() // 2
+        rot = randint(0, 360)

-        r = (pyb.rng() % 127) + 128
-        g = (pyb.rng() % 127) + 128
-        b = (pyb.rng() % 127) + 128
+        r = randint(0, 127) + 128
+        g = randint(0, 127) + 128
+        b = randint(0, 127) + 128

         # This method draws a keypoints object or a list of (x, y, rot) tuples...
         img.draw_keypoints(
@@ -4,7 +4,7 @@

 import sensor
 import time
-import pyb
+from random import randint

 sensor.reset()
 sensor.set_pixformat(sensor.RGB565) # or GRAYSCALE...
@@ -18,13 +18,14 @@ while True:
     img = sensor.snapshot()

     for i in range(10):
-        x0 = (pyb.rng() % (2 * img.width())) - (img.width() // 2)
-        y0 = (pyb.rng() % (2 * img.height())) - (img.height() // 2)
-        x1 = (pyb.rng() % (2 * img.width())) - (img.width() // 2)
-        y1 = (pyb.rng() % (2 * img.height())) - (img.height() // 2)
-        r = (pyb.rng() % 127) + 128
-        g = (pyb.rng() % 127) + 128
-        b = (pyb.rng() % 127) + 128
+        x0 = randint(0, 2 * img.width()) - img.width() // 2
+        y0 = randint(0, 2 * img.height()) - img.height() // 2
+        x1 = randint(0, 2 * img.width()) - img.width() // 2
+        y1 = randint(0, 2 * img.height()) - img.height() // 2
+        r = randint(0, 127) + 128
+        g = randint(0, 127) + 128
+        b = randint(0, 127) + 128

         # If the first argument is a scaler then this method expects
         # to see x0, y0, x1, and y1. Otherwise, it expects a (x0,y0,x1,y1) tuple.
@@ -4,7 +4,7 @@

 import sensor
 import time
-import pyb
+from random import randint

 sensor.reset()
 sensor.set_pixformat(sensor.RGB565) # or GRAYSCALE...
@@ -18,13 +18,14 @@ while True:
     img = sensor.snapshot()

     for i in range(10):
-        x = (pyb.rng() % (2 * img.width())) - (img.width() // 2)
-        y = (pyb.rng() % (2 * img.height())) - (img.height() // 2)
-        w = pyb.rng() % (img.width() // 2)
-        h = pyb.rng() % (img.height() // 2)
-        r = (pyb.rng() % 127) + 128
-        g = (pyb.rng() % 127) + 128
-        b = (pyb.rng() % 127) + 128
+        x = randint(0, 2 * img.width()) - img.width() // 2
+        y = randint(0, 2 * img.height()) - img.height() // 2
+        w = randint(0, img.width() // 2)
+        h = randint(0, img.height() // 2)
+        r = randint(0, 127) + 128
+        g = randint(0, 127) + 128
+        b = randint(0, 127) + 128

         # If the first argument is a scaler then this method expects
         # to see x, y, w, and h. Otherwise, it expects a (x,y,w,h) tuple.
@@ -4,7 +4,7 @@

 import sensor
 import time
-import pyb
+from random import randint

 sensor.reset()
 sensor.set_pixformat(sensor.RGB565) # or GRAYSCALE...
@@ -18,11 +18,12 @@ while True:
     img = sensor.snapshot()

     for i in range(10):
-        x = (pyb.rng() % (2 * img.width())) - (img.width() // 2)
-        y = (pyb.rng() % (2 * img.height())) - (img.height() // 2)
-        r = (pyb.rng() % 127) + 128
-        g = (pyb.rng() % 127) + 128
-        b = (pyb.rng() % 127) + 128
+        x = randint(0, 2 * img.width()) - img.width() // 2
+        y = randint(0, 2 * img.height()) - img.height() // 2
+        r = randint(0, 127) + 128
+        g = randint(0, 127) + 128
+        b = randint(0, 127) + 128

         # If the first argument is a scaler then this method expects
         # to see x, y, and text. Otherwise, it expects a (x,y,text) tuple.
@@ -10,7 +10,6 @@

 import sensor
 import time
-import pyb

 sensor.reset()
 sensor.set_pixformat(sensor.RGB565)
@@ -18,7 +17,7 @@ sensor.set_framesize(sensor.QVGA)
 sensor.skip_frames(time=2000)
 clock = time.clock()

-mills = pyb.millis()
+ticks = time.ticks_ms()
 counter = 0

 while True:
@@ -30,8 +29,8 @@ while True:
         transpose=(counter // 8) % 2,
     )

-    if pyb.millis() > (mills + 1000):
-        mills = pyb.millis()
+    if time.ticks_diff(time.ticks_ms(), ticks) > 1000:
+        ticks = time.ticks_ms()
         counter += 1

     print(clock.fps())
@@ -2,8 +2,6 @@
 # This example shows how to save a keypoints descriptor to file. Show the camera an object
 # and then run the script. The script will extract and save a keypoints descriptor and the image.
 # You can use the keypoints_editor.py util to remove unwanted keypoints.
-#
-# NOTE: Please reset the camera after running this script to see the new file.
 import sensor
 import time
 import image
@@ -36,4 +34,5 @@ img.save("/%s.pgm" % (FILE_NAME))
 img.draw_keypoints(kpts)
 sensor.snapshot()
 time.sleep_ms(1000)
-raise (Exception("Done! Please reset the camera"))
+
+raise (Exception("Please reset the camera to see the new file."))
@@ -10,9 +10,7 @@ import math

 sensor.reset()
 sensor.set_pixformat(sensor.RGB565)
-sensor.set_framesize(
-    sensor.QQVGA
-) # we run out of memory if the resolution is much bigger...
+sensor.set_framesize(sensor.QQVGA)
 sensor.skip_frames(time=2000)
 sensor.set_auto_gain(False) # must turn this off to prevent image washout...
 sensor.set_auto_whitebal(False) # must turn this off to prevent image washout...
@@ -9,9 +9,7 @@ import math

 sensor.reset()
 sensor.set_pixformat(sensor.RGB565)
-sensor.set_framesize(
-    sensor.QQVGA
-) # we run out of memory if the resolution is much bigger...
+sensor.set_framesize(sensor.QQVGA)
 sensor.skip_frames(time=2000)
 sensor.set_auto_gain(False) # must turn this off to prevent image washout...
 sensor.set_auto_whitebal(False) # must turn this off to prevent image washout...
@@ -11,9 +11,8 @@ import omv

 sensor.reset()
 sensor.set_pixformat(sensor.GRAYSCALE)
-sensor.set_framesize(
-    sensor.VGA
-) # we run out of memory if the resolution is much bigger...
+sensor.set_framesize(sensor.VGA)
 # AprilTags works on a maximum of < 64K pixels.
 if omv.board_type() == "H7":
     sensor.set_windowing((240, 240))
@@ -9,9 +9,7 @@ import math

 sensor.reset()
 sensor.set_pixformat(sensor.RGB565)
-sensor.set_framesize(
-    sensor.VGA
-) # we run out of memory if the resolution is much bigger...
+sensor.set_framesize(sensor.VGA)
 sensor.set_windowing((160, 120)) # Look at center 160x120 pixels of the VGA resolution.
 sensor.skip_frames(time=2000)
 sensor.set_auto_gain(False) # must turn this off to prevent image washout...
@@ -6,17 +6,15 @@
 # P4 = TXD

 import math
-import pyb
 import sensor
 import struct
 import time
+import machine

-# Parameters #################################################################
+UART_BAUDRATE = 115200

-uart_baudrate = 115200

 MAV_system_id = 1
 MAV_component_id = 0x54
+packet_sequence = 0

 lens_mm = 2.8 # Standard Lens.
 lens_to_camera_mm = 22 # Standard Lens.
@@ -35,10 +33,8 @@ valid_tag_ids = {
     2: 165, # 8.5" x 11" tag black border size in mm
 }

-##############################################################################
-
 # Camera Setup

 sensor.reset()
 sensor.set_pixformat(sensor.GRAYSCALE)
 sensor.set_framesize(sensor.QQVGA)
@@ -54,22 +50,17 @@ h_fov = 2 * math.atan((sensor_w_mm / 2) / lens_mm)
 v_fov = 2 * math.atan((sensor_h_mm / 2) / lens_mm)


-def translation_to_mm(translation, tag_size): # translation is in decimeters...
+def translation_to_mm(translation, tag_size):
+    # translation is in decimeters...
     return ((translation * 100) * tag_size) / 210


 # Link Setup
-uart = pyb.UART(3, uart_baudrate, timeout_char=1000)
+uart = machine.UART(3, UART_BAUDRATE, timeout_char=1000)

-# Helper Stuff

-packet_sequence = 0


-def checksum(
-    data, extra
-): # https://github.com/mavlink/c_library_v1/blob/master/checksum.h
+# https://github.com/mavlink/c_library_v1/blob/master/checksum.h
+def checksum(data, extra):
     output = 0xFFFF
     for i in range(len(data)):
         tmp = data[i] ^ (output & 0xFF)
@@ -123,8 +114,8 @@ def send_landing_target_packet(tag, dist_mm, w, h):


 # LED control
-led_success = pyb.LED(2) # Red LED = 1, Green LED = 2, Blue LED = 3, IR LEDs = 4.
-led_fail = pyb.LED(1)
+led_success = machine.LED("LED_GREEN")
+led_fail = machine.LED("LED_RED")
 led_counter = 0

@@ -5,25 +5,21 @@
 #
 # P4 = TXD

-import pyb
 import sensor
 import struct
 import time
+import machine

-# Parameters #################################################################
+UART_BAUDRATE = 115200

-uart_baudrate = 115200

 MAV_system_id = 1
 MAV_component_id = 0x54
-MAV_OPTICAL_FLOW_confidence_threshold = (
-    0.1 # Below 0.1 or so (YMMV) and the results are just noise.
-)
+packet_sequence = 0

-##############################################################################
+# Below 0.1 or so (YMMV) and the results are just noise.
+MAV_OPTICAL_FLOW_confidence_threshold = (0.1)

 # LED control
-led = pyb.LED(2) # Red LED = 1, Green LED = 2, Blue LED = 3, IR LEDs = 4.
+led = machine.LED("LED_BLUE")
 led_state = 0

@@ -38,17 +34,11 @@ def update_led():


 # Link Setup
-uart = pyb.UART(3, uart_baudrate, timeout_char=1000)
+uart = machine.UART(3, UART_BAUDRATE, timeout_char=1000)

-# Helper Stuff

-packet_sequence = 0


-def checksum(
-    data, extra
-): # https://github.com/mavlink/c_library_v1/blob/master/checksum.h
+# https://github.com/mavlink/c_library_v1/blob/master/checksum.h
+def checksum(data, extra):
     output = 0xFFFF
     for i in range(len(data)):
         tmp = data[i] ^ (output & 0xFF)
@@ -1,13 +1,11 @@
 import sensor
 import time
-from pyb import UART
+from machine import UART
 from modbus import ModbusRTU

 sensor.reset()
 sensor.set_pixformat(sensor.GRAYSCALE)
-sensor.set_framesize(
-    sensor.QQVGA
-) # we run out of memory if the resolution is much bigger...
+sensor.set_framesize(sensor.QQVGA)

 uart = UART(3, 115200, parity=None, stop=2, timeout=1, timeout_char=4)
 modbus = ModbusRTU(uart, register_num=9999)
@@ -1,5 +1,5 @@
 import time
-from pyb import UART
+from machine import UART
 from modbus import ModbusRTU

 uart = UART(3, 115200, parity=None, stop=2, timeout=1, timeout_char=4)
@@ -1,14 +0,0 @@
-# Blinky example
-
-import time
-from machine import Pin
-
-# This is the only LED pin available on the Nano RP2040,
-# other than the RGB LED connected to Nina WiFi module.
-led = Pin("LED_BLUE", Pin.OUT)
-
-while True:
-    led.on()
-    time.sleep_ms(250)
-    led.off()
-    time.sleep_ms(250)
@@ -2,12 +2,11 @@
 # This example demonstrates the low-power deep sleep mode plus sensor shutdown.
 # Note the camera will reset after wake-up from deep sleep. To find out if the cause of reset
 # is deep sleep, call the machine.reset_cause() function and test for machine.DEEPSLEEP_RESET
-import pyb
 import machine
 import sensor

 # Create and init RTC object.
-rtc = pyb.RTC()
+rtc = machine.RTC()

 # (year, month, day[, hour[, minute[, second[, microsecond[, tzinfo]]]]])
 rtc.datetime((2014, 5, 1, 4, 13, 0, 0, 0))
@@ -1,28 +0,0 @@
-# Blinky example
-
-import time
-from board import LED
-
-led_red = LED(1)
-led_green = LED(2)
-led_blue = LED(3)
-led_yellow = LED(4)
-
-while True:
-    led_blue.on()
-    time.sleep_ms(250)
-    led_blue.off()
-
-    led_red.on()
-    time.sleep_ms(250)
-    led_red.off()
-
-    led_green.on()
-    time.sleep_ms(250)
-    led_green.off()
-
-    led_yellow.on()
-    time.sleep_ms(250)
-    led_yellow.off()
-
-    time.sleep_ms(500)
@@ -1,14 +0,0 @@
-# Blinky example
-
-import time
-from machine import Pin
-
-# This is the only LED pin available on the Nano RP2040,
-# other than the RGB LED connected to Nina WiFi module.
-led = Pin(6, Pin.OUT)
-
-while True:
-    led.on()
-    time.sleep_ms(250)
-    led.off()
-    time.sleep_ms(250)
@@ -1,14 +0,0 @@
-# Blinky example
-
-import time
-from machine import Pin
-
-# This is the only LED pin available on the Nano RP2040,
-# other than the RGB LED connected to Nina WiFi module.
-led = Pin("LED_BLUE", Pin.OUT)
-
-while True:
-    led.on()
-    time.sleep_ms(250)
-    led.off()
-    time.sleep_ms(250)
@@ -2,12 +2,11 @@
 # This example demonstrates the low-power deep sleep mode plus sensor shutdown.
 # Note the camera will reset after wake-up from deep sleep. To find out if the cause of reset
 # is deep sleep, call the machine.reset_cause() function and test for machine.DEEPSLEEP_RESET
-import pyb
 import machine
 import sensor

 # Create and init RTC object.
-rtc = pyb.RTC()
+rtc = machine.RTC()

 # (year, month, day[, hour[, minute[, second[, microsecond[, tzinfo]]]]])
 rtc.datetime((2014, 5, 1, 4, 13, 0, 0, 0))
@@ -1,14 +0,0 @@
-# Blinky example
-
-import time
-from machine import Pin
-
-# This is the only LED pin available on the Nano RP2040,
-# other than the RGB LED connected to Nina WiFi module.
-led = Pin("LED_BLUE", Pin.OUT)
-
-while True:
-    led.on()
-    time.sleep_ms(250)
-    led.off()
-    time.sleep_ms(250)
@@ -2,12 +2,11 @@
 # This example demonstrates the low-power deep sleep mode plus sensor shutdown.
 # Note the camera will reset after wake-up from deep sleep. To find out if the cause of reset
 # is deep sleep, call the machine.reset_cause() function and test for machine.DEEPSLEEP_RESET
-import pyb
 import machine
 import sensor

 # Create and init RTC object.
-rtc = pyb.RTC()
+rtc = machine.RTC()

 # (year, month, day[, hour[, minute[, second[, microsecond[, tzinfo]]]]])
 rtc.datetime((2014, 5, 1, 4, 13, 0, 0, 0))