Mirror of https://github.com/openmv/openmv.git (synced 2025-11-04 14:49:50 +08:00)
Merge pull request #1893 from openmv/format_python_examples
examples: Format all examples.
This commit is contained in commit 283c93485e.
.github/workflows/python-linter.yml (vendored): 12 changes
@@ -12,11 +12,11 @@ on:
     branches:
       - 'master'
     paths:
-      - 'scripts/examples/*.py'
-      - 'scripts/libraries/*.py'
+      - 'scripts/examples/**.py'
+      - 'scripts/libraries/**.py'
 
 jobs:
-  build:
+  formatting-check:
     runs-on: ubuntu-latest
     strategy:
       fail-fast: false
@@ -35,12 +35,12 @@ jobs:
 
       - name: '🛠 Install dependencies'
         run: |
-          pip install -r requirements.txt
+          pip install -r .github/workflows/requirements.txt
           flake8 --version
           pytest --version
 
       - name: '😾 Lint with flake8'
         run: |
           # stop the build if there are Python syntax errors or undefined names
-          flake8 --count --select=E9,F63,F7,F82 --ignore=F821 --show-source --statistics scripts/libraries/
-          flake8 --count --max-complexity=15 --max-line-length=120 --ignore=F821,E722,E741,C901,E713,W605,E203,W503,F841,F403,F405 --statistics scripts/libraries/
+          flake8 --count --select=E9,F63,F7,F82 --ignore=F821 --show-source --statistics scripts/libraries/ scripts/examples/
+          flake8 --count --max-complexity=15 --max-line-length=120 --ignore=F821,E722,E741,C901,E713,W605,E203,W503,F841,F403,F405 --statistics scripts/libraries/ scripts/examples/
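For contributors who want to run the same checks before pushing, the two flake8 passes in the workflow above can be reproduced locally. A minimal sketch (the runner script itself is hypothetical and not part of this PR; it assumes flake8 is installed per the pinned requirements file below):

    # Hypothetical local runner mirroring the CI lint step above.
    import subprocess

    TARGETS = ["scripts/libraries/", "scripts/examples/"]

    # Pass 1: fail on hard errors only (syntax errors, undefined names).
    subprocess.run(
        ["flake8", "--count", "--select=E9,F63,F7,F82", "--ignore=F821",
         "--show-source", "--statistics", *TARGETS],
        check=True,
    )

    # Pass 2: style checks with the project's ignore list.
    subprocess.run(
        ["flake8", "--count", "--max-complexity=15", "--max-line-length=120",
         "--ignore=F821,E722,E741,C901,E713,W605,E203,W503,F841,F403,F405",
         "--statistics", *TARGETS],
        check=True,
    )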
.github/workflows/requirements.txt (vendored): 4 changes

@@ -1,2 +1,2 @@
-flake8
-pytest
+flake8==6.0.0
+pytest==7.4.0
@@ -2,16 +2,17 @@
 #
 # Welcome to the OpenMV IDE! Click on the green run arrow button below to run the script!
 
-import sensor, image, time
+import sensor
+import time
 
-sensor.reset() # Reset and initialize the sensor.
-sensor.set_pixformat(sensor.RGB565) # Set pixel format to RGB565 (or GRAYSCALE)
-sensor.set_framesize(sensor.QVGA) # Set frame size to QVGA (320x240)
-sensor.skip_frames(time = 2000) # Wait for settings take effect.
-clock = time.clock() # Create a clock object to track the FPS.
+sensor.reset()  # Reset and initialize the sensor.
+sensor.set_pixformat(sensor.RGB565)  # Set pixel format to RGB565 (or GRAYSCALE)
+sensor.set_framesize(sensor.QVGA)  # Set frame size to QVGA (320x240)
+sensor.skip_frames(time=2000)  # Wait for settings take effect.
+clock = time.clock()  # Create a clock object to track the FPS.
 
-while(True):
-    clock.tick() # Update the FPS clock.
-    img = sensor.snapshot() # Take a picture and return the image.
-    print(clock.fps()) # Note: OpenMV Cam runs about half as fast when connected
-    # to the IDE. The FPS should increase once disconnected.
+while True:
+    clock.tick()  # Update the FPS clock.
+    img = sensor.snapshot()  # Take a picture and return the image.
+    print(clock.fps())  # Note: OpenMV Cam runs about half as fast when connected
+    # to the IDE. The FPS should increase once disconnected.
@@ -4,18 +4,19 @@
 #
 # You can use your OpenMV Cam to save modified image files.
 
-import sensor, image, pyb
+import sensor
+import pyb
 
 RED_LED_PIN = 1
 BLUE_LED_PIN = 3
 
-sensor.reset() # Initialize the camera sensor.
-sensor.set_pixformat(sensor.RGB565) # or sensor.GRAYSCALE
-sensor.set_framesize(sensor.QVGA) # or sensor.QQVGA (or others)
-sensor.skip_frames(time = 2000) # Let new settings take affect.
+sensor.reset()  # Reset and initialize the sensor.
+sensor.set_pixformat(sensor.RGB565)  # Set pixel format to RGB565 (or GRAYSCALE)
+sensor.set_framesize(sensor.QVGA)  # Set frame size to QVGA (320x240)
+sensor.skip_frames(time=2000)  # Wait for settings take effect.
 
 pyb.LED(RED_LED_PIN).on()
-sensor.skip_frames(time = 2000) # Give the user time to get ready.
+sensor.skip_frames(time=2000)  # Give the user time to get ready.
 
 pyb.LED(RED_LED_PIN).off()
 pyb.LED(BLUE_LED_PIN).on()
@@ -23,11 +24,9 @@ pyb.LED(BLUE_LED_PIN).on()
 print("You're on camera!")
 img = sensor.snapshot()
 
-img.morph(1, [+2, +1, +0,\
-              +1, +1, -1,\
-              +0, -1, -2]) # Emboss the image.
+img.morph(1, [+2, +1, +0, +1, +1, -1, +0, -1, -2])  # Emboss the image.
 
-img.save("example.jpg") # or "example.bmp" (or others)
+img.save("example.jpg")  # or "example.bmp" (or others)
 
 pyb.LED(BLUE_LED_PIN).off()
 print("Done! Reset the camera to see the saved image.")
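The flattened list passed to img.morph() above is a 3x3 convolution kernel read row by row (img.morph(1, ...) means a (2*1+1) x (2*1+1) kernel). Laid out two-dimensionally, the emboss structure is easier to see; this is purely a readability rewrite of the same call:

    # The same emboss kernel as above, shaped as a 3x3 grid for clarity.
    EMBOSS_KERNEL = [
        +2, +1, +0,  # top row
        +1, +1, -1,  # middle row
        +0, -1, -2,  # bottom row
    ]
    img.morph(1, EMBOSS_KERNEL)  # Equivalent to the single-line call in the diff.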
@@ -4,24 +4,25 @@
 #
 # You can use your OpenMV Cam to save image files.
 
-import sensor, image, pyb
+import sensor
+import pyb
 
 RED_LED_PIN = 1
 BLUE_LED_PIN = 3
 
-sensor.reset() # Initialize the camera sensor.
-sensor.set_pixformat(sensor.RGB565) # or sensor.GRAYSCALE
-sensor.set_framesize(sensor.QVGA) # or sensor.QQVGA (or others)
-sensor.skip_frames(time = 2000) # Let new settings take affect.
+sensor.reset()  # Reset and initialize the sensor.
+sensor.set_pixformat(sensor.RGB565)  # Set pixel format to RGB565 (or GRAYSCALE)
+sensor.set_framesize(sensor.QVGA)  # Set frame size to QVGA (320x240)
+sensor.skip_frames(time=2000)  # Wait for settings take effect.
 
 pyb.LED(RED_LED_PIN).on()
-sensor.skip_frames(time = 2000) # Give the user time to get ready.
+sensor.skip_frames(time=2000)  # Give the user time to get ready.
 
 pyb.LED(RED_LED_PIN).off()
 pyb.LED(BLUE_LED_PIN).on()
 
 print("You're on camera!")
-sensor.snapshot().save("example.jpg") # or "example.bmp" (or others)
+sensor.snapshot().save("example.jpg")  # or "example.bmp" (or others)
 
 pyb.LED(BLUE_LED_PIN).off()
 print("Done! Reset the camera to see the saved image.")
@@ -5,15 +5,17 @@
 # This example demonstrates using face tracking on your OpenMV Cam to take a
 # picture.
 
-import sensor, image, pyb
+import sensor
+import image
+import pyb
 
 RED_LED_PIN = 1
 BLUE_LED_PIN = 3
 
-sensor.reset() # Initialize the camera sensor.
-sensor.set_pixformat(sensor.GRAYSCALE)
-sensor.set_framesize(sensor.HQVGA) # or sensor.QQVGA (or others)
-sensor.skip_frames(time = 2000) # Let new settings take affect.
+sensor.reset()  # Reset and initialize the sensor.
+sensor.set_pixformat(sensor.GRAYSCALE)  # Set pixel format to RGB565 (or GRAYSCALE)
+sensor.set_framesize(sensor.HQVGA)  # Set frame size to HQVGA
+sensor.skip_frames(time=2000)  # Wait for settings take effect.
 
 # Load up a face detection HaarCascade. This is object that your OpenMV Cam
 # can use to detect faces using the find_features() method below. Your OpenMV
@@ -23,18 +25,17 @@ sensor.skip_frames(time = 2000) # Let new settings take affect.
 # stages.
 face_cascade = image.HaarCascade("frontalface", stages=25)
 
-while(True):
-
+while True:
     pyb.LED(RED_LED_PIN).on()
     print("About to start detecting faces...")
-    sensor.skip_frames(time = 2000) # Give the user time to get ready.
+    sensor.skip_frames(time=2000)  # Give the user time to get ready.
 
     pyb.LED(RED_LED_PIN).off()
     print("Now detecting faces!")
     pyb.LED(BLUE_LED_PIN).on()
 
-    diff = 10 # We'll say we detected a face after 10 frames.
-    while(diff):
+    diff = 10  # We'll say we detected a face after 10 frames.
+    while diff:
         img = sensor.snapshot()
         # Threshold can be between 0.0 and 1.0. A higher threshold results in a
         # higher detection rate with more false positives. The scale value
@@ -48,4 +49,4 @@ while(True):
 
     pyb.LED(BLUE_LED_PIN).off()
     print("Face detected! Saving image...")
-    sensor.snapshot().save("snapshot-%d.jpg" % pyb.rng()) # Save Pic.
+    sensor.snapshot().save("snapshot-%d.jpg" % pyb.rng())  # Save Pic.
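The threshold and scale comments in the hunk above describe the arguments of find_features(), whose call site falls just outside the diff context. A hedged sketch of that detection step (argument names per the OpenMV image docs; treat the exact values as assumptions and verify against your firmware):

    # Detect faces using the cascade loaded above. threshold trades detection
    # rate against false positives; scale_factor sets how the search window
    # grows between passes over the image.
    objects = img.find_features(face_cascade, threshold=0.5, scale_factor=1.25)
    if objects:
        diff -= 1  # Count this frame toward the 10-frame confirmation.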
@@ -5,41 +5,43 @@
 # This example demonstrates using frame differencing with your OpenMV Cam to do
 # motion detection. After motion is detected your OpenMV Cam will take picture.
 
-import sensor, image, pyb, os
+import sensor
+import pyb
+import os
 
 RED_LED_PIN = 1
 BLUE_LED_PIN = 3
 
-sensor.reset() # Initialize the camera sensor.
-sensor.set_pixformat(sensor.RGB565) # or sensor.GRAYSCALE
-sensor.set_framesize(sensor.QVGA) # or sensor.QQVGA (or others)
-sensor.skip_frames(time = 2000) # Let new settings take affect.
-sensor.set_auto_whitebal(False) # Turn off white balance.
+sensor.reset()  # Reset and initialize the sensor.
+sensor.set_pixformat(sensor.RGB565)  # Set pixel format to RGB565 (or GRAYSCALE)
+sensor.set_framesize(sensor.QVGA)  # Set frame size to QVGA (320x240)
+sensor.skip_frames(time=2000)  # Wait for settings take effect.
+sensor.set_auto_whitebal(False)  # Turn off white balance.
 
-if not "temp" in os.listdir(): os.mkdir("temp") # Make a temp directory
-
-while(True):
+if not "temp" in os.listdir():
+    os.mkdir("temp")  # Make a temp directory
+
+while True:
     pyb.LED(RED_LED_PIN).on()
     print("About to save background image...")
-    sensor.skip_frames(time = 2000) # Give the user time to get ready.
+    sensor.skip_frames(time=2000)  # Give the user time to get ready.
 
     pyb.LED(RED_LED_PIN).off()
     sensor.snapshot().save("temp/bg.bmp")
     print("Saved background image - Now detecting motion!")
     pyb.LED(BLUE_LED_PIN).on()
 
-    diff = 10 # We'll say we detected motion after 10 frames of motion.
-    while(diff):
+    diff = 10  # We'll say we detected motion after 10 frames of motion.
+    while diff:
         img = sensor.snapshot()
         img.difference("temp/bg.bmp")
         stats = img.statistics()
         # Stats 5 is the max of the lighting color channel. The below code
         # triggers when the lighting max for the whole image goes above 20.
         # The lighting difference maximum should be zero normally.
-        if (stats[5] > 20):
+        if stats[5] > 20:
             diff -= 1
 
     pyb.LED(BLUE_LED_PIN).off()
     print("Movement detected! Saving image...")
-    sensor.snapshot().save("temp/snapshot-%d.jpg" % pyb.rng()) # Save Pic.
+    sensor.snapshot().save("temp/snapshot-%d.jpg" % pyb.rng())  # Save Pic.
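The stats[5] index that the comments explain is the maximum of the LAB lightness channel of the differenced image. On firmware where the statistics object exposes named accessors, the same check can be written more readably; a sketch (accessor name per the OpenMV image.statistics() docs, so treat it as an assumption):

    stats = img.statistics()
    if stats.l_max() > 20:  # Same trigger condition as stats[5] > 20 above.
        diff -= 1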
@@ -8,7 +8,10 @@
 # pictures it will run the bootloader each time. Please power the camera
 # from something other than USB to not have the bootloader run.
 
-import pyb, machine, sensor, image, pyb, os
+import pyb
+import machine
+import sensor
+import os
 
 # Create and init RTC object. This will allow us to set the current time for
 # the RTC and let us set an interrupt to wake up later on.
@@ -16,51 +19,97 @@ rtc = pyb.RTC()
 newFile = False
 
 try:
-    os.stat('time.txt')
-except OSError: # If the log file doesn't exist then set the RTC and set newFile to True
-    # datetime format: year, month, day, weekday (Monday=1, Sunday=7),
-    # hours (24 hour clock), minutes, seconds, subseconds (counds down from 255 to 0)
-    rtc.datetime((2018, 3, 9, 5, 13, 0, 0, 0))
-    newFile = True
+    os.stat("time.txt")
+except (
+    OSError
+):  # If the log file doesn't exist then set the RTC and set newFile to True
+    # datetime format: year, month, day, weekday (Monday=1, Sunday=7),
+    # hours (24 hour clock), minutes, seconds, subseconds (counds down from 255 to 0)
+    rtc.datetime((2018, 3, 9, 5, 13, 0, 0, 0))
+    newFile = True
 
 # Extract the date and time from the RTC object.
 dateTime = rtc.datetime()
 year = str(dateTime[0])
-month = '%02d' % dateTime[1]
-day = '%02d' % dateTime[2]
-hour = '%02d' % dateTime[4]
-minute = '%02d' % dateTime[5]
-second = '%02d' % dateTime[6]
+month = "%02d" % dateTime[1]
+day = "%02d" % dateTime[2]
+hour = "%02d" % dateTime[4]
+minute = "%02d" % dateTime[5]
+second = "%02d" % dateTime[6]
 subSecond = str(dateTime[7])
 
-newName='I'+year+month+day+hour+minute+second # Image file name based on RTC
+newName = (
+    "I" + year + month + day + hour + minute + second
+)  # Image file name based on RTC
 
 # Enable RTC interrupts every 10 seconds, camera will RESET after wakeup from deepsleep Mode.
 rtc.wakeup(10000)
 
 BLUE_LED_PIN = 3
 
-sensor.reset() # Initialize the camera sensor.
+sensor.reset()  # Initialize the camera sensor.
 sensor.set_pixformat(sensor.GRAYSCALE)
 sensor.set_framesize(sensor.VGA)
-sensor.skip_frames(time = 1000) # Let new settings take affect.
+sensor.skip_frames(time=1000)  # Let new settings take affect.
 
 # Let folks know we are about to take a picture.
 pyb.LED(BLUE_LED_PIN).on()
 
-if(newFile): # If log file does not exist then create it.
-    with open('time.txt', 'a') as timeFile: # Write text file to keep track of date, time and image number.
-        timeFile.write('Date and time format: year, month, day, hours, minutes, seconds, subseconds' + '\n')
-        timeFile.write(newName + ',' + year + ',' + month + ',' + day + ',' + hour + ',' + minute + ',' + second + ',' + subSecond + '\n')
+if newFile:  # If log file does not exist then create it.
+    with open(
+        "time.txt", "a"
+    ) as timeFile:  # Write text file to keep track of date, time and image number.
+        timeFile.write(
+            "Date and time format: year, month, day, hours, minutes, seconds, subseconds"
+            + "\n"
+        )
+        timeFile.write(
+            newName
+            + ","
+            + year
+            + ","
+            + month
+            + ","
+            + day
+            + ","
+            + hour
+            + ","
+            + minute
+            + ","
+            + second
+            + ","
+            + subSecond
+            + "\n"
+        )
 else:
-    with open('time.txt', 'a') as timeFile: # Append to date, time and image number to text file.
-        timeFile.write(newName + ',' + year + ',' + month + ',' + day + ',' + hour + ',' + minute + ',' + second + ',' + subSecond + '\n')
+    with open(
+        "time.txt", "a"
+    ) as timeFile:  # Append to date, time and image number to text file.
+        timeFile.write(
+            newName
+            + ","
+            + year
+            + ","
+            + month
+            + ","
+            + day
+            + ","
+            + hour
+            + ","
+            + minute
+            + ","
+            + second
+            + ","
+            + subSecond
+            + "\n"
+        )
 
-if not "images" in os.listdir(): os.mkdir("images") # Make a temp directory
+if not "images" in os.listdir():
+    os.mkdir("images")  # Make a temp directory
 
 # Take photo and save to SD card
 img = sensor.snapshot()
-img.save('images/' + newName, quality=90)
+img.save("images/" + newName, quality=90)
 pyb.LED(BLUE_LED_PIN).off()
 
 # Enter Deepsleep Mode (i.e. the OpenMV Cam effectively turns itself off except for the RTC).
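The eight-element tuple passed to rtc.datetime() packs its fields in the order the comment gives. The call from the diff, with each field labeled for reference:

    rtc.datetime((
        2018,  # year
        3,     # month
        9,     # day
        5,     # weekday (Monday=1 ... Sunday=7; 2018-03-09 was a Friday)
        13,    # hours (24-hour clock)
        0,     # minutes
        0,     # seconds
        0,     # subseconds (counts down from 255 to 0)
    ))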
@@ -6,19 +6,22 @@
 # recorder object RGB565 frames or Grayscale frames. Use photo editing software
 # like GIMP to compress and optimize the Gif before uploading it to the web.
 
-import sensor, image, time, gif, pyb
+import sensor
+import time
+import gif
+import pyb
 
 RED_LED_PIN = 1
 BLUE_LED_PIN = 3
 
-sensor.reset() # Initialize the camera sensor.
-sensor.set_pixformat(sensor.RGB565) # or sensor.GRAYSCALE
-sensor.set_framesize(sensor.QQVGA) # or sensor.QVGA (or others)
-sensor.skip_frames(time = 2000) # Let new settings take affect.
-clock = time.clock() # Tracks FPS.
+sensor.reset()  # Reset and initialize the sensor.
+sensor.set_pixformat(sensor.RGB565)  # Set pixel format to RGB565 (or GRAYSCALE)
+sensor.set_framesize(sensor.QQVGA)  # Set frame size to QQVGA (160x120)
+sensor.skip_frames(time=2000)  # Wait for settings take effect.
+clock = time.clock()  # Create a clock object to track the FPS.
 
 pyb.LED(RED_LED_PIN).on()
-sensor.skip_frames(time = 2000) # Give the user time to get ready.
+sensor.skip_frames(time=2000)  # Give the user time to get ready.
 
 pyb.LED(RED_LED_PIN).off()
 pyb.LED(BLUE_LED_PIN).on()
@@ -29,7 +32,7 @@ print("You're on camera!")
 for i in range(100):
     clock.tick()
     # clock.avg() returns the milliseconds between frames - gif delay is in
-    g.add_frame(sensor.snapshot(), delay=int(clock.avg()/10)) # centiseconds.
+    g.add_frame(sensor.snapshot(), delay=int(clock.avg() / 10))  # centiseconds.
 print(clock.fps())
 
 g.close()
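The delay arithmetic above converts clock.avg(), which reports milliseconds per frame, into the GIF format's centisecond delay units (1 cs = 10 ms), hence the division by 10. A worked example with assumed numbers:

    avg_ms = 40.0                # e.g. a camera running at roughly 25 FPS
    delay_cs = int(avg_ms / 10)  # -> 4 centiseconds between GIF frames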
@@ -9,15 +9,19 @@
 # This example demonstrates using face tracking on your OpenMV Cam to take a
 # gif.
 
-import sensor, image, time, gif, pyb
+import sensor
+import image
+import time
+import gif
+import pyb
 
 RED_LED_PIN = 1
 BLUE_LED_PIN = 3
 
-sensor.reset() # Initialize the camera sensor.
-sensor.set_pixformat(sensor.GRAYSCALE) # or sensor.
-sensor.set_framesize(sensor.QQVGA) # or sensor.HQVGA (or others)
-sensor.skip_frames(time = 2000) # Let new settings take affect.
+sensor.reset()  # Initialize the camera sensor.
+sensor.set_pixformat(sensor.GRAYSCALE)  # Set pixel format to RGB565 (or GRAYSCALE)
+sensor.set_framesize(sensor.QQVGA)  # Set frame size to QQVGA (160x120)
+sensor.skip_frames(time=2000)  # Wait for settings take effect.
 
 # Load up a face detection HaarCascade. This is object that your OpenMV Cam
 # can use to detect faces using the find_features() method below. Your OpenMV
@@ -27,18 +31,17 @@ sensor.skip_frames(time = 2000) # Let new settings take affect.
 # stages.
 face_cascade = image.HaarCascade("frontalface", stages=25)
 
-while(True):
-
+while True:
     pyb.LED(RED_LED_PIN).on()
     print("About to start detecting faces...")
-    sensor.skip_frames(time = 2000) # Give the user time to get ready.
+    sensor.skip_frames(time=2000)  # Give the user time to get ready.
 
     pyb.LED(RED_LED_PIN).off()
     print("Now detecting faces!")
     pyb.LED(BLUE_LED_PIN).on()
 
-    diff = 10 # We'll say we detected a face after 10 frames.
-    while(diff):
+    diff = 10  # We'll say we detected a face after 10 frames.
+    while diff:
         img = sensor.snapshot()
         # Threshold can be between 0.0 and 1.0. A higher threshold results in a
         # higher detection rate with more false positives. The scale value
@@ -52,12 +55,12 @@ while(True):
 
     g = gif.Gif("example-%d.gif" % pyb.rng(), loop=True)
 
-    clock = time.clock() # Tracks FPS.
+    clock = time.clock()  # Tracks FPS.
     print("You're on camera!")
     for i in range(100):
         clock.tick()
         # clock.avg() returns the milliseconds between frames - gif delay is in
-        g.add_frame(sensor.snapshot(), delay=int(clock.avg()/10)) # centiseconds.
+        g.add_frame(sensor.snapshot(), delay=int(clock.avg() / 10))  # centiseconds.
     print(clock.fps())
 
     g.close()
@@ -9,49 +9,53 @@
 # This example demonstrates using frame differencing with your OpenMV Cam to do
 # motion detection. After motion is detected your OpenMV Cam will take video.
 
-import sensor, image, time, gif, pyb, os
+import sensor
+import time
+import gif
+import pyb
+import os
 
 RED_LED_PIN = 1
 BLUE_LED_PIN = 3
 
-sensor.reset() # Initialize the camera sensor.
-sensor.set_pixformat(sensor.RGB565) # or sensor.GRAYSCALE
-sensor.set_framesize(sensor.QQVGA) # or sensor.QVGA (or others)
-sensor.skip_frames(time = 2000) # Let new settings take affect.
-sensor.set_auto_whitebal(False) # Turn off white balance.
+sensor.reset()  # Reset and initialize the sensor.
+sensor.set_pixformat(sensor.RGB565)  # Set pixel format to RGB565 (or GRAYSCALE)
+sensor.set_framesize(sensor.QQVGA)  # Set frame size to QQVGA (160x120)
+sensor.skip_frames(time=2000)  # Wait for settings take effect.
+sensor.set_auto_whitebal(False)  # Turn off white balance.
 
-if not "temp" in os.listdir(): os.mkdir("temp") # Make a temp directory
-
-while(True):
+if not "temp" in os.listdir():
+    os.mkdir("temp")  # Make a temp directory
+
+while True:
     pyb.LED(RED_LED_PIN).on()
     print("About to save background image...")
-    sensor.skip_frames(time = 2000) # Give the user time to get ready.
+    sensor.skip_frames(time=2000)  # Give the user time to get ready.
 
     pyb.LED(RED_LED_PIN).off()
     sensor.snapshot().save("temp/bg.bmp")
     print("Saved background image - Now detecting motion!")
     pyb.LED(BLUE_LED_PIN).on()
 
-    diff = 10 # We'll say we detected motion after 10 frames of motion.
-    while(diff):
+    diff = 10  # We'll say we detected motion after 10 frames of motion.
+    while diff:
         img = sensor.snapshot()
         img.difference("temp/bg.bmp")
         stats = img.statistics()
         # Stats 5 is the max of the lighting color channel. The below code
         # triggers when the lighting max for the whole image goes above 20.
         # The lighting difference maximum should be zero normally.
-        if (stats[5] > 20):
+        if stats[5] > 20:
             diff -= 1
 
     g = gif.Gif("example-%d.gif" % pyb.rng(), loop=True)
 
-    clock = time.clock() # Tracks FPS.
+    clock = time.clock()  # Tracks FPS.
     print("You're on camera!")
    for i in range(100):
         clock.tick()
         # clock.avg() returns the milliseconds between frames - gif delay is in
-        g.add_frame(sensor.snapshot(), delay=int(clock.avg()/10)) # centiseconds.
+        g.add_frame(sensor.snapshot(), delay=int(clock.avg() / 10))  # centiseconds.
     print(clock.fps())
 
     g.close()
@@ -2,18 +2,20 @@
 #
 # This example shows how to use the ImageIO stream to record frames in memory and play them back.
 # Note: While this should work on any board, the board should have an SDRAM to be of any use.
-import sensor, image, time
+import sensor
+import image
+import time
 
 # Number of frames to pre-allocate and record
 N_FRAMES = 500
 
-sensor.reset()
-sensor.set_pixformat(sensor.RGB565)
-sensor.set_framesize(sensor.QVGA)
+sensor.reset()  # Reset and initialize the sensor.
+sensor.set_pixformat(sensor.RGB565)  # Set pixel format to RGB565 (or GRAYSCALE)
+sensor.set_framesize(sensor.QVGA)  # Set frame size to QVGA (320x240)
 
 # This frame size must match the image size passed to ImageIO
 sensor.set_windowing((120, 120))
-sensor.skip_frames(time = 2000)
+sensor.skip_frames(time=2000)
 
 clock = time.clock()
@@ -25,7 +27,7 @@ for i in range(0, N_FRAMES):
     stream.write(sensor.snapshot())
     print(clock.fps())
 
-while (True):
+while True:
     # Rewind stream and play back
     stream.seek(0)
     for i in range(0, N_FRAMES):
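The stream construction itself sits outside the diff context; in the full example it pre-allocates N_FRAMES slots sized to the (120, 120) window set above. A sketch of what that line looks like (constructor form for in-memory streams per the OpenMV image.ImageIO docs; treat the exact signature as an assumption):

    # In-memory stream: one pre-allocated slot per frame, sized to the
    # windowed capture configured with sensor.set_windowing((120, 120)).
    stream = image.ImageIO((120, 120, sensor.RGB565), N_FRAMES)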
@@ -5,27 +5,29 @@
 # This example shows how to use the Image Reader object to replay snapshots of what your
 # OpenMV Cam saw saved by the Image Writer object for testing machine vision algorithms.
 
 # Altered to allow full speed reading from SD card for extraction of sequences to the network etc.
 # Set the new pause parameter to false
 
-import sensor, image, time
+import sensor
+import image
+import time
 
-snapshot_source = False # Set to true once finished to pull data from sensor.
+snapshot_source = False  # Set to true once finished to pull data from sensor.
 
-sensor.reset()
-sensor.set_pixformat(sensor.RGB565)
-sensor.set_framesize(sensor.QQVGA)
-sensor.skip_frames(time = 2000)
-clock = time.clock()
+sensor.reset()  # Reset and initialize the sensor.
+sensor.set_pixformat(sensor.RGB565)  # Set pixel format to RGB565 (or GRAYSCALE)
+sensor.set_framesize(sensor.QQVGA)  # Set frame size to QQVGA (160x120)
+sensor.skip_frames(time=2000)  # Wait for settings take effect.
+clock = time.clock()  # Create a clock object to track the FPS.
 
 stream = None
-if snapshot_source == False:
+if snapshot_source is False:
     stream = image.ImageIO("/stream.bin", "r")
 
-while(True):
+while True:
     clock.tick()
     if snapshot_source:
         img = sensor.snapshot()
     else:
         img = stream.read(copy_to_fb=True, loop=True, pause=True)
     # Do machine vision algorithms on the image here.
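Per the "Set the new pause parameter to false" note above, full-speed extraction simply flips the flags on the same read call; a minimal sketch:

    # Read back as fast as the SD card allows: no pacing to the recorded
    # frame interval (pause=False) and stop at the end of the file (loop=False).
    img = stream.read(copy_to_fb=True, loop=False, pause=False)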
@@ -6,15 +6,18 @@
 # OpenMV Cam sees for later analysis using the Image Reader object. Images written to disk
 # by the Image Writer object are stored in a simple file format readable by your OpenMV Cam.
 
-import sensor, image, pyb, time
+import sensor
+import image
+import pyb
+import time
 
-record_time = 10000 # 10 seconds in milliseconds
+record_time = 10000  # 10 seconds in milliseconds
 
-sensor.reset()
-sensor.set_pixformat(sensor.RGB565)
-sensor.set_framesize(sensor.QQVGA)
-sensor.skip_frames(time = 2000)
-clock = time.clock()
+sensor.reset()  # Reset and initialize the sensor.
+sensor.set_pixformat(sensor.RGB565)  # Set pixel format to RGB565 (or GRAYSCALE)
+sensor.set_framesize(sensor.QQVGA)  # Set frame size to QQVGA (160x120)
+sensor.skip_frames(time=2000)  # Wait for settings take effect.
+clock = time.clock()  # Create a clock object to track the FPS.
 
 stream = image.ImageIO("/stream.bin", "w")
@@ -7,19 +7,22 @@
 # recording a Mjpeg file you can use VLC to play it. If you are on Ubuntu then
 # the built-in video player will work too.
 
-import sensor, image, time, mjpeg, pyb
+import sensor
+import time
+import mjpeg
+import pyb
 
 RED_LED_PIN = 1
 BLUE_LED_PIN = 3
 
-sensor.reset() # Initialize the camera sensor.
-sensor.set_pixformat(sensor.RGB565) # or sensor.GRAYSCALE
-sensor.set_framesize(sensor.QVGA) # or sensor.QQVGA (or others)
-sensor.skip_frames(time = 2000) # Let new settings take affect.
-clock = time.clock() # Tracks FPS.
+sensor.reset()  # Reset and initialize the sensor.
+sensor.set_pixformat(sensor.RGB565)  # Set pixel format to RGB565 (or GRAYSCALE)
+sensor.set_framesize(sensor.QVGA)  # Set frame size to QVGA (320x240)
+sensor.skip_frames(time=2000)  # Wait for settings take effect.
+clock = time.clock()  # Create a clock object to track the FPS.
 
 pyb.LED(RED_LED_PIN).on()
-sensor.skip_frames(time = 2000) # Give the user time to get ready.
+sensor.skip_frames(time=2000)  # Give the user time to get ready.
 
 pyb.LED(RED_LED_PIN).off()
 pyb.LED(BLUE_LED_PIN).on()
@@ -10,15 +10,19 @@
 # This example demonstrates using face tracking on your OpenMV Cam to take a
 # mjpeg.
 
-import sensor, image, time, mjpeg, pyb
+import sensor
+import image
+import time
+import mjpeg
+import pyb
 
 RED_LED_PIN = 1
 BLUE_LED_PIN = 3
 
-sensor.reset() # Initialize the camera sensor.
-sensor.set_pixformat(sensor.GRAYSCALE) # or sensor.
-sensor.set_framesize(sensor.QQVGA) # or sensor.HQVGA (or others)
-sensor.skip_frames(time = 2000) # Let new settings take affect.
+sensor.reset()  # Reset and initialize the sensor.
+sensor.set_pixformat(sensor.GRAYSCALE)  # Set pixel format to RGB565 (or GRAYSCALE)
+sensor.set_framesize(sensor.QQVGA)  # Set frame size to QQVGA (160x120)
+sensor.skip_frames(time=2000)  # Wait for settings take effect.
 
 # Load up a face detection HaarCascade. This is object that your OpenMV Cam
 # can use to detect faces using the find_features() method below. Your OpenMV
@@ -28,18 +32,17 @@ sensor.skip_frames(time = 2000) # Let new settings take affect.
 # stages.
 face_cascade = image.HaarCascade("frontalface", stages=25)
 
-while(True):
-
+while True:
     pyb.LED(RED_LED_PIN).on()
     print("About to start detecting faces...")
-    sensor.skip_frames(time = 2000) # Give the user time to get ready.
+    sensor.skip_frames(time=2000)  # Give the user time to get ready.
 
     pyb.LED(RED_LED_PIN).off()
     print("Now detecting faces!")
     pyb.LED(BLUE_LED_PIN).on()
 
-    diff = 10 # We'll say we detected a face after 10 frames.
-    while(diff):
+    diff = 10  # We'll say we detected a face after 10 frames.
+    while diff:
         img = sensor.snapshot()
         # Threshold can be between 0.0 and 1.0. A higher threshold results in a
         # higher detection rate with more false positives. The scale value
@@ -53,7 +56,7 @@ while(True):
 
     m = mjpeg.Mjpeg("example-%d.mjpeg" % pyb.rng())
 
-    clock = time.clock() # Tracks FPS.
+    clock = time.clock()  # Tracks FPS.
     print("You're on camera!")
     for i in range(200):
         clock.tick()
@@ -10,44 +10,48 @@
 # This example demonstrates using frame differencing with your OpenMV Cam to do
 # motion detection. After motion is detected your OpenMV Cam will take video.
 
-import sensor, image, time, mjpeg, pyb, os
+import sensor
+import time
+import mjpeg
+import pyb
+import os
 
 RED_LED_PIN = 1
 BLUE_LED_PIN = 3
 
-sensor.reset() # Initialize the camera sensor.
-sensor.set_pixformat(sensor.RGB565) # or sensor.GRAYSCALE
-sensor.set_framesize(sensor.QVGA) # or sensor.QQVGA (or others)
-sensor.skip_frames(time = 2000) # Let new settings take affect.
-sensor.set_auto_whitebal(False) # Turn off white balance.
+sensor.reset()  # Reset and initialize the sensor.
+sensor.set_pixformat(sensor.RGB565)  # Set pixel format to RGB565 (or GRAYSCALE)
+sensor.set_framesize(sensor.QVGA)  # Set frame size to QVGA (320x240)
+sensor.skip_frames(time=2000)  # Wait for settings take effect.
+sensor.set_auto_whitebal(False)  # Turn off white balance.
 
-if not "temp" in os.listdir(): os.mkdir("temp") # Make a temp directory
-
-while(True):
+if not "temp" in os.listdir():
+    os.mkdir("temp")  # Make a temp directory
+
+while True:
     pyb.LED(RED_LED_PIN).on()
     print("About to save background image...")
-    sensor.skip_frames(time = 2000) # Give the user time to get ready.
+    sensor.skip_frames(time=2000)  # Give the user time to get ready.
 
     pyb.LED(RED_LED_PIN).off()
     sensor.snapshot().save("temp/bg.bmp")
     print("Saved background image - Now detecting motion!")
     pyb.LED(BLUE_LED_PIN).on()
 
-    diff = 10 # We'll say we detected motion after 10 frames of motion.
-    while(diff):
+    diff = 10  # We'll say we detected motion after 10 frames of motion.
+    while diff:
         img = sensor.snapshot()
         img.difference("temp/bg.bmp")
         stats = img.statistics()
         # Stats 5 is the max of the lighting color channel. The below code
         # triggers when the lighting max for the whole image goes above 20.
         # The lighting difference maximum should be zero normally.
-        if (stats[5] > 20):
+        if stats[5] > 20:
             diff -= 1
 
     m = mjpeg.Mjpeg("example-%d.mjpeg" % pyb.rng())
 
-    clock = time.clock() # Tracks FPS.
+    clock = time.clock()  # Tracks FPS.
     print("You're on camera!")
     for i in range(200):
         clock.tick()
@@ -4,30 +4,32 @@
 # rotation/scale by comparing the current and a previous
 # image against each other. Note that only rotation/scale is
 # handled - not X and Y translation in this mode.
 #
 # To run this demo effectively please mount your OpenMV Cam on a steady
 # base and SLOWLY rotate the camera around the lens and move the camera
 # forward/backwards to see the numbers change.
 # I.e. Z direction changes only.
-
-import sensor, image, time, math
-
-# NOTE!!! You have to use a small power of 2 resolution when using
+#
+# NOTE You have to use a small power of 2 resolution when using
 # find_displacement(). This is because the algorithm is powered by
 # something called phase correlation which does the image comparison
 # using FFTs. A non-power of 2 resolution requires padding to a power
 # of 2 which reduces the usefulness of the algorithm results. Please
 # use a resolution like B64X64 or B64X32 (2x faster).
-
+#
 # Your OpenMV Cam supports power of 2 resolutions of 64x32, 64x64,
 # 128x64, and 128x128. If you want a resolution of 32x32 you can create
 # it by doing "img.pool(2, 2)" on a 64x64 image.
 
-sensor.reset() # Reset and initialize the sensor.
-sensor.set_pixformat(sensor.RGB565) # Set pixel format to RGB565 (or GRAYSCALE)
-sensor.set_framesize(sensor.B64X64) # Set frame size to 64x64... (or 64x32)...
-sensor.skip_frames(time = 2000) # Wait for settings take effect.
-clock = time.clock() # Create a clock object to track the FPS.
+import sensor
+import time
+import math
+
+sensor.reset()  # Reset and initialize the sensor.
+sensor.set_pixformat(sensor.RGB565)  # Set pixel format to RGB565 (or GRAYSCALE)
+sensor.set_framesize(sensor.B64X64)  # Set frame size to 64x64... (or 64x32)...
+sensor.skip_frames(time=2000)  # Wait for settings take effect.
+clock = time.clock()  # Create a clock object to track the FPS.
@@ -36,19 +38,19 @@ clock = time.clock() # Create a clock object to track the FPS.
 extra_fb = sensor.alloc_extra_fb(sensor.width(), sensor.height(), sensor.RGB565)
 extra_fb.replace(sensor.snapshot())
 
-while(True):
-    clock.tick() # Track elapsed milliseconds between snapshots().
-    img = sensor.snapshot() # Take a picture and return the image.
+while True:
+    clock.tick()  # Track elapsed milliseconds between snapshots().
+    img = sensor.snapshot()  # Take a picture and return the image.
 
     # This algorithm is hard to test without a perfect jig... So, here's a cheat to see it works.
     # Put in a z_rotation value below and you should see the r output be equal to that.
-    if(0):
+    if 0:
         expected_rotation = 20.0
         img.rotation_corr(z_rotation=expected_rotation)
 
     # This algorithm is hard to test without a perfect jig... So, here's a cheat to see it works.
     # Put in a zoom value below and you should see the z output be equal to that.
-    if(0):
+    if 0:
         expected_zoom = 0.8
         img.rotation_corr(zoom=expected_zoom)
@@ -59,9 +61,13 @@ while(True):
     rotation_change = int(math.degrees(displacement.rotation()) * 5) / 5.0
     zoom_amount = displacement.scale()
 
-    if(displacement.response() > 0.1): # Below 0.1 or so (YMMV) and the results are just noise.
-        print("{0:+f}r {1:+f}z {2} {3} FPS".format(rotation_change, zoom_amount, \
-            displacement.response(),
-            clock.fps()))
+    if (
+        displacement.response() > 0.1
+    ):  # Below 0.1 or so (YMMV) and the results are just noise.
+        print(
+            "{0:+f}r {1:+f}z {2} {3} FPS".format(
+                rotation_change, zoom_amount, displacement.response(), clock.fps()
+            )
+        )
     else:
         print(clock.fps())
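The note about creating a 32x32 resolution refers to pooling the captured frame down by a factor of two before running find_displacement(); a sketch of that preprocessing step, using the img.pool(2, 2) call the example's own comment names:

    img = sensor.snapshot()  # 64x64 capture (sensor.B64X64)
    img.pool(2, 2)  # pool 2x2 blocks in place: 64x64 -> 32x32, still a power of 2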
@@ -4,30 +4,31 @@
 # in the X and Y direction by comparing the current and a previous
 # image against each other. Note that only X and Y translation is
 # handled - not rotation/scale in this mode.
 #
 # To run this demo effectively please mount your OpenMV Cam on a steady
 # base and SLOWLY translate it to the left, right, up, and down and
 # watch the numbers change. Note that you can see displacement numbers
 # up +- half of the hoizontal and vertical resolution.
-
-import sensor, image, time
-
-# NOTE!!! You have to use a small power of 2 resolution when using
+#
+# NOTE You have to use a small power of 2 resolution when using
 # find_displacement(). This is because the algorithm is powered by
 # something called phase correlation which does the image comparison
 # using FFTs. A non-power of 2 resolution requires padding to a power
 # of 2 which reduces the usefulness of the algorithm results. Please
 # use a resolution like B64X64 or B64X32 (2x faster).
-
+#
 # Your OpenMV Cam supports power of 2 resolutions of 64x32, 64x64,
 # 128x64, and 128x128. If you want a resolution of 32x32 you can create
 # it by doing "img.pool(2, 2)" on a 64x64 image.
 
-sensor.reset() # Reset and initialize the sensor.
-sensor.set_pixformat(sensor.RGB565) # Set pixel format to RGB565 (or GRAYSCALE)
-sensor.set_framesize(sensor.B64X64) # Set frame size to 64x64... (or 64x32)...
-sensor.skip_frames(time = 2000) # Wait for settings take effect.
-clock = time.clock() # Create a clock object to track the FPS.
+import sensor
+import time
+
+sensor.reset()  # Reset and initialize the sensor.
+sensor.set_pixformat(sensor.RGB565)  # Set pixel format to RGB565 (or GRAYSCALE)
+sensor.set_framesize(sensor.B64X64)  # Set frame size to 64x64... (or 64x32)...
+sensor.skip_frames(time=2000)  # Wait for settings take effect.
+clock = time.clock()  # Create a clock object to track the FPS.
@@ -36,9 +37,9 @@ clock = time.clock() # Create a clock object to track the FPS.
 extra_fb = sensor.alloc_extra_fb(sensor.width(), sensor.height(), sensor.RGB565)
 extra_fb.replace(sensor.snapshot())
 
-while(True):
-    clock.tick() # Track elapsed milliseconds between snapshots().
-    img = sensor.snapshot() # Take a picture and return the image.
+while True:
+    clock.tick()  # Track elapsed milliseconds between snapshots().
+    img = sensor.snapshot()  # Take a picture and return the image.
 
     # For this example we never update the old image to measure absolute change.
     displacement = extra_fb.find_displacement(img)
@@ -47,9 +48,13 @@ while(True):
     sub_pixel_x = int(displacement.x_translation() * 5) / 5.0
     sub_pixel_y = int(displacement.y_translation() * 5) / 5.0
 
-    if(displacement.response() > 0.1): # Below 0.1 or so (YMMV) and the results are just noise.
-        print("{0:+f}x {1:+f}y {2} {3} FPS".format(sub_pixel_x, sub_pixel_y,
-            displacement.response(),
-            clock.fps()))
+    if (
+        displacement.response() > 0.1
+    ):  # Below 0.1 or so (YMMV) and the results are just noise.
+        print(
+            "{0:+f}x {1:+f}y {2} {3} FPS".format(
+                sub_pixel_x, sub_pixel_y, displacement.response(), clock.fps()
+            )
+        )
     else:
         print(clock.fps())
@@ -4,30 +4,32 @@
 # rotation/scale by comparing the current and the previous
 # image against each other. Note that only rotation/scale is
 # handled - not X and Y translation in this mode.
 #
 # To run this demo effectively please mount your OpenMV Cam on a steady
 # base and SLOWLY rotate the camera around the lens and move the camera
 # forward/backwards to see the numbers change.
 # I.e. Z direction changes only.
-
-import sensor, image, time, math
-
-# NOTE!!! You have to use a small power of 2 resolution when using
+#
+# NOTE You have to use a small power of 2 resolution when using
 # find_displacement(). This is because the algorithm is powered by
 # something called phase correlation which does the image comparison
 # using FFTs. A non-power of 2 resolution requires padding to a power
 # of 2 which reduces the usefulness of the algorithm results. Please
 # use a resolution like B64X64 or B64X32 (2x faster).
-
+#
 # Your OpenMV Cam supports power of 2 resolutions of 64x32, 64x64,
 # 128x64, and 128x128. If you want a resolution of 32x32 you can create
 # it by doing "img.pool(2, 2)" on a 64x64 image.
 
-sensor.reset() # Reset and initialize the sensor.
-sensor.set_pixformat(sensor.RGB565) # Set pixel format to RGB565 (or GRAYSCALE)
-sensor.set_framesize(sensor.B64X64) # Set frame size to 64x64... (or 64x32)...
-sensor.skip_frames(time = 2000) # Wait for settings take effect.
-clock = time.clock() # Create a clock object to track the FPS.
+import sensor
+import time
+import math
+
+sensor.reset()  # Reset and initialize the sensor.
+sensor.set_pixformat(sensor.RGB565)  # Set pixel format to RGB565 (or GRAYSCALE)
+sensor.set_framesize(sensor.B64X64)  # Set frame size to 64x64... (or 64x32)...
+sensor.skip_frames(time=2000)  # Wait for settings take effect.
+clock = time.clock()  # Create a clock object to track the FPS.
@@ -36,21 +38,21 @@ clock = time.clock() # Create a clock object to track the FPS.
 extra_fb = sensor.alloc_extra_fb(sensor.width(), sensor.height(), sensor.RGB565)
 extra_fb.replace(sensor.snapshot())
 
-while(True):
-    clock.tick() # Track elapsed milliseconds between snapshots().
-    img = sensor.snapshot() # Take a picture and return the image.
+while True:
+    clock.tick()  # Track elapsed milliseconds between snapshots().
+    img = sensor.snapshot()  # Take a picture and return the image.
 
     # This algorithm is hard to test without a perfect jig... So, here's a cheat to see it works.
     # Put in a z_rotation value below and you should see the r output be equal to that.
-    if(0):
+    if 0:
         expected_rotation = 20.0
         extra_fb.rotation_corr(z_rotation=(-expected_rotation))
 
     # This algorithm is hard to test without a perfect jig... So, here's a cheat to see it works.
     # Put in a zoom value below and you should see the z output be equal to that.
-    if(0):
+    if 0:
         expected_zoom = 0.8
-        extra_fb.rotation_corr(zoom=(2.00-expected_zoom))
+        extra_fb.rotation_corr(zoom=(2.00 - expected_zoom))
 
     displacement = extra_fb.find_displacement(img, logpolar=True)
     extra_fb.replace(img)
@@ -59,9 +61,13 @@ while(True):
     rotation_change = int(math.degrees(displacement.rotation()) * 5) / 5.0
     zoom_amount = displacement.scale()
 
-    if(displacement.response() > 0.1): # Below 0.1 or so (YMMV) and the results are just noise.
-        print("{0:+f}r {1:+f}z {2} {3} FPS".format(rotation_change, zoom_amount, \
-            displacement.response(),
-            clock.fps()))
+    if (
+        displacement.response() > 0.1
+    ):  # Below 0.1 or so (YMMV) and the results are just noise.
+        print(
+            "{0:+f}r {1:+f}z {2} {3} FPS".format(
+                rotation_change, zoom_amount, displacement.response(), clock.fps()
+            )
+        )
     else:
         print(clock.fps())
@@ -4,30 +4,31 @@
 # in the X and Y direction by comparing the current and the previous
 # image against each other. Note that only X and Y translation is
 # handled - not rotation/scale in this mode.
 #
 # To run this demo effectively please mount your OpenMV Cam on a steady
 # base and QUICKLY translate it to the left, right, up, and down and
 # watch the numbers change. Note that you can see displacement numbers
 # up +- half of the hoizontal and vertical resolution.
-
-import sensor, image, time
-
-# NOTE!!! You have to use a small power of 2 resolution when using
+#
+# NOTE You have to use a small power of 2 resolution when using
 # find_displacement(). This is because the algorithm is powered by
 # something called phase correlation which does the image comparison
 # using FFTs. A non-power of 2 resolution requires padding to a power
 # of 2 which reduces the usefulness of the algorithm results. Please
 # use a resolution like B64X64 or B64X32 (2x faster).
-
+#
 # Your OpenMV Cam supports power of 2 resolutions of 64x32, 64x64,
 # 128x64, and 128x128. If you want a resolution of 32x32 you can create
 # it by doing "img.pool(2, 2)" on a 64x64 image.
 
-sensor.reset() # Reset and initialize the sensor.
-sensor.set_pixformat(sensor.RGB565) # Set pixel format to RGB565 (or GRAYSCALE)
-sensor.set_framesize(sensor.B64X64) # Set frame size to 64x64... (or 64x32)...
-sensor.skip_frames(time = 2000) # Wait for settings take effect.
-clock = time.clock() # Create a clock object to track the FPS.
+import sensor
+import time
+
+sensor.reset()  # Reset and initialize the sensor.
+sensor.set_pixformat(sensor.RGB565)  # Set pixel format to RGB565 (or GRAYSCALE)
+sensor.set_framesize(sensor.B64X64)  # Set frame size to 64x64... (or 64x32)...
+sensor.skip_frames(time=2000)  # Wait for settings take effect.
+clock = time.clock()  # Create a clock object to track the FPS.
@@ -36,9 +37,9 @@ clock = time.clock() # Create a clock object to track the FPS.
 extra_fb = sensor.alloc_extra_fb(sensor.width(), sensor.height(), sensor.RGB565)
 extra_fb.replace(sensor.snapshot())
 
-while(True):
-    clock.tick() # Track elapsed milliseconds between snapshots().
-    img = sensor.snapshot() # Take a picture and return the image.
+while True:
+    clock.tick()  # Track elapsed milliseconds between snapshots().
+    img = sensor.snapshot()  # Take a picture and return the image.
 
     displacement = extra_fb.find_displacement(img)
     extra_fb.replace(img)
@@ -47,9 +48,13 @@ while(True):
     sub_pixel_x = int(displacement.x_translation() * 5) / 5.0
     sub_pixel_y = int(displacement.y_translation() * 5) / 5.0
 
-    if(displacement.response() > 0.1): # Below 0.1 or so (YMMV) and the results are just noise.
-        print("{0:+f}x {1:+f}y {2} {3} FPS".format(sub_pixel_x, sub_pixel_y,
-            displacement.response(),
-            clock.fps()))
+    if (
+        displacement.response() > 0.1
+    ):  # Below 0.1 or so (YMMV) and the results are just noise.
+        print(
+            "{0:+f}x {1:+f}y {2} {3} FPS".format(
+                sub_pixel_x, sub_pixel_y, displacement.response(), clock.fps()
+            )
+        )
     else:
         print(clock.fps())
@@ -11,35 +11,35 @@
 #
 # NOTE that surfaces need to have some type of "edge" on them for the
 # algorithm to work. A featureless surface produces crazy results.
 
 # NOTE: Unless you have a very nice test rig this example is hard to see usefulness of...
 
-BLOCK_W = 16 # pow2
-BLOCK_H = 16 # pow2
-
+#
 # To run this demo effectively please mount your OpenMV Cam on a steady
 # base and SLOWLY rotate the camera around the lens and move the camera
 # forward/backwards to see the numbers change.
 # I.e. Z direction changes only.
-
-import sensor, image, time, math
-
-# NOTE!!! You have to use a small power of 2 resolution when using
+#
+# NOTE You have to use a small power of 2 resolution when using
 # find_displacement(). This is because the algorithm is powered by
 # something called phase correlation which does the image comparison
 # using FFTs. A non-power of 2 resolution requires padding to a power
 # of 2 which reduces the usefulness of the algorithm results. Please
 # use a resolution like B128X128 or B128X64 (2x faster).
-
+#
 # Your OpenMV Cam supports power of 2 resolutions of 64x32, 64x64,
 # 128x64, and 128x128. If you want a resolution of 32x32 you can create
 # it by doing "img.pool(2, 2)" on a 64x64 image.
 
-sensor.reset() # Reset and initialize the sensor.
-sensor.set_pixformat(sensor.GRAYSCALE) # Set pixel format to GRAYSCALE (or RGB565)
+import sensor
+import time
+import math
+
+BLOCK_W = 16  # pow2
+BLOCK_H = 16  # pow2
+
+sensor.reset()  # Reset and initialize the sensor.
+sensor.set_pixformat(sensor.GRAYSCALE)  # Set pixel format to GRAYSCALE (or RGB565)
 sensor.set_framesize(sensor.B128X128)  # Set frame size to 128x128... (or 128x64)...
-sensor.skip_frames(time = 2000) # Wait for settings take effect.
-clock = time.clock() # Create a clock object to track the FPS.
+sensor.skip_frames(time=2000)  # Wait for settings take effect.
+clock = time.clock()  # Create a clock object to track the FPS.
@@ -48,26 +48,46 @@ clock = time.clock() # Create a clock object to track the FPS.
 extra_fb = sensor.alloc_extra_fb(sensor.width(), sensor.height(), sensor.GRAYSCALE)
 extra_fb.replace(sensor.snapshot())
 
-while(True):
-    clock.tick() # Track elapsed milliseconds between snapshots().
-    img = sensor.snapshot() # Take a picture and return the image.
+while True:
+    clock.tick()  # Track elapsed milliseconds between snapshots().
+    img = sensor.snapshot()  # Take a picture and return the image.
 
     for y in range(0, sensor.height(), BLOCK_H):
         for x in range(0, sensor.width(), BLOCK_W):
             # For this example we never update the old image to measure absolute change.
-            displacement = extra_fb.find_displacement(img, logpolar=True, \
-                roi = (x, y, BLOCK_W, BLOCK_H), template_roi = (x, y, BLOCK_W, BLOCK_H))
+            displacement = extra_fb.find_displacement(
+                img,
+                logpolar=True,
+                roi=(x, y, BLOCK_W, BLOCK_H),
+                template_roi=(x, y, BLOCK_W, BLOCK_H),
+            )
 
             # Below 0.1 or so (YMMV) and the results are just noise.
-            if(displacement.response() > 0.1):
+            if displacement.response() > 0.1:
                 rotation_change = displacement.rotation()
                 zoom_amount = displacement.scale()
-                pixel_x = x + (BLOCK_W//2) + int(math.sin(rotation_change) * zoom_amount * (BLOCK_W//4))
-                pixel_y = y + (BLOCK_H//2) + int(math.cos(rotation_change) * zoom_amount * (BLOCK_H//4))
-                img.draw_line((x + BLOCK_W//2, y + BLOCK_H//2, pixel_x, pixel_y), \
-                    color = 255)
+                pixel_x = (
+                    x
+                    + (BLOCK_W // 2)
+                    + int(math.sin(rotation_change) * zoom_amount * (BLOCK_W // 4))
+                )
+                pixel_y = (
+                    y
+                    + (BLOCK_H // 2)
+                    + int(math.cos(rotation_change) * zoom_amount * (BLOCK_H // 4))
+                )
+                img.draw_line(
+                    (x + BLOCK_W // 2, y + BLOCK_H // 2, pixel_x, pixel_y), color=255
+                )
             else:
-                img.draw_line((x + BLOCK_W//2, y + BLOCK_H//2, x + BLOCK_W//2, y + BLOCK_H//2), \
-                    color = 0)
+                img.draw_line(
+                    (
+                        x + BLOCK_W // 2,
+                        y + BLOCK_H // 2,
+                        x + BLOCK_W // 2,
+                        y + BLOCK_H // 2,
+                    ),
+                    color=0,
+                )
 
     print(clock.fps())
@ -11,33 +11,34 @@
#
# NOTE that surfaces need to have some type of "edge" on them for the
# algorithm to work. A featureless surface produces crazy results.

BLOCK_W = 16  # pow2
BLOCK_H = 16  # pow2

#
# To run this demo effectively please mount your OpenMV Cam on a steady
# base and SLOWLY translate it to the left, right, up, and down and
# watch the numbers change. Note that you can see displacement numbers
# up to +/- half of the horizontal and vertical resolution.

import sensor, image, time

# NOTE!!! You have to use a small power of 2 resolution when using
#
# NOTE You have to use a small power of 2 resolution when using
# find_displacement(). This is because the algorithm is powered by
# something called phase correlation which does the image comparison
# using FFTs. A non-power of 2 resolution requires padding to a power
# of 2 which reduces the usefulness of the algorithm results. Please
# use a resolution like B128X128 or B128X64 (2x faster).

#
# Your OpenMV Cam supports power of 2 resolutions of 64x32, 64x64,
# 128x64, and 128x128. If you want a resolution of 32x32 you can create
# it by doing "img.pool(2, 2)" on a 64x64 image.

sensor.reset()  # Reset and initialize the sensor.
sensor.set_pixformat(sensor.GRAYSCALE)  # Set pixel format to GRAYSCALE (or RGB565)
import sensor
import time

BLOCK_W = 16  # pow2
BLOCK_H = 16  # pow2

sensor.reset()  # Reset and initialize the sensor.
sensor.set_pixformat(sensor.GRAYSCALE)  # Set pixel format to GRAYSCALE (or RGB565)
sensor.set_framesize(sensor.B128X128)  # Set frame size to 128x128... (or 128x64)...
sensor.skip_frames(time = 2000)  # Wait for settings to take effect.
clock = time.clock()  # Create a clock object to track the FPS.
sensor.skip_frames(time=2000)  # Wait for settings to take effect.
clock = time.clock()  # Create a clock object to track the FPS.

# Take from the main frame buffer's RAM to allocate a second frame buffer.
# There's a lot more RAM in the frame buffer than in the MicroPython heap.
@ -46,24 +47,33 @@ clock = time.clock()  # Create a clock object to track the FPS.
extra_fb = sensor.alloc_extra_fb(sensor.width(), sensor.height(), sensor.GRAYSCALE)
extra_fb.replace(sensor.snapshot())

while(True):
    clock.tick()  # Track elapsed milliseconds between snapshots().
    img = sensor.snapshot()  # Take a picture and return the image.
while True:
    clock.tick()  # Track elapsed milliseconds between snapshots().
    img = sensor.snapshot()  # Take a picture and return the image.

    for y in range(0, sensor.height(), BLOCK_H):
        for x in range(0, sensor.width(), BLOCK_W):
            # For this example we never update the old image to measure absolute change.
            displacement = extra_fb.find_displacement(img, \
                roi = (x, y, BLOCK_W, BLOCK_H), template_roi = (x, y, BLOCK_W, BLOCK_H))
            displacement = extra_fb.find_displacement(
                img, roi=(x, y, BLOCK_W, BLOCK_H), template_roi=(x, y, BLOCK_W, BLOCK_H)
            )

            # Below 0.1 or so (YMMV) and the results are just noise.
            if(displacement.response() > 0.1):
                pixel_x = x + (BLOCK_W//2) + int(displacement.x_translation())
                pixel_y = y + (BLOCK_H//2) + int(displacement.y_translation())
                img.draw_line((x + BLOCK_W//2, y + BLOCK_H//2, pixel_x, pixel_y), \
                    color = 255)
            if displacement.response() > 0.1:
                pixel_x = x + (BLOCK_W // 2) + int(displacement.x_translation())
                pixel_y = y + (BLOCK_H // 2) + int(displacement.y_translation())
                img.draw_line(
                    (x + BLOCK_W // 2, y + BLOCK_H // 2, pixel_x, pixel_y), color=255
                )
            else:
                img.draw_line((x + BLOCK_W//2, y + BLOCK_H//2, x + BLOCK_W//2, y + BLOCK_H//2), \
                    color = 0)
                img.draw_line(
                    (
                        x + BLOCK_W // 2,
                        y + BLOCK_H // 2,
                        x + BLOCK_W // 2,
                        y + BLOCK_H // 2,
                    ),
                    color=0,
                )

    print(clock.fps())

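The two hunks above only reformat the per-block loop, which runs one phase correlation per 16x16 tile. For readers following the phase-correlation comments, a whole-frame translation measurement is simpler; here is a minimal sketch using the same `sensor` and `find_displacement()` calls as the example (an illustration, not a file from this commit):

    import sensor
    import time

    sensor.reset()
    sensor.set_pixformat(sensor.GRAYSCALE)
    sensor.set_framesize(sensor.B128X128)  # power-of-2 keeps the FFTs padding-free
    sensor.skip_frames(time=2000)
    clock = time.clock()

    # Keep the very first frame as the phase-correlation template.
    extra_fb = sensor.alloc_extra_fb(sensor.width(), sensor.height(), sensor.GRAYSCALE)
    extra_fb.replace(sensor.snapshot())

    while True:
        clock.tick()
        displacement = extra_fb.find_displacement(sensor.snapshot())
        if displacement.response() > 0.1:  # below ~0.1 the result is noise
            print(
                "x %0.2f, y %0.2f, fps %0.1f"
                % (displacement.x_translation(), displacement.y_translation(), clock.fps())
            )
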
@ -11,35 +11,34 @@
#
# NOTE that surfaces need to have some type of "edge" on them for the
# algorithm to work. A featureless surface produces crazy results.

# NOTE: Unless you have a very nice test rig it is hard to see the usefulness of this example...

BLOCK_W = 16  # pow2
BLOCK_H = 16  # pow2

#
# To run this demo effectively please mount your OpenMV Cam on a steady
# base and SLOWLY rotate the camera around the lens and move the camera
# forward/backwards to see the numbers change.
# I.e. Z direction changes only.

import sensor, image, time, math

# NOTE!!! You have to use a small power of 2 resolution when using
#
# NOTE You have to use a small power of 2 resolution when using
# find_displacement(). This is because the algorithm is powered by
# something called phase correlation which does the image comparison
# using FFTs. A non-power of 2 resolution requires padding to a power
# of 2 which reduces the usefulness of the algorithm results. Please
# use a resolution like B128X128 or B128X64 (2x faster).

#
# Your OpenMV Cam supports power of 2 resolutions of 64x32, 64x64,
# 128x64, and 128x128. If you want a resolution of 32x32 you can create
# it by doing "img.pool(2, 2)" on a 64x64 image.
import sensor
import time
import math

sensor.reset()  # Reset and initialize the sensor.
sensor.set_pixformat(sensor.GRAYSCALE)  # Set pixel format to GRAYSCALE (or RGB565)
BLOCK_W = 16  # pow2
BLOCK_H = 16  # pow2

sensor.reset()  # Reset and initialize the sensor.
sensor.set_pixformat(sensor.GRAYSCALE)  # Set pixel format to GRAYSCALE (or RGB565)
sensor.set_framesize(sensor.B128X128)  # Set frame size to 128x128... (or 128x64)...
sensor.skip_frames(time = 2000)  # Wait for settings to take effect.
clock = time.clock()  # Create a clock object to track the FPS.
sensor.skip_frames(time=2000)  # Wait for settings to take effect.
clock = time.clock()  # Create a clock object to track the FPS.

# Take from the main frame buffer's RAM to allocate a second frame buffer.
# There's a lot more RAM in the frame buffer than in the MicroPython heap.
@ -48,26 +47,46 @@ clock = time.clock()  # Create a clock object to track the FPS.
extra_fb = sensor.alloc_extra_fb(sensor.width(), sensor.height(), sensor.GRAYSCALE)
extra_fb.replace(sensor.snapshot())

while(True):
    clock.tick()  # Track elapsed milliseconds between snapshots().
    img = sensor.snapshot()  # Take a picture and return the image.
while True:
    clock.tick()  # Track elapsed milliseconds between snapshots().
    img = sensor.snapshot()  # Take a picture and return the image.

    for y in range(0, sensor.height(), BLOCK_H):
        for x in range(0, sensor.width(), BLOCK_W):
            displacement = extra_fb.find_displacement(img, logpolar=True, \
                roi = (x, y, BLOCK_W, BLOCK_H), template_roi = (x, y, BLOCK_W, BLOCK_H))
            displacement = extra_fb.find_displacement(
                img,
                logpolar=True,
                roi=(x, y, BLOCK_W, BLOCK_H),
                template_roi=(x, y, BLOCK_W, BLOCK_H),
            )

            # Below 0.1 or so (YMMV) and the results are just noise.
            if(displacement.response() > 0.1):
            if displacement.response() > 0.1:
                rotation_change = displacement.rotation()
                zoom_amount = 1.0 + displacement.scale()
                pixel_x = x + (BLOCK_W//2) + int(math.sin(rotation_change) * zoom_amount * (BLOCK_W//4))
                pixel_y = y + (BLOCK_H//2) + int(math.cos(rotation_change) * zoom_amount * (BLOCK_H//4))
                img.draw_line((x + BLOCK_W//2, y + BLOCK_H//2, pixel_x, pixel_y), \
                    color = 255)
                pixel_x = (
                    x
                    + (BLOCK_W // 2)
                    + int(math.sin(rotation_change) * zoom_amount * (BLOCK_W // 4))
                )
                pixel_y = (
                    y
                    + (BLOCK_H // 2)
                    + int(math.cos(rotation_change) * zoom_amount * (BLOCK_H // 4))
                )
                img.draw_line(
                    (x + BLOCK_W // 2, y + BLOCK_H // 2, pixel_x, pixel_y), color=255
                )
            else:
                img.draw_line((x + BLOCK_W//2, y + BLOCK_H//2, x + BLOCK_W//2, y + BLOCK_H//2), \
                    color = 0)
                img.draw_line(
                    (
                        x + BLOCK_W // 2,
                        y + BLOCK_H // 2,
                        x + BLOCK_W // 2,
                        y + BLOCK_H // 2,
                    ),
                    color=0,
                )
    extra_fb.replace(img)

    print(clock.fps())

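Since `logpolar=True` is the only algorithmic change from the translation example (it remaps rotation and scale onto translation before the same phase correlation runs), a whole-frame sketch of reading both values is nearly identical. It assumes `rotation()` returns radians, which matches the `math.sin()`/`math.cos()` usage above (an illustration, not a file from this commit):

    import math
    import sensor
    import time

    sensor.reset()
    sensor.set_pixformat(sensor.GRAYSCALE)
    sensor.set_framesize(sensor.B128X128)
    sensor.skip_frames(time=2000)
    clock = time.clock()

    extra_fb = sensor.alloc_extra_fb(sensor.width(), sensor.height(), sensor.GRAYSCALE)
    extra_fb.replace(sensor.snapshot())

    while True:
        clock.tick()
        img = sensor.snapshot()
        displacement = extra_fb.find_displacement(img, logpolar=True)
        extra_fb.replace(img)  # differential: compare against the previous frame
        if displacement.response() > 0.1:  # below ~0.1 the result is noise
            print(
                "rotation %0.1f deg, zoom %0.2f"
                % (math.degrees(displacement.rotation()), 1.0 + displacement.scale())
            )
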
@ -11,33 +11,33 @@
#
# NOTE that surfaces need to have some type of "edge" on them for the
# algorithm to work. A featureless surface produces crazy results.

BLOCK_W = 16  # pow2
BLOCK_H = 16  # pow2

#
# To run this demo effectively please mount your OpenMV Cam on a steady
# base and SLOWLY translate it to the left, right, up, and down and
# watch the numbers change. Note that you can see displacement numbers
# up to +/- half of the horizontal and vertical resolution.

import sensor, image, time

# NOTE!!! You have to use a small power of 2 resolution when using
#
# NOTE You have to use a small power of 2 resolution when using
# find_displacement(). This is because the algorithm is powered by
# something called phase correlation which does the image comparison
# using FFTs. A non-power of 2 resolution requires padding to a power
# of 2 which reduces the usefulness of the algorithm results. Please
# use a resolution like B128X128 or B128X64 (2x faster).

#
# Your OpenMV Cam supports power of 2 resolutions of 64x32, 64x64,
# 128x64, and 128x128. If you want a resolution of 32x32 you can create
# it by doing "img.pool(2, 2)" on a 64x64 image.
import sensor
import time

sensor.reset()  # Reset and initialize the sensor.
sensor.set_pixformat(sensor.GRAYSCALE)  # Set pixel format to GRAYSCALE (or RGB565)
BLOCK_W = 16  # pow2
BLOCK_H = 16  # pow2

sensor.reset()  # Reset and initialize the sensor.
sensor.set_pixformat(sensor.GRAYSCALE)  # Set pixel format to GRAYSCALE (or RGB565)
sensor.set_framesize(sensor.B128X128)  # Set frame size to 128x128... (or 128x64)...
sensor.skip_frames(time = 2000)  # Wait for settings to take effect.
clock = time.clock()  # Create a clock object to track the FPS.
sensor.skip_frames(time=2000)  # Wait for settings to take effect.
clock = time.clock()  # Create a clock object to track the FPS.

# Take from the main frame buffer's RAM to allocate a second frame buffer.
# There's a lot more RAM in the frame buffer than in the MicroPython heap.
@ -46,24 +46,33 @@ clock = time.clock()  # Create a clock object to track the FPS.
extra_fb = sensor.alloc_extra_fb(sensor.width(), sensor.height(), sensor.GRAYSCALE)
extra_fb.replace(sensor.snapshot())

while(True):
    clock.tick()  # Track elapsed milliseconds between snapshots().
    img = sensor.snapshot()  # Take a picture and return the image.
while True:
    clock.tick()  # Track elapsed milliseconds between snapshots().
    img = sensor.snapshot()  # Take a picture and return the image.

    for y in range(0, sensor.height(), BLOCK_H):
        for x in range(0, sensor.width(), BLOCK_W):
            displacement = extra_fb.find_displacement(img, \
                roi = (x, y, BLOCK_W, BLOCK_H), template_roi = (x, y, BLOCK_W, BLOCK_H))
            displacement = extra_fb.find_displacement(
                img, roi=(x, y, BLOCK_W, BLOCK_H), template_roi=(x, y, BLOCK_W, BLOCK_H)
            )

            # Below 0.1 or so (YMMV) and the results are just noise.
            if(displacement.response() > 0.1):
                pixel_x = x + (BLOCK_W//2) + int(displacement.x_translation())
                pixel_y = y + (BLOCK_H//2) + int(displacement.y_translation())
                img.draw_line((x + BLOCK_W//2, y + BLOCK_H//2, pixel_x, pixel_y), \
                    color = 255)
            if displacement.response() > 0.1:
                pixel_x = x + (BLOCK_W // 2) + int(displacement.x_translation())
                pixel_y = y + (BLOCK_H // 2) + int(displacement.y_translation())
                img.draw_line(
                    (x + BLOCK_W // 2, y + BLOCK_H // 2, pixel_x, pixel_y), color=255
                )
            else:
                img.draw_line((x + BLOCK_W//2, y + BLOCK_H//2, x + BLOCK_W//2, y + BLOCK_H//2), \
                    color = 0)
                img.draw_line(
                    (
                        x + BLOCK_W // 2,
                        y + BLOCK_H // 2,
                        x + BLOCK_W // 2,
                        y + BLOCK_H // 2,
                    ),
                    color=0,
                )
    extra_fb.replace(img)

    print(clock.fps())

@ -3,11 +3,13 @@
# The frogeye2020 is a 320x240 event camera. There are two bits per pixel which show no motion,
# motion in one direction, or motion in another direction. The sensor runs at 50 FPS.

import sensor, image, time
import sensor
import image
import time

sensor.reset()
sensor.set_pixformat(sensor.GRAYSCALE)
sensor.set_framesize(sensor.QVGA)
sensor.reset()  # Reset and initialize the sensor.
sensor.set_pixformat(sensor.GRAYSCALE)  # Set pixel format to GRAYSCALE
sensor.set_framesize(sensor.QVGA)  # Set frame size to QVGA (320x240)

palette = image.Image(1, 256, sensor.RGB565)

@ -25,7 +27,7 @@ for i in range(192, 256):

clock = time.clock()

while(True):
while True:
    clock.tick()

    img = sensor.snapshot()

@ -3,11 +3,13 @@
# The frogeye2020 is a 320x240 event camera. There are two bits per pixel which show no motion,
# motion in one direction, or motion in another direction. The sensor runs at 50 FPS.

import sensor, image, time
import sensor
import image
import time

sensor.reset()
sensor.set_pixformat(sensor.GRAYSCALE)
sensor.set_framesize(sensor.QVGA)
sensor.reset()  # Reset and initialize the sensor.
sensor.set_pixformat(sensor.GRAYSCALE)  # Set pixel format to GRAYSCALE
sensor.set_framesize(sensor.QVGA)  # Set frame size to QVGA (320x240)

palette = image.Image(1, 256, sensor.RGB565)

@ -25,7 +27,7 @@ for i in range(192, 256):

clock = time.clock()

while(True):
while True:
    clock.tick()

    img = sensor.snapshot()
@ -36,8 +38,9 @@ while(True):
    # Cleanup noise.
    img.erode(1)

    blobs = img.find_blobs([(0, 0)], invert=True,
                           pixels_threshold=10, area_threshold=10, merge=False)
    blobs = img.find_blobs(
        [(0, 0)], invert=True, pixels_threshold=10, area_threshold=10, merge=False
    )

    for blob in blobs:
        img.draw_rectangle(blob.rect(), color=(0, 255, 0))

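Both frogeye2020 examples build a 1x256 RGB565 image as a color lookup palette, but the filling loops are mostly elided from these hunks (only the `for i in range(192, 256)` context line survives). As a sketch of how such a palette can be filled, with hypothetical color ranges standing in for the example's actual values:

    import image
    import sensor

    palette = image.Image(1, 256, sensor.RGB565)
    for i in range(64):
        palette.set_pixel(0, i, (0, 0, 0))  # no motion -> black (hypothetical range)
    for i in range(64, 192):
        palette.set_pixel(0, i, (0, 255, 0))  # one direction -> green (hypothetical range)
    for i in range(192, 256):
        palette.set_pixel(0, i, (255, 0, 0))  # other direction -> red (matches the hunk context)
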
@ -12,18 +12,19 @@
# time so you will not get the maximum readout speed unless you reduce the exposure time too.
# This results in a dark image however so YOU NEED A LOT of lighting for high FPS.

import sensor, image, time
import sensor
import time

sensor.reset()  # Reset and initialize the sensor.
sensor.set_pixformat(sensor.GRAYSCALE)  # Set pixel format to GRAYSCALE
sensor.set_framesize(sensor.QQVGA)  # Set frame size to QQVGA (160x120) - make smaller to go faster
sensor.skip_frames(time = 2000)  # Wait for settings to take effect.
clock = time.clock()  # Create a clock object to track the FPS.
sensor.reset()  # Reset and initialize the sensor.
sensor.set_pixformat(sensor.GRAYSCALE)  # Set pixel format to GRAYSCALE
sensor.set_framesize(sensor.QQVGA)  # Set frame size to QQVGA (160x120)
sensor.skip_frames(time=2000)  # Wait for settings to take effect.
clock = time.clock()  # Create a clock object to track the FPS.

sensor.set_auto_exposure(True, exposure_us=5000)  # make smaller to go faster
sensor.set_auto_exposure(True, exposure_us=5000)  # make smaller to go faster

while(True):
    clock.tick()  # Update the FPS clock.
    img = sensor.snapshot()  # Take a picture and return the image.
    print(clock.fps())  # Note: OpenMV Cam runs about half as fast when connected
    # to the IDE. The FPS should increase once disconnected.
while True:
    clock.tick()  # Update the FPS clock.
    img = sensor.snapshot()  # Take a picture and return the image.
    print(clock.fps())  # Note: OpenMV Cam runs about half as fast when connected
    # to the IDE. The FPS should increase once disconnected.

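A useful rule of thumb hiding in the exposure comment above: the sensor cannot output frames faster than it integrates light, so exposure alone caps the frame rate at 1,000,000 / exposure_us. With the example's exposure_us=5000 that ceiling is 200 FPS; readout and processing only lower it from there. A two-line check:

    exposure_us = 5000  # the value used in the example above
    print("exposure-limited FPS cap: %0.1f" % (1000000.0 / exposure_us))  # prints 200.0
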
@ -12,18 +12,19 @@
# continuously by the camera and because you have to wait for the integration to finish before
# readout of the frame.

import sensor, image, time
import sensor
import time

sensor.reset()  # Reset and initialize the sensor.
sensor.set_pixformat(sensor.GRAYSCALE)  # Set pixel format to GRAYSCALE
sensor.set_framesize(sensor.VGA)  # Set frame size to VGA (640x480)
sensor.skip_frames(time = 2000)  # Wait for settings to take effect.
clock = time.clock()  # Create a clock object to track the FPS.
sensor.reset()  # Reset and initialize the sensor.
sensor.set_pixformat(sensor.GRAYSCALE)  # Set pixel format to GRAYSCALE
sensor.set_framesize(sensor.VGA)  # Set frame size to VGA (640x480)
sensor.skip_frames(time=2000)  # Wait for settings to take effect.
clock = time.clock()  # Create a clock object to track the FPS.

sensor.ioctl(sensor.IOCTL_SET_TRIGGERED_MODE, True)

while(True):
    clock.tick()  # Update the FPS clock.
    img = sensor.snapshot()  # Take a picture and return the image.
    print(clock.fps())  # Note: OpenMV Cam runs about half as fast when connected
    # to the IDE. The FPS should increase once disconnected.
while True:
    clock.tick()  # Update the FPS clock.
    img = sensor.snapshot()  # Take a picture and return the image.
    print(clock.fps())  # Note: OpenMV Cam runs about half as fast when connected
    # to the IDE. The FPS should increase once disconnected.

@ -17,10 +17,11 @@
# leptons don't have radiometry support or they don't activate their calibration process often
# enough to deal with temperature changes (FLIR 2.5).

import sensor, image, time, math
import sensor
import time

# Color Tracking Thresholds (Grayscale Min, Grayscale Max)
threshold_list = [(100, 255)]  # track very hot objects
threshold_list = [(100, 255)]  # track very hot objects

# Set the target temp range here
# 500C is the maximum the Lepton 3.5 sensor can measure
@ -33,10 +34,20 @@ print("Resetting Lepton...")
sensor.reset()
# Enable measurement mode with high temp
sensor.ioctl(sensor.IOCTL_LEPTON_SET_MEASUREMENT_MODE, True, True)
sensor.ioctl(sensor.IOCTL_LEPTON_SET_MEASUREMENT_RANGE, min_temp_in_celsius, max_temp_in_celsius)
print("Lepton Res (%dx%d)" % (sensor.ioctl(sensor.IOCTL_LEPTON_GET_WIDTH),
                              sensor.ioctl(sensor.IOCTL_LEPTON_GET_HEIGHT)))
print("Radiometry Available: " + ("Yes" if sensor.ioctl(sensor.IOCTL_LEPTON_GET_RADIOMETRY) else "No"))
sensor.ioctl(
    sensor.IOCTL_LEPTON_SET_MEASUREMENT_RANGE, min_temp_in_celsius, max_temp_in_celsius
)
print(
    "Lepton Res (%dx%d)"
    % (
        sensor.ioctl(sensor.IOCTL_LEPTON_GET_WIDTH),
        sensor.ioctl(sensor.IOCTL_LEPTON_GET_HEIGHT),
    )
)
print(
    "Radiometry Available: "
    + ("Yes" if sensor.ioctl(sensor.IOCTL_LEPTON_GET_RADIOMETRY) else "No")
)

sensor.set_pixformat(sensor.GRAYSCALE)
sensor.set_framesize(sensor.QQVGA)
@ -47,15 +58,29 @@ clock = time.clock()
# returned by "find_blobs" below. Change "pixels_threshold" and "area_threshold" if you change the
# camera resolution. "merge=True" merges all overlapping blobs in the image.

def map_g_to_temp(g):
    return ((g * (max_temp_in_celsius - min_temp_in_celsius)) / 255.0) + min_temp_in_celsius

while(True):
def map_g_to_temp(g):
    return (
        (g * (max_temp_in_celsius - min_temp_in_celsius)) / 255.0
    ) + min_temp_in_celsius


while True:
    clock.tick()
    img = sensor.snapshot()
    for blob in img.find_blobs(threshold_list, pixels_threshold=200, area_threshold=200, merge=True):
    for blob in img.find_blobs(
        threshold_list, pixels_threshold=200, area_threshold=200, merge=True
    ):
        stats = img.get_statistics(thresholds=threshold_list, roi=blob.rect())
        img.draw_rectangle(blob.rect())
        img.draw_cross(blob.cx(), blob.cy())
        img.draw_string(blob.x(), blob.y() - 10, "%.2f C" % map_g_to_temp(stats.mean()), mono_space=False)
    print("FPS %f - Lepton Temp: %f C" % (clock.fps(), sensor.ioctl(sensor.IOCTL_LEPTON_GET_FPA_TEMPERATURE)))
        img.draw_string(
            blob.x(),
            blob.y() - 10,
            "%.2f C" % map_g_to_temp(stats.mean()),
            mono_space=False,
        )
    print(
        "FPS %f - Lepton Temp: %f C"
        % (clock.fps(), sensor.ioctl(sensor.IOCTL_LEPTON_GET_FPA_TEMPERATURE))
    )

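The `map_g_to_temp()` helper reformatted above is a plain linear rescale: temp = g * (max - min) / 255 + min, so pixel value 0 sits at the bottom of the measurement range and 255 at the top. A worked example with hypothetical range values (the real `min_temp_in_celsius`/`max_temp_in_celsius` are set earlier in the script and elided from this hunk):

    min_temp_in_celsius = 0.0  # hypothetical values for illustration
    max_temp_in_celsius = 100.0


    def map_g_to_temp(g):
        return ((g * (max_temp_in_celsius - min_temp_in_celsius)) / 255.0) + min_temp_in_celsius


    print(map_g_to_temp(128))  # -> ~50.2, mid-gray lands mid-range
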
@ -17,7 +17,8 @@
# leptons don't have radiometry support or they don't activate their calibration process often
# enough to deal with temperature changes (FLIR 2.5).

import sensor, image, time, math
import sensor
import time

# Color Tracking Thresholds (Grayscale Min, Grayscale Max)
threshold_list = [(200, 255)]
@ -30,10 +31,20 @@ print("Resetting Lepton...")
# These settings are applied on reset
sensor.reset()
sensor.ioctl(sensor.IOCTL_LEPTON_SET_MEASUREMENT_MODE, True)
sensor.ioctl(sensor.IOCTL_LEPTON_SET_MEASUREMENT_RANGE, min_temp_in_celsius, max_temp_in_celsius)
print("Lepton Res (%dx%d)" % (sensor.ioctl(sensor.IOCTL_LEPTON_GET_WIDTH),
                              sensor.ioctl(sensor.IOCTL_LEPTON_GET_HEIGHT)))
print("Radiometry Available: " + ("Yes" if sensor.ioctl(sensor.IOCTL_LEPTON_GET_RADIOMETRY) else "No"))
sensor.ioctl(
    sensor.IOCTL_LEPTON_SET_MEASUREMENT_RANGE, min_temp_in_celsius, max_temp_in_celsius
)
print(
    "Lepton Res (%dx%d)"
    % (
        sensor.ioctl(sensor.IOCTL_LEPTON_GET_WIDTH),
        sensor.ioctl(sensor.IOCTL_LEPTON_GET_HEIGHT),
    )
)
print(
    "Radiometry Available: "
    + ("Yes" if sensor.ioctl(sensor.IOCTL_LEPTON_GET_RADIOMETRY) else "No")
)

sensor.set_pixformat(sensor.GRAYSCALE)
sensor.set_framesize(sensor.QQVGA)
@ -44,15 +55,29 @@ clock = time.clock()
# returned by "find_blobs" below. Change "pixels_threshold" and "area_threshold" if you change the
# camera resolution. "merge=True" merges all overlapping blobs in the image.

def map_g_to_temp(g):
    return ((g * (max_temp_in_celsius - min_temp_in_celsius)) / 255.0) + min_temp_in_celsius

while(True):
def map_g_to_temp(g):
    return (
        (g * (max_temp_in_celsius - min_temp_in_celsius)) / 255.0
    ) + min_temp_in_celsius


while True:
    clock.tick()
    img = sensor.snapshot()
    for blob in img.find_blobs(threshold_list, pixels_threshold=200, area_threshold=200, merge=True):
    for blob in img.find_blobs(
        threshold_list, pixels_threshold=200, area_threshold=200, merge=True
    ):
        stats = img.get_statistics(thresholds=threshold_list, roi=blob.rect())
        img.draw_rectangle(blob.rect())
        img.draw_cross(blob.cx(), blob.cy())
        img.draw_string(blob.x(), blob.y() - 10, "%.2f C" % map_g_to_temp(stats.mean()), mono_space=False)
    print("FPS %f - Lepton Temp: %f C" % (clock.fps(), sensor.ioctl(sensor.IOCTL_LEPTON_GET_FPA_TEMPERATURE)))
        img.draw_string(
            blob.x(),
            blob.y() - 10,
            "%.2f C" % map_g_to_temp(stats.mean()),
            mono_space=False,
        )
    print(
        "FPS %f - Lepton Temp: %f C"
        % (clock.fps(), sensor.ioctl(sensor.IOCTL_LEPTON_GET_FPA_TEMPERATURE))
    )

@ -17,7 +17,8 @@
# leptons don't have radiometry support or they don't activate their calibration process often
# enough to deal with temperature changes (FLIR 2.5).

import sensor, image, time, math
import sensor
import time

# Color Tracking Thresholds (Grayscale Min, Grayscale Max)
threshold_list = [(200, 255)]
@ -30,10 +31,20 @@ print("Resetting Lepton...")
# These settings are applied on reset
sensor.reset()
sensor.ioctl(sensor.IOCTL_LEPTON_SET_MEASUREMENT_MODE, True)
sensor.ioctl(sensor.IOCTL_LEPTON_SET_MEASUREMENT_RANGE, min_temp_in_celsius, max_temp_in_celsius)
print("Lepton Res (%dx%d)" % (sensor.ioctl(sensor.IOCTL_LEPTON_GET_WIDTH),
                              sensor.ioctl(sensor.IOCTL_LEPTON_GET_HEIGHT)))
print("Radiometry Available: " + ("Yes" if sensor.ioctl(sensor.IOCTL_LEPTON_GET_RADIOMETRY) else "No"))
sensor.ioctl(
    sensor.IOCTL_LEPTON_SET_MEASUREMENT_RANGE, min_temp_in_celsius, max_temp_in_celsius
)
print(
    "Lepton Res (%dx%d)"
    % (
        sensor.ioctl(sensor.IOCTL_LEPTON_GET_WIDTH),
        sensor.ioctl(sensor.IOCTL_LEPTON_GET_HEIGHT),
    )
)
print(
    "Radiometry Available: "
    + ("Yes" if sensor.ioctl(sensor.IOCTL_LEPTON_GET_RADIOMETRY) else "No")
)

sensor.set_pixformat(sensor.GRAYSCALE)
sensor.set_framesize(sensor.QQVGA)
@ -44,23 +55,43 @@ clock = time.clock()
# returned by "find_blobs" below. Change "pixels_threshold" and "area_threshold" if you change the
# camera resolution. "merge=True" merges all overlapping blobs in the image.

def map_g_to_temp(g):
    return ((g * (max_temp_in_celsius - min_temp_in_celsius)) / 255.0) + min_temp_in_celsius

while(True):
def map_g_to_temp(g):
    return (
        (g * (max_temp_in_celsius - min_temp_in_celsius)) / 255.0
    ) + min_temp_in_celsius


while True:
    clock.tick()
    img = sensor.snapshot()
    blob_stats = []
    blobs = img.find_blobs(threshold_list, pixels_threshold=200, area_threshold=200, merge=True)
    blobs = img.find_blobs(
        threshold_list, pixels_threshold=200, area_threshold=200, merge=True
    )
    # Collect stats into a list of tuples
    for blob in blobs:
        blob_stats.append((blob.x(), blob.y(), map_g_to_temp(img.get_statistics(thresholds=threshold_list,
                                                                                roi=blob.rect()).mean())))
    img.to_rainbow(color_palette=sensor.PALETTE_IRONBOW)  # color it
        blob_stats.append(
            (
                blob.x(),
                blob.y(),
                map_g_to_temp(
                    img.get_statistics(
                        thresholds=threshold_list, roi=blob.rect()
                    ).mean()
                ),
            )
        )
    img.to_rainbow(color_palette=sensor.PALETTE_IRONBOW)  # color it
    # Draw stuff on the colored image
    for blob in blobs:
        img.draw_rectangle(blob.rect())
        img.draw_cross(blob.cx(), blob.cy())
    for blob_stat in blob_stats:
        img.draw_string(blob_stat[0], blob_stat[1] - 10, "%.2f C" % blob_stat[2], mono_space=False)
    print("FPS %f - Lepton Temp: %f C" % (clock.fps(), sensor.ioctl(sensor.IOCTL_LEPTON_GET_FPA_TEMPERATURE)))
        img.draw_string(
            blob_stat[0], blob_stat[1] - 10, "%.2f C" % blob_stat[2], mono_space=False
        )
    print(
        "FPS %f - Lepton Temp: %f C"
        % (clock.fps(), sensor.ioctl(sensor.IOCTL_LEPTON_GET_FPA_TEMPERATURE))
    )

@ -17,7 +17,9 @@
# leptons don't have radiometry support or they don't activate their calibration process often
# enough to deal with temperature changes (FLIR 2.5).

import sensor, image, time, math, lcd
import sensor
import time
import lcd

# Color Tracking Thresholds (Grayscale Min, Grayscale Max)
threshold_list = [(200, 255)]
@ -30,10 +32,20 @@ print("Resetting Lepton...")
# These settings are applied on reset
sensor.reset()
sensor.ioctl(sensor.IOCTL_LEPTON_SET_MEASUREMENT_MODE, True)
sensor.ioctl(sensor.IOCTL_LEPTON_SET_MEASUREMENT_RANGE, min_temp_in_celsius, max_temp_in_celsius)
print("Lepton Res (%dx%d)" % (sensor.ioctl(sensor.IOCTL_LEPTON_GET_WIDTH),
                              sensor.ioctl(sensor.IOCTL_LEPTON_GET_HEIGHT)))
print("Radiometry Available: " + ("Yes" if sensor.ioctl(sensor.IOCTL_LEPTON_GET_RADIOMETRY) else "No"))
sensor.ioctl(
    sensor.IOCTL_LEPTON_SET_MEASUREMENT_RANGE, min_temp_in_celsius, max_temp_in_celsius
)
print(
    "Lepton Res (%dx%d)"
    % (
        sensor.ioctl(sensor.IOCTL_LEPTON_GET_WIDTH),
        sensor.ioctl(sensor.IOCTL_LEPTON_GET_HEIGHT),
    )
)
print(
    "Radiometry Available: "
    + ("Yes" if sensor.ioctl(sensor.IOCTL_LEPTON_GET_RADIOMETRY) else "No")
)

sensor.set_pixformat(sensor.GRAYSCALE)
sensor.set_framesize(sensor.LCD)
@ -45,24 +57,44 @@ lcd.init()
# returned by "find_blobs" below. Change "pixels_threshold" and "area_threshold" if you change the
# camera resolution. "merge=True" merges all overlapping blobs in the image.

def map_g_to_temp(g):
    return ((g * (max_temp_in_celsius - min_temp_in_celsius)) / 255.0) + min_temp_in_celsius

while(True):
def map_g_to_temp(g):
    return (
        (g * (max_temp_in_celsius - min_temp_in_celsius)) / 255.0
    ) + min_temp_in_celsius


while True:
    clock.tick()
    img = sensor.snapshot()
    blob_stats = []
    blobs = img.find_blobs(threshold_list, pixels_threshold=200, area_threshold=200, merge=True)
    blobs = img.find_blobs(
        threshold_list, pixels_threshold=200, area_threshold=200, merge=True
    )
    # Collect stats into a list of tuples
    for blob in blobs:
        blob_stats.append((blob.x(), blob.y(), map_g_to_temp(img.get_statistics(thresholds=threshold_list,
                                                                                roi=blob.rect()).mean())))
    img.to_rainbow(color_palette=sensor.PALETTE_IRONBOW)  # color it
        blob_stats.append(
            (
                blob.x(),
                blob.y(),
                map_g_to_temp(
                    img.get_statistics(
                        thresholds=threshold_list, roi=blob.rect()
                    ).mean()
                ),
            )
        )
    img.to_rainbow(color_palette=sensor.PALETTE_IRONBOW)  # color it
    # Draw stuff on the colored image
    for blob in blobs:
        img.draw_rectangle(blob.rect())
        img.draw_cross(blob.cx(), blob.cy())
    for blob_stat in blob_stats:
        img.draw_string(blob_stat[0], blob_stat[1] - 10, "%.2f C" % blob_stat[2], mono_space=False)
        img.draw_string(
            blob_stat[0], blob_stat[1] - 10, "%.2f C" % blob_stat[2], mono_space=False
        )
    lcd.display(img)
    print("FPS %f - Lepton Temp: %f C" % (clock.fps(), sensor.ioctl(sensor.IOCTL_LEPTON_GET_FPA_TEMPERATURE)))
    print(
        "FPS %f - Lepton Temp: %f C"
        % (clock.fps(), sensor.ioctl(sensor.IOCTL_LEPTON_GET_FPA_TEMPERATURE))
    )

@ -17,7 +17,9 @@
# leptons don't have radiometry support or they don't activate their calibration process often
# enough to deal with temperature changes (FLIR 2.5).

import sensor, image, time, math, lcd
import sensor
import time
import lcd

# Color Tracking Thresholds (Grayscale Min, Grayscale Max)
threshold_list = [(200, 255)]
@ -30,10 +32,20 @@ print("Resetting Lepton...")
# These settings are applied on reset
sensor.reset()
sensor.ioctl(sensor.IOCTL_LEPTON_SET_MEASUREMENT_MODE, True)
sensor.ioctl(sensor.IOCTL_LEPTON_SET_MEASUREMENT_RANGE, min_temp_in_celsius, max_temp_in_celsius)
print("Lepton Res (%dx%d)" % (sensor.ioctl(sensor.IOCTL_LEPTON_GET_WIDTH),
                              sensor.ioctl(sensor.IOCTL_LEPTON_GET_HEIGHT)))
print("Radiometry Available: " + ("Yes" if sensor.ioctl(sensor.IOCTL_LEPTON_GET_RADIOMETRY) else "No"))
sensor.ioctl(
    sensor.IOCTL_LEPTON_SET_MEASUREMENT_RANGE, min_temp_in_celsius, max_temp_in_celsius
)
print(
    "Lepton Res (%dx%d)"
    % (
        sensor.ioctl(sensor.IOCTL_LEPTON_GET_WIDTH),
        sensor.ioctl(sensor.IOCTL_LEPTON_GET_HEIGHT),
    )
)
print(
    "Radiometry Available: "
    + ("Yes" if sensor.ioctl(sensor.IOCTL_LEPTON_GET_RADIOMETRY) else "No")
)

sensor.set_pixformat(sensor.GRAYSCALE)
sensor.set_framesize(sensor.LCD)
@ -45,16 +57,30 @@ lcd.init()
# returned by "find_blobs" below. Change "pixels_threshold" and "area_threshold" if you change the
# camera resolution. "merge=True" merges all overlapping blobs in the image.

def map_g_to_temp(g):
    return ((g * (max_temp_in_celsius - min_temp_in_celsius)) / 255.0) + min_temp_in_celsius

while(True):
def map_g_to_temp(g):
    return (
        (g * (max_temp_in_celsius - min_temp_in_celsius)) / 255.0
    ) + min_temp_in_celsius


while True:
    clock.tick()
    img = sensor.snapshot()
    for blob in img.find_blobs(threshold_list, pixels_threshold=200, area_threshold=200, merge=True):
    for blob in img.find_blobs(
        threshold_list, pixels_threshold=200, area_threshold=200, merge=True
    ):
        stats = img.get_statistics(thresholds=threshold_list, roi=blob.rect())
        img.draw_rectangle(blob.rect())
        img.draw_cross(blob.cx(), blob.cy())
        img.draw_string(blob.x(), blob.y() - 10, "%.2f C" % map_g_to_temp(stats.mean()), mono_space=False)
        img.draw_string(
            blob.x(),
            blob.y() - 10,
            "%.2f C" % map_g_to_temp(stats.mean()),
            mono_space=False,
        )
    lcd.display(img)
    print("FPS %f - Lepton Temp: %f C" % (clock.fps(), sensor.ioctl(sensor.IOCTL_LEPTON_GET_FPA_TEMPERATURE)))
    print(
        "FPS %f - Lepton Temp: %f C"
        % (clock.fps(), sensor.ioctl(sensor.IOCTL_LEPTON_GET_FPA_TEMPERATURE))
    )

@ -7,7 +7,8 @@
# stabilizes. You can force the re-calibration to not happen if you need to via the lepton API.
# However, it is not recommended because the image will degrade over time.

import sensor, image, time, math
import sensor
import time

# Color Tracking Thresholds (Grayscale Min, Grayscale Max)
threshold_list = [(220, 255)]
@ -15,9 +16,17 @@ threshold_list = [(220, 255)]
print("Resetting Lepton...")
# These settings are applied on reset
sensor.reset()
print("Lepton Res (%dx%d)" % (sensor.ioctl(sensor.IOCTL_LEPTON_GET_WIDTH),
                              sensor.ioctl(sensor.IOCTL_LEPTON_GET_HEIGHT)))
print("Radiometry Available: " + ("Yes" if sensor.ioctl(sensor.IOCTL_LEPTON_GET_RADIOMETRY) else "No"))
print(
    "Lepton Res (%dx%d)"
    % (
        sensor.ioctl(sensor.IOCTL_LEPTON_GET_WIDTH),
        sensor.ioctl(sensor.IOCTL_LEPTON_GET_HEIGHT),
    )
)
print(
    "Radiometry Available: "
    + ("Yes" if sensor.ioctl(sensor.IOCTL_LEPTON_GET_RADIOMETRY) else "No")
)

sensor.set_pixformat(sensor.GRAYSCALE)
sensor.set_framesize(sensor.QQVGA)
@ -28,10 +37,12 @@ clock = time.clock()
# returned by "find_blobs" below. Change "pixels_threshold" and "area_threshold" if you change the
# camera resolution. "merge=True" merges all overlapping blobs in the image.

while(True):
while True:
    clock.tick()
    img = sensor.snapshot()
    for blob in img.find_blobs(threshold_list, pixels_threshold=200, area_threshold=200, merge=True):
    for blob in img.find_blobs(
        threshold_list, pixels_threshold=200, area_threshold=200, merge=True
    ):
        img.draw_rectangle(blob.rect(), color=127)
        img.draw_cross(blob.cx(), blob.cy(), color=127)
    print(clock.fps())

@ -7,7 +7,9 @@
# stabilizes. You can force the re-calibration to not happen if you need to via the lepton API.
# However, it is not recommended because the image will degrade over time.

import sensor, image, time, math, lcd
import sensor
import time
import lcd

# Color Tracking Thresholds (Grayscale Min, Grayscale Max)
threshold_list = [(220, 255)]
@ -15,9 +17,17 @@ threshold_list = [(220, 255)]
print("Resetting Lepton...")
# These settings are applied on reset
sensor.reset()
print("Lepton Res (%dx%d)" % (sensor.ioctl(sensor.IOCTL_LEPTON_GET_WIDTH),
                              sensor.ioctl(sensor.IOCTL_LEPTON_GET_HEIGHT)))
print("Radiometry Available: " + ("Yes" if sensor.ioctl(sensor.IOCTL_LEPTON_GET_RADIOMETRY) else "No"))
print(
    "Lepton Res (%dx%d)"
    % (
        sensor.ioctl(sensor.IOCTL_LEPTON_GET_WIDTH),
        sensor.ioctl(sensor.IOCTL_LEPTON_GET_HEIGHT),
    )
)
print(
    "Radiometry Available: "
    + ("Yes" if sensor.ioctl(sensor.IOCTL_LEPTON_GET_RADIOMETRY) else "No")
)

sensor.set_pixformat(sensor.GRAYSCALE)
sensor.set_framesize(sensor.LCD)
@ -29,10 +39,12 @@ lcd.init()
# returned by "find_blobs" below. Change "pixels_threshold" and "area_threshold" if you change the
# camera resolution. "merge=True" merges all overlapping blobs in the image.

while(True):
while True:
    clock.tick()
    img = sensor.snapshot()
    for blob in img.find_blobs(threshold_list, pixels_threshold=200, area_threshold=200, merge=True):
    for blob in img.find_blobs(
        threshold_list, pixels_threshold=200, area_threshold=200, merge=True
    ):
        img.draw_rectangle(blob.rect(), color=127)
        img.draw_cross(blob.cx(), blob.cy(), color=127)
    lcd.display(img)

@ -7,17 +7,26 @@
# stabilizes. You can force the re-calibration to not happen if you need to via the lepton API.
# However, it is not recommended because the image will degrade over time.

import sensor, image, time, math
import sensor
import time

# Color Tracking Thresholds (L Min, L Max, A Min, A Max, B Min, B Max)
threshold_list = [( 70, 100, -30, 40, 20, 100)]
threshold_list = [(70, 100, -30, 40, 20, 100)]

print("Resetting Lepton...")
# These settings are applied on reset
sensor.reset()
print("Lepton Res (%dx%d)" % (sensor.ioctl(sensor.IOCTL_LEPTON_GET_WIDTH),
                              sensor.ioctl(sensor.IOCTL_LEPTON_GET_HEIGHT)))
print("Radiometry Available: " + ("Yes" if sensor.ioctl(sensor.IOCTL_LEPTON_GET_RADIOMETRY) else "No"))
print(
    "Lepton Res (%dx%d)"
    % (
        sensor.ioctl(sensor.IOCTL_LEPTON_GET_WIDTH),
        sensor.ioctl(sensor.IOCTL_LEPTON_GET_HEIGHT),
    )
)
print(
    "Radiometry Available: "
    + ("Yes" if sensor.ioctl(sensor.IOCTL_LEPTON_GET_RADIOMETRY) else "No")
)
# Make the color palette cool
sensor.set_color_palette(sensor.PALETTE_IRONBOW)

@ -30,10 +39,12 @@ clock = time.clock()
# returned by "find_blobs" below. Change "pixels_threshold" and "area_threshold" if you change the
# camera resolution. "merge=True" merges all overlapping blobs in the image.

while(True):
while True:
    clock.tick()
    img = sensor.snapshot()
    for blob in img.find_blobs(threshold_list, pixels_threshold=200, area_threshold=200, merge=True):
    for blob in img.find_blobs(
        threshold_list, pixels_threshold=200, area_threshold=200, merge=True
    ):
        img.draw_rectangle(blob.rect())
        img.draw_cross(blob.cx(), blob.cy())
    print(clock.fps())

@ -7,17 +7,27 @@
# stabilizes. You can force the re-calibration to not happen if you need to via the lepton API.
# However, it is not recommended because the image will degrade over time.

import sensor, image, time, math, lcd
import sensor
import time
import lcd

# Color Tracking Thresholds (L Min, L Max, A Min, A Max, B Min, B Max)
threshold_list = [( 70, 100, -30, 40, 20, 100)]
threshold_list = [(70, 100, -30, 40, 20, 100)]

print("Resetting Lepton...")
# These settings are applied on reset
sensor.reset()
print("Lepton Res (%dx%d)" % (sensor.ioctl(sensor.IOCTL_LEPTON_GET_WIDTH),
                              sensor.ioctl(sensor.IOCTL_LEPTON_GET_HEIGHT)))
print("Radiometry Available: " + ("Yes" if sensor.ioctl(sensor.IOCTL_LEPTON_GET_RADIOMETRY) else "No"))
print(
    "Lepton Res (%dx%d)"
    % (
        sensor.ioctl(sensor.IOCTL_LEPTON_GET_WIDTH),
        sensor.ioctl(sensor.IOCTL_LEPTON_GET_HEIGHT),
    )
)
print(
    "Radiometry Available: "
    + ("Yes" if sensor.ioctl(sensor.IOCTL_LEPTON_GET_RADIOMETRY) else "No")
)
# Make the color palette cool
sensor.set_color_palette(sensor.PALETTE_IRONBOW)

@ -31,10 +41,12 @@ lcd.init()
# returned by "find_blobs" below. Change "pixels_threshold" and "area_threshold" if you change the
# camera resolution. "merge=True" merges all overlapping blobs in the image.

while(True):
while True:
    clock.tick()
    img = sensor.snapshot()
    for blob in img.find_blobs(threshold_list, pixels_threshold=200, area_threshold=200, merge=True):
    for blob in img.find_blobs(
        threshold_list, pixels_threshold=200, area_threshold=200, merge=True
    ):
        img.draw_rectangle(blob.rect())
        img.draw_cross(blob.cx(), blob.cy())
    lcd.display(img)

@ -17,7 +17,8 @@
# leptons don't have radiometry support or they don't activate their calibration process often
# enough to deal with temperature changes (FLIR 2.5).

import sensor, image, time, math
import sensor
import time

# Color Tracking Thresholds (Grayscale Min, Grayscale Max)
threshold_list = [(220, 255)]
@ -30,10 +31,20 @@ print("Resetting Lepton...")
# These settings are applied on reset
sensor.reset()
sensor.ioctl(sensor.IOCTL_LEPTON_SET_MEASUREMENT_MODE, True)
sensor.ioctl(sensor.IOCTL_LEPTON_SET_MEASUREMENT_RANGE, min_temp_in_celsius, max_temp_in_celsius)
print("Lepton Res (%dx%d)" % (sensor.ioctl(sensor.IOCTL_LEPTON_GET_WIDTH),
                              sensor.ioctl(sensor.IOCTL_LEPTON_GET_HEIGHT)))
print("Radiometry Available: " + ("Yes" if sensor.ioctl(sensor.IOCTL_LEPTON_GET_RADIOMETRY) else "No"))
sensor.ioctl(
    sensor.IOCTL_LEPTON_SET_MEASUREMENT_RANGE, min_temp_in_celsius, max_temp_in_celsius
)
print(
    "Lepton Res (%dx%d)"
    % (
        sensor.ioctl(sensor.IOCTL_LEPTON_GET_WIDTH),
        sensor.ioctl(sensor.IOCTL_LEPTON_GET_HEIGHT),
    )
)
print(
    "Radiometry Available: "
    + ("Yes" if sensor.ioctl(sensor.IOCTL_LEPTON_GET_RADIOMETRY) else "No")
)

sensor.set_pixformat(sensor.GRAYSCALE)
sensor.set_framesize(sensor.QQVGA)
@ -44,10 +55,15 @@ clock = time.clock()
# returned by "find_blobs" below. Change "pixels_threshold" and "area_threshold" if you change the
# camera resolution. "merge=True" merges all overlapping blobs in the image.

while(True):
while True:
    clock.tick()
    img = sensor.snapshot()
    for blob in img.find_blobs(threshold_list, pixels_threshold=200, area_threshold=200, merge=True):
    for blob in img.find_blobs(
        threshold_list, pixels_threshold=200, area_threshold=200, merge=True
    ):
        img.draw_rectangle(blob.rect(), color=127)
        img.draw_cross(blob.cx(), blob.cy(), color=127)
    print("FPS %f - Lepton Temp: %f C" % (clock.fps(), sensor.ioctl(sensor.IOCTL_LEPTON_GET_FPA_TEMPERATURE)))
    print(
        "FPS %f - Lepton Temp: %f C"
        % (clock.fps(), sensor.ioctl(sensor.IOCTL_LEPTON_GET_FPA_TEMPERATURE))
    )

@ -17,10 +17,11 @@
# leptons don't have radiometry support or they don't activate their calibration process often
# enough to deal with temperature changes (FLIR 2.5).

import sensor, image, time, math
import sensor
import time

# Color Tracking Thresholds (L Min, L Max, A Min, A Max, B Min, B Max)
threshold_list = [( 70, 100, -30, 40, 20, 100)]
threshold_list = [(70, 100, -30, 40, 20, 100)]

# Set the target temp range here
min_temp_in_celsius = 20
@ -30,10 +31,20 @@ print("Resetting Lepton...")
# These settings are applied on reset
sensor.reset()
sensor.ioctl(sensor.IOCTL_LEPTON_SET_MEASUREMENT_MODE, True)
sensor.ioctl(sensor.IOCTL_LEPTON_SET_MEASUREMENT_RANGE, min_temp_in_celsius, max_temp_in_celsius)
print("Lepton Res (%dx%d)" % (sensor.ioctl(sensor.IOCTL_LEPTON_GET_WIDTH),
                              sensor.ioctl(sensor.IOCTL_LEPTON_GET_HEIGHT)))
print("Radiometry Available: " + ("Yes" if sensor.ioctl(sensor.IOCTL_LEPTON_GET_RADIOMETRY) else "No"))
sensor.ioctl(
    sensor.IOCTL_LEPTON_SET_MEASUREMENT_RANGE, min_temp_in_celsius, max_temp_in_celsius
)
print(
    "Lepton Res (%dx%d)"
    % (
        sensor.ioctl(sensor.IOCTL_LEPTON_GET_WIDTH),
        sensor.ioctl(sensor.IOCTL_LEPTON_GET_HEIGHT),
    )
)
print(
    "Radiometry Available: "
    + ("Yes" if sensor.ioctl(sensor.IOCTL_LEPTON_GET_RADIOMETRY) else "No")
)
# Make the color palette cool
sensor.set_color_palette(sensor.PALETTE_IRONBOW)

@ -46,10 +57,15 @@ clock = time.clock()
# returned by "find_blobs" below. Change "pixels_threshold" and "area_threshold" if you change the
# camera resolution. "merge=True" merges all overlapping blobs in the image.

while(True):
while True:
    clock.tick()
    img = sensor.snapshot()
    for blob in img.find_blobs(threshold_list, pixels_threshold=200, area_threshold=200, merge=True):
    for blob in img.find_blobs(
        threshold_list, pixels_threshold=200, area_threshold=200, merge=True
    ):
        img.draw_rectangle(blob.rect())
        img.draw_cross(blob.cx(), blob.cy())
    print("FPS %f - Lepton Temp: %f C" % (clock.fps(), sensor.ioctl(sensor.IOCTL_LEPTON_GET_FPA_TEMPERATURE)))
    print(
        "FPS %f - Lepton Temp: %f C"
        % (clock.fps(), sensor.ioctl(sensor.IOCTL_LEPTON_GET_FPA_TEMPERATURE))
    )

@ -3,26 +3,33 @@
# This example shows off how to overlay a depth map onto
# OpenMV Cam's live video output from the main camera.

import image, time, tof
import image
import time
import tof

IMAGE_SCALE = 10  # Higher scaling uses more memory.
drawing_hint = image.BILINEAR  # or image.BILINEAR or 0 (nearest neighbor)
IMAGE_SCALE = 10  # Higher scaling uses more memory.
drawing_hint = image.BILINEAR  # or image.BILINEAR or 0 (nearest neighbor)

# Initialize the ToF sensor
tof.init()  # Auto-detects the connected sensor.
w = tof.width() * IMAGE_SCALE
tof.init()  # Auto-detects the connected sensor.
w = tof.width() * IMAGE_SCALE
h = tof.height() * IMAGE_SCALE

# FPS clock
clock = time.clock()

while (True):
while True:
    clock.tick()

    try:
        img = tof.snapshot(x_size=w, y_size=h,
                           color_palette=tof.PALETTE_IRONBOW,
                           hint=drawing_hint, copy_to_fb=True, scale=(0, 4000))
        img = tof.snapshot(
            x_size=w,
            y_size=h,
            color_palette=tof.PALETTE_IRONBOW,
            hint=drawing_hint,
            copy_to_fb=True,
            scale=(0, 4000),
        )
    except OSError:
        continue

@ -2,12 +2,15 @@
#
# This example shows off how to overlay a depth map onto
# OpenMV Cam's live video output from the main camera.
import sensor, image, time, tof
import sensor
import image
import time
import tof

sensor.reset()
sensor.set_pixformat(sensor.RGB565)
sensor.set_framesize(sensor.QVGA)
sensor.set_windowing((0, 0, 240, 240))
sensor.reset()  # Reset and initialize the sensor.
sensor.set_pixformat(sensor.RGB565)  # Set pixel format to RGB565 (or GRAYSCALE)
sensor.set_framesize(sensor.QVGA)  # Set frame size to QVGA (320x240)
sensor.set_windowing((0, 0, 240, 240))  # Set window size to 240x240

# Initialize the ToF sensor
tof.init()
@ -15,7 +18,7 @@ tof.init()
# FPS clock
clock = time.clock()

while (True):
while True:
    clock.tick()

    # Capture an image
@ -28,12 +31,22 @@ while (True):
        continue

    # Scale the image and blend it with the framebuffer
    tof.draw_depth(img, depth, hint=image.BILINEAR,
                   alpha=200, scale=(0, 4000), color_palette=tof.PALETTE_IRONBOW)
    tof.draw_depth(
        img,
        depth,
        hint=image.BILINEAR,
        alpha=200,
        scale=(0, 4000),
        color_palette=tof.PALETTE_IRONBOW,
    )

    # Draw min and max distance.
    img.draw_string(8, 0, "Min distance: %d mm" % dmin, color = (255, 0, 0), mono_space = False)
    img.draw_string(8, 8, "Max distance: %d mm" % dmax, color = (255, 0, 0), mono_space = False)
    img.draw_string(
        8, 0, "Min distance: %d mm" % dmin, color=(255, 0, 0), mono_space=False
    )
    img.draw_string(
        8, 8, "Max distance: %d mm" % dmax, color=(255, 0, 0), mono_space=False
    )

    # Force high quality streaming
    img.compress(quality=90)

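Both ToF snippets pass scale=(0, 4000) so the palette is pinned to a fixed 0 to 4000 mm window instead of auto-ranging per frame, which keeps colors comparable across frames. The mapping implied by that argument is the usual clamp-and-rescale; a sketch:

    def depth_to_palette_index(depth_mm, dmin=0, dmax=4000):
        depth_mm = min(max(depth_mm, dmin), dmax)  # clamp into the window
        return int((depth_mm - dmin) * 255 / (dmax - dmin))  # 0..255 palette index


    print(depth_to_palette_index(1000))  # -> 63, a quarter of the way up the palette
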
@ -22,24 +22,27 @@
# more when the lighting changes versus the exposure being constant and
# the gain changing.

import sensor, image, time
import sensor
import time

sensor.reset()  # Reset and initialize the sensor.
sensor.set_pixformat(sensor.RGB565)  # Set pixel format to RGB565 (or GRAYSCALE)
sensor.set_framesize(sensor.QVGA)  # Set frame size to QVGA (320x240)
sensor.reset()  # Reset and initialize the sensor.
sensor.set_pixformat(sensor.RGB565)  # Set pixel format to RGB565 (or GRAYSCALE)
sensor.set_framesize(sensor.QVGA)  # Set frame size to QVGA (320x240)

# The gain db ceiling maxes out at about 24 db for the OV7725 sensor.
sensor.set_auto_gain(True, gain_db_ceiling = 16.0)  # Default gain.
sensor.set_auto_gain(True, gain_db_ceiling=16.0)  # Default gain.

# Note! If you set the gain ceiling too low without adjusting the exposure control
# target value then you'll just get a lot of oscillation from the exposure
# control if it's on.

sensor.skip_frames(time = 2000)  # Wait for settings to take effect.
clock = time.clock()  # Create a clock object to track the FPS.
sensor.skip_frames(time=2000)  # Wait for settings to take effect.
clock = time.clock()  # Create a clock object to track the FPS.

while(True):
    clock.tick()  # Update the FPS clock.
    img = sensor.snapshot()  # Take a picture and return the image.
    print("FPS %f, Gain %f dB, Exposure %d us" % \
        (clock.fps(), sensor.get_gain_db(), sensor.get_exposure_us()))
while True:
    clock.tick()  # Update the FPS clock.
    img = sensor.snapshot()  # Take a picture and return the image.
    print(
        "FPS %f, Gain %f dB, Exposure %d us"
        % (clock.fps(), sensor.get_gain_db(), sensor.get_exposure_us())
    )

@ -15,20 +15,21 @@
# noise. So, it's best to let the exposure increase as much as possible
# and then use gain control to make up any remaining ground.

import sensor, image, time
import sensor
import time

# Change this value to adjust the exposure. Try 10.0/0.1/etc.
EXPOSURE_TIME_SCALE = 1.0

sensor.reset()  # Reset and initialize the sensor.
sensor.set_pixformat(sensor.RGB565)  # Set pixel format to RGB565 (or GRAYSCALE)
sensor.set_framesize(sensor.QVGA)  # Set frame size to QVGA (320x240)
sensor.reset()  # Reset and initialize the sensor.
sensor.set_pixformat(sensor.RGB565)  # Set pixel format to RGB565 (or GRAYSCALE)
sensor.set_framesize(sensor.QVGA)  # Set frame size to QVGA (320x240)

# Print out the initial exposure time for comparison.
print("Initial exposure == %d" % sensor.get_exposure_us())

sensor.skip_frames(time = 2000)  # Wait for settings to take effect.
clock = time.clock()  # Create a clock object to track the FPS.
sensor.skip_frames(time=2000)  # Wait for settings to take effect.
clock = time.clock()  # Create a clock object to track the FPS.

# You have to turn automatic gain control and automatic white balance off
# otherwise they will change the image gains to undo any exposure settings
@ -36,7 +37,7 @@ clock = time.clock()  # Create a clock object to track the FPS.
sensor.set_auto_gain(False)
sensor.set_auto_whitebal(False)
# Need to let the above settings get in...
sensor.skip_frames(time = 500)
sensor.skip_frames(time=500)

current_exposure_time_in_microseconds = sensor.get_exposure_us()
print("Current Exposure == %d" % current_exposure_time_in_microseconds)
@ -44,8 +45,9 @@ print("Current Exposure == %d" % current_exposure_time_in_microseconds)
# Auto exposure control (AEC) is enabled by default. Calling the below function
# disables sensor auto exposure control. The additional "exposure_us"
# argument then overrides the auto exposure value after AEC is disabled.
sensor.set_auto_exposure(False, \
    exposure_us = int(current_exposure_time_in_microseconds * EXPOSURE_TIME_SCALE))
sensor.set_auto_exposure(
    False, exposure_us=int(current_exposure_time_in_microseconds * EXPOSURE_TIME_SCALE)
)

print("New exposure == %d" % sensor.get_exposure_us())
# sensor.get_exposure_us() returns the exact camera sensor exposure time
@ -60,8 +62,8 @@ print("New exposure == %d" % sensor.get_exposure_us())
# Just disables the exposure value update but does not change the exposure
# value the camera sensor determined was good.

while(True):
    clock.tick()  # Update the FPS clock.
    img = sensor.snapshot()  # Take a picture and return the image.
    print(clock.fps())  # Note: OpenMV Cam runs about half as fast when connected
    # to the IDE. The FPS should increase once disconnected.
while True:
    clock.tick()  # Update the FPS clock.
    img = sensor.snapshot()  # Take a picture and return the image.
    print(clock.fps())  # Note: OpenMV Cam runs about half as fast when connected
    # to the IDE. The FPS should increase once disconnected.

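One way to use EXPOSURE_TIME_SCALE from the example above is to trade exposure against gain while holding brightness roughly constant: halving the exposure costs about one stop of light, which roughly 6 dB of extra gain buys back. A sketch of that trade, built from the same sensor calls the example uses (a workflow suggestion, not part of the shipped example):

    import sensor

    sensor.reset()
    sensor.set_pixformat(sensor.GRAYSCALE)
    sensor.set_framesize(sensor.QVGA)
    sensor.skip_frames(time=2000)
    sensor.set_auto_whitebal(False)  # keep AWB from fighting the manual settings

    half_exposure = sensor.get_exposure_us() // 2
    base_gain_db = sensor.get_gain_db()
    sensor.set_auto_exposure(False, exposure_us=half_exposure)
    sensor.set_auto_gain(False, gain_db=base_gain_db + 6.0)  # ~one stop of gain back
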
@ -3,19 +3,20 @@
# This example shows off horizontally mirroring the image in hardware
# from the camera sensor.

import sensor, image, time
import sensor
import time

sensor.reset()  # Reset and initialize the sensor.
sensor.set_pixformat(sensor.RGB565)  # Set pixel format to RGB565 (or GRAYSCALE)
sensor.set_framesize(sensor.QVGA)  # Set frame size to QVGA (320x240)
sensor.skip_frames(time = 2000)  # Wait for settings to take effect.
clock = time.clock()  # Create a clock object to track the FPS.
sensor.reset()  # Reset and initialize the sensor.
sensor.set_pixformat(sensor.RGB565)  # Set pixel format to RGB565 (or GRAYSCALE)
sensor.set_framesize(sensor.QVGA)  # Set frame size to QVGA (320x240)
sensor.skip_frames(time=2000)  # Wait for settings to take effect.
clock = time.clock()  # Create a clock object to track the FPS.

# Change this to False to undo the mirror.
sensor.set_hmirror(True)

while(True):
    clock.tick()  # Update the FPS clock.
    img = sensor.snapshot()  # Take a picture and return the image.
    print(clock.fps())  # Note: OpenMV Cam runs about half as fast when connected
    # to the IDE. The FPS should increase once disconnected.
while True:
    clock.tick()  # Update the FPS clock.
    img = sensor.snapshot()  # Take a picture and return the image.
    print(clock.fps())  # Note: OpenMV Cam runs about half as fast when connected
    # to the IDE. The FPS should increase once disconnected.

@ -13,13 +13,14 @@
# the sensor on startup you can control the colors
# the camera sees.

import sensor, image, time
import sensor
import time

sensor.reset()  # Reset and initialize the sensor.
sensor.set_pixformat(sensor.RGB565)  # Set pixel format to RGB565 (or GRAYSCALE)
sensor.set_framesize(sensor.QVGA)  # Set frame size to QVGA (320x240)
sensor.skip_frames(time = 2000)  # Wait for settings to take effect.
clock = time.clock()  # Create a clock object to track the FPS.
sensor.reset()  # Reset and initialize the sensor.
sensor.set_pixformat(sensor.RGB565)  # Set pixel format to RGB565 (or GRAYSCALE)
sensor.set_framesize(sensor.QVGA)  # Set frame size to QVGA (320x240)
sensor.skip_frames(time=2000)  # Wait for settings to take effect.
clock = time.clock()  # Create a clock object to track the FPS.

# You can control the white balance gains here. The first value is the
# R gain in db, and then the G gain in db, followed by the B gain in db.
@ -32,8 +33,7 @@ clock = time.clock()  # Create a clock object to track the FPS.
# coming out. Do not expect the exact value going in to be equal to the value
# coming out.

while(True):
    clock.tick()  # Update the FPS clock.
    img = sensor.snapshot()  # Take a picture and return the image.
    print(clock.fps(), \
        sensor.get_rgb_gain_db())  # Prints the AWB current RGB gains.
while True:
    clock.tick()  # Update the FPS clock.
    img = sensor.snapshot()  # Take a picture and return the image.
    print(clock.fps(), sensor.get_rgb_gain_db())  # Prints the AWB current RGB gains.

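The gains this example prints can also be written back to lock white balance: sample sensor.get_rgb_gain_db() once the scene looks right under the reference lighting, then hand the tuple back when disabling AWB. A sketch, assuming the rgb_gain_db keyword the OpenMV sensor API provides for sensor.set_auto_whitebal():

    import sensor

    sensor.reset()
    sensor.set_pixformat(sensor.RGB565)
    sensor.set_framesize(sensor.QVGA)
    sensor.skip_frames(time=2000)  # let AWB settle under the reference lighting

    locked = sensor.get_rgb_gain_db()  # (R, G, B) gains in dB
    sensor.set_auto_whitebal(False, rgb_gain_db=locked)
    print("white balance locked at", locked)
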
@ -3,19 +3,20 @@
# This example shows off vertically flipping the image in hardware
# from the camera sensor.

import sensor, image, time
import sensor
import time

sensor.reset()  # Reset and initialize the sensor.
sensor.set_pixformat(sensor.RGB565)  # Set pixel format to RGB565 (or GRAYSCALE)
sensor.set_framesize(sensor.QVGA)  # Set frame size to QVGA (320x240)
sensor.skip_frames(time = 2000)  # Wait for settings to take effect.
clock = time.clock()  # Create a clock object to track the FPS.
sensor.reset()  # Reset and initialize the sensor.
sensor.set_pixformat(sensor.RGB565)  # Set pixel format to RGB565 (or GRAYSCALE)
sensor.set_framesize(sensor.QVGA)  # Set frame size to QVGA (320x240)
sensor.skip_frames(time=2000)  # Wait for settings to take effect.
clock = time.clock()  # Create a clock object to track the FPS.

# Change this to False to undo the flip.
sensor.set_vflip(True)

while(True):
    clock.tick()  # Update the FPS clock.
    img = sensor.snapshot()  # Take a picture and return the image.
    print(clock.fps())  # Note: OpenMV Cam runs about half as fast when connected
    # to the IDE. The FPS should increase once disconnected.
while True:
    clock.tick()  # Update the FPS clock.
    img = sensor.snapshot()  # Take a picture and return the image.
    print(clock.fps())  # Note: OpenMV Cam runs about half as fast when connected
    # to the IDE. The FPS should increase once disconnected.

@ -15,20 +15,21 @@
|
||||
# noise. So, it's best to let the exposure increase as much as possible
|
||||
# and then use gain control to make up any remaining ground.
|
||||
|
||||
import sensor, image, time
|
||||
import sensor
|
||||
import time
|
||||
|
||||
# Change this value to adjust the gain. Try 10.0/0/0.1/etc.
|
||||
GAIN_SCALE = 1.0
|
||||
|
||||
sensor.reset() # Reset and initialize the sensor.
|
||||
sensor.set_pixformat(sensor.RGB565) # Set pixel format to RGB565 (or GRAYSCALE)
|
||||
sensor.set_framesize(sensor.QVGA) # Set frame size to QVGA (320x240)
|
||||
sensor.reset() # Reset and initialize the sensor.
|
||||
sensor.set_pixformat(sensor.RGB565) # Set pixel format to RGB565 (or GRAYSCALE)
|
||||
sensor.set_framesize(sensor.QVGA) # Set frame size to QVGA (320x240)
|
||||
|
||||
# Print out the initial gain for comparison.
|
||||
print("Initial gain == %f db" % sensor.get_gain_db())
|
||||
|
||||
sensor.skip_frames(time = 2000) # Wait for settings take effect.
|
||||
clock = time.clock() # Create a clock object to track the FPS.
|
||||
sensor.skip_frames(time=2000) # Wait for settings take effect.
|
||||
clock = time.clock() # Create a clock object to track the FPS.
|
||||
|
||||
# You have to turn automatic exposure control and automatic white blance off
|
||||
# otherwise they will change the image exposure to undo any gain settings
|
||||
@ -36,7 +37,7 @@ clock = time.clock() # Create a clock object to track the FPS.
|
||||
sensor.set_auto_exposure(False)
|
||||
sensor.set_auto_whitebal(False)
|
||||
# Need to let the above settings get in...
|
||||
sensor.skip_frames(time = 500)
|
||||
sensor.skip_frames(time=500)
|
||||
|
||||
current_gain_in_decibels = sensor.get_gain_db()
|
||||
print("Current Gain == %f db" % current_gain_in_decibels)
|
||||
@ -44,8 +45,7 @@ print("Current Gain == %f db" % current_gain_in_decibels)
|
||||
# Auto gain control (AGC) is enabled by default. Calling the below function
|
||||
# disables sensor auto gain control. The additionally "gain_db"
|
||||
# argument then overrides the auto gain value after AGC is disabled.
|
||||
sensor.set_auto_gain(False, \
|
||||
gain_db = current_gain_in_decibels * GAIN_SCALE)
|
||||
sensor.set_auto_gain(False, gain_db=current_gain_in_decibels * GAIN_SCALE)
|
||||
|
||||
print("New gain == %f db" % sensor.get_gain_db())
|
||||
# sensor.get_gain_db() returns the exact camera sensor gain decibels.
|
||||
@ -60,8 +60,8 @@ print("New gain == %f db" % sensor.get_gain_db())
|
||||
# Just disables the gain value update but does not change the gain
|
||||
# value the camera sensor determined was good.
|
||||
|
||||
while(True):
|
||||
clock.tick() # Update the FPS clock.
|
||||
img = sensor.snapshot() # Take a picture and return the image.
|
||||
print(clock.fps()) # Note: OpenMV Cam runs about half as fast when connected
|
||||
# to the IDE. The FPS should increase once disconnected.
|
||||
while True:
|
||||
clock.tick() # Update the FPS clock.
|
||||
img = sensor.snapshot() # Take a picture and return the image.
|
||||
print(clock.fps()) # Note: OpenMV Cam runs about half as fast when connected
|
||||
# to the IDE. The FPS should increase once disconnected.
|
||||
|
||||
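As the AGC comments above note, set_auto_gain() can be called with or without the gain_db override; a minimal sketch of both forms (the 12.0 dB value is illustrative):

sensor.set_auto_gain(False)  # Freeze AGC at whatever gain the sensor converged on.
sensor.set_auto_gain(False, gain_db=12.0)  # Or freeze AGC and force a specific gain in dB.
print(sensor.get_gain_db())  # Returns the exact gain the sensor applied.
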
@ -3,10 +3,11 @@

# This example was designed and tested on the OpenMV Cam H7 Plus using the OV5640 sensor.

import sensor, image, time
import sensor
import time

EXPOSURE_MICROSECONDS = 1000
TRACKING_THRESHOLDS = [(128, 255)]  # When you lower the exposure you darken everything.
TRACKING_THRESHOLDS = [(128, 255)]  # When you lower the exposure you darken everything.

SEARCHING_RESOLUTION = sensor.VGA
SEARCHING_AREA_THRESHOLD = 16
@ -16,32 +17,34 @@ TRACKING_RESOLUTION = sensor.QQVGA
TRACKING_AREA_THRESHOLD = 256
TRACKING_PIXEL_THRESHOLD = TRACKING_AREA_THRESHOLD

TRACKING_EDGE_TOLERANCE = 0.05  # Blob can move 5% away from the center.
TRACKING_EDGE_TOLERANCE = 0.05  # Blob can move 5% away from the center.

sensor.reset()  # Reset and initialize the sensor.
sensor.set_pixformat(sensor.GRAYSCALE)  # Set pixel format to GRAYSCALE
sensor.reset()  # Reset and initialize the sensor.
sensor.set_pixformat(sensor.GRAYSCALE)  # Set pixel format to GRAYSCALE
sensor.set_framesize(SEARCHING_RESOLUTION)
sensor.skip_frames(time = 1000)  # Wait for settings take effect.
clock = time.clock()  # Create a clock object to track the FPS.
sensor.skip_frames(time=1000)  # Wait for settings take effect.
clock = time.clock()  # Create a clock object to track the FPS.

sensor.set_auto_gain(False)  # Turn off as it will oscillate.
sensor.set_auto_gain(False)  # Turn off as it will oscillate.
sensor.set_auto_exposure(False, exposure_us=EXPOSURE_MICROSECONDS)
sensor.skip_frames(time = 1000)
sensor.skip_frames(time=1000)

# sensor_w and sensor_h are the image sensor raw pixels w/h (x/y are 0 initially).
x, y, sensor_w, sensor_h = sensor.ioctl(sensor.IOCTL_GET_READOUT_WINDOW)

while(True):
while True:
    clock.tick()
    img = sensor.snapshot()

    # We need to find an IR object to track - it's likely to be really bright.
    blobs = img.find_blobs(TRACKING_THRESHOLDS,
        area_threshold=SEARCHING_AREA_THRESHOLD,
        pixels_threshold=SEARCHING_PIXEL_THRESHOLD)
    blobs = img.find_blobs(
        TRACKING_THRESHOLDS,
        area_threshold=SEARCHING_AREA_THRESHOLD,
        pixels_threshold=SEARCHING_PIXEL_THRESHOLD,
    )

    if len(blobs):
        most_dense_blob = max(blobs, key = lambda x: x.density())
        most_dense_blob = max(blobs, key=lambda x: x.density())
        img.draw_rectangle(most_dense_blob.rect())

        def get_mapped_centroid(b):
@ -68,7 +71,7 @@ while(True):
            # Add in our displacement from the sensor center
            mapped_cy += y + (sensor_h / 2.0)

            return (mapped_cx, mapped_cy)  # X/Y location on the sensor array.
            return (mapped_cx, mapped_cy)  # X/Y location on the sensor array.

        def center_on_blob(b, res):
            mapped_cx, mapped_cy = get_mapped_centroid(b)
@ -92,22 +95,28 @@ while(True):
            x_error = x - new_x
            y_error = y - new_y

            if x_error < 0: print("-X Limit Reached ", end="")
            if x_error > 0: print("+X Limit Reached ", end="")
            if y_error < 0: print("-Y Limit Reached ", end="")
            if y_error > 0: print("+Y Limit Reached ", end="")
            if x_error < 0:
                print("-X Limit Reached ", end="")
            if x_error > 0:
                print("+X Limit Reached ", end="")
            if y_error < 0:
                print("-Y Limit Reached ", end="")
            if y_error > 0:
                print("+Y Limit Reached ", end="")

        center_on_blob(most_dense_blob, TRACKING_RESOLUTION)

        # This loop will track the blob at a much higher readout speed and lower resolution.
        while(True):
        while True:
            clock.tick()
            img = sensor.snapshot()

            # Find the blob in the lower resolution image.
            blobs = img.find_blobs(TRACKING_THRESHOLDS,
                area_threshold=TRACKING_AREA_THRESHOLD,
                pixels_threshold=TRACKING_PIXEL_THRESHOLD)
            blobs = img.find_blobs(
                TRACKING_THRESHOLDS,
                area_threshold=TRACKING_AREA_THRESHOLD,
                pixels_threshold=TRACKING_PIXEL_THRESHOLD,
            )

            # If we lose the blob then we need to find a new one.
            if not len(blobs):
@ -117,10 +126,12 @@ while(True):
                break

            # Narrow down the blob list and highlight the blob.
            most_dense_blob = max(blobs, key = lambda x: x.density())
            most_dense_blob = max(blobs, key=lambda x: x.density())
            img.draw_rectangle(most_dense_blob.rect())

            print(clock.fps(), "BLOB cx:%d, cy:%d" % get_mapped_centroid(most_dense_blob))
            print(
                clock.fps(), "BLOB cx:%d, cy:%d" % get_mapped_centroid(most_dense_blob)
            )

            x_diff = most_dense_blob.cx() - (sensor.width() / 2.0)
            y_diff = most_dense_blob.cy() - (sensor.height() / 2.0)

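The get_mapped_centroid() helper whose body is split across the hunks above maps a blob centroid from the current readout window back onto the raw sensor array. A condensed, hypothetical sketch of that mapping (the committed helper also handles window offsets being expressed relative to the sensor center):

def map_to_sensor(cx, cy, win_x, win_y, win_w, win_h):
    # Scale the centroid from the output resolution up to the readout window size...
    mapped_cx = cx * (win_w / sensor.width())
    mapped_cy = cy * (win_h / sensor.height())
    # ...then shift by the window's position on the raw sensor array.
    return (mapped_cx + win_x, mapped_cy + win_y)
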
@ -3,32 +3,37 @@

# This example was designed and tested on the OpenMV Cam H7 Plus using the OV5640 sensor.

import sensor, image, time
import sensor
import time

# This example script forces the exposure to a constant value for the whole time. However, you may
# wish to dynamically adjust the exposure when the readout window shrinks to a small size.
EXPOSURE_MICROSECONDS = 20000

SEARCHING_RESOLUTION = sensor.QVGA
TRACKING_RESOLUTION = sensor.QQVGA  # or sensor.QQQVGA
TRACKING_RESOLUTION = sensor.QQVGA  # or sensor.QQQVGA

TRACKING_LOW_RATIO_THRESHOLD = 0.2  # Go to a smaller readout window when tag side vs res is smaller.
TRACKING_HIGH_RATIO_THRESHOLD = 0.8  # Go to a larger readout window when tag side vs res is larger.
TRACKING_LOW_RATIO_THRESHOLD = (
    0.2  # Go to a smaller readout window when tag side vs res is smaller.
)
TRACKING_HIGH_RATIO_THRESHOLD = (
    0.8  # Go to a larger readout window when tag side vs res is larger.
)

sensor.reset()  # Reset and initialize the sensor.
sensor.set_pixformat(sensor.GRAYSCALE)  # Set pixel format to GRAYSCALE
sensor.reset()  # Reset and initialize the sensor.
sensor.set_pixformat(sensor.GRAYSCALE)  # Set pixel format to GRAYSCALE
sensor.set_framesize(SEARCHING_RESOLUTION)
sensor.skip_frames(time = 1000)  # Wait for settings take effect.
clock = time.clock()  # Create a clock object to track the FPS.
sensor.skip_frames(time=1000)  # Wait for settings take effect.
clock = time.clock()  # Create a clock object to track the FPS.

sensor.set_auto_gain(False)  # Turn off as it will oscillate.
sensor.set_auto_gain(False)  # Turn off as it will oscillate.
sensor.set_auto_exposure(False, exposure_us=EXPOSURE_MICROSECONDS)
sensor.skip_frames(time = 1000)
sensor.skip_frames(time=1000)

# sensor_w and sensor_h are the image sensor raw pixels w/h (x/y are 0 initially).
x, y, sensor_w, sensor_h = sensor.ioctl(sensor.IOCTL_GET_READOUT_WINDOW)

while(True):
while True:
    clock.tick()
    img = sensor.snapshot()

@ -36,7 +41,7 @@ while(True):
    tags = img.find_apriltags()

    if len(tags):
        best_tag = max(tags, key = lambda x: x.decision_margin())
        best_tag = max(tags, key=lambda x: x.decision_margin())
        img.draw_rectangle(best_tag.rect())

        # This needs to be less than the sensor output at default so we can move it around.
@ -67,7 +72,7 @@ while(True):
            # Add in our displacement from the sensor center
            mapped_cy += y + (sensor_h / 2.0)

            return (mapped_cx, mapped_cy)  # X/Y location on the sensor array.
            return (mapped_cx, mapped_cy)  # X/Y location on the sensor array.

        def center_on_tag(t, res):
            global readout_window_w
@ -93,17 +98,21 @@ while(True):
            x_error = x - new_x
            y_error = y - new_y

            if x_error < 0: print("-X Limit Reached ", end="")
            if x_error > 0: print("+X Limit Reached ", end="")
            if y_error < 0: print("-Y Limit Reached ", end="")
            if y_error > 0: print("+Y Limit Reached ", end="")
            if x_error < 0:
                print("-X Limit Reached ", end="")
            if x_error > 0:
                print("+X Limit Reached ", end="")
            if y_error < 0:
                print("-Y Limit Reached ", end="")
            if y_error > 0:
                print("+Y Limit Reached ", end="")

        center_on_tag(best_tag, TRACKING_RESOLUTION)

        loss_count = 0

        # This loop will track the tag at a much higher readout speed and lower resolution.
        while(True):
        while True:
            clock.tick()
            img = sensor.snapshot()

@ -113,7 +122,7 @@ while(True):
            # If we lose the tag then we need to find a new one.
            if not len(tags):
                # Handle a few bad frames due to tag flicker.
                if (loss_count < 2):
                if loss_count < 2:
                    loss_count += 1
                    continue
                # Reset resolution.
@ -124,7 +133,7 @@ while(True):
            loss_count = 0

            # Narrow down the blob list and highlight the blob.
            best_tag = max(tags, key = lambda x: x.decision_margin())
            best_tag = max(tags, key=lambda x: x.decision_margin())
            img.draw_rectangle(best_tag.rect())

            print(clock.fps(), "TAG cx:%d, cy:%d" % get_mapped_centroid(best_tag))
@ -133,14 +142,18 @@ while(True):
            h_ratio = best_tag.h() / sensor.height()

            # Shrink the tracking window until the tag fits.
            while (w_ratio < TRACKING_LOW_RATIO_THRESHOLD) or (h_ratio < TRACKING_LOW_RATIO_THRESHOLD):
            while (w_ratio < TRACKING_LOW_RATIO_THRESHOLD) or (
                h_ratio < TRACKING_LOW_RATIO_THRESHOLD
            ):
                readout_window_w /= 2
                readout_window_h /= 2
                w_ratio *= 2
                h_ratio *= 2

            # Enlarge the tracking window until the tag fits.
            while (TRACKING_HIGH_RATIO_THRESHOLD < w_ratio) or (TRACKING_HIGH_RATIO_THRESHOLD < h_ratio):
            while (TRACKING_HIGH_RATIO_THRESHOLD < w_ratio) or (
                TRACKING_HIGH_RATIO_THRESHOLD < h_ratio
            ):
                readout_window_w *= 2
                readout_window_h *= 2
                w_ratio /= 2

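Both tracking examples steer the camera by moving the sensor's readout window with sensor.ioctl(). A minimal sketch of setting a centered window, assuming an OV5640-class sensor that supports IOCTL_SET_READOUT_WINDOW (the window size is illustrative):

x, y, sensor_w, sensor_h = sensor.ioctl(sensor.IOCTL_GET_READOUT_WINDOW)
w, h = 1280, 720  # Illustrative readout window size in raw sensor pixels.
# Offsets position the window's top-left corner on the raw pixel array.
sensor.ioctl(sensor.IOCTL_SET_READOUT_WINDOW, ((sensor_w - w) // 2, (sensor_h - h) // 2, w, h))
sensor.skip_frames(time=500)  # Let the new window take effect.
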
@ -2,30 +2,32 @@
#
# This example shows off drawing arrows on the OpenMV Cam.

import sensor, image, time, pyb
import sensor
import time
import pyb

sensor.reset()
sensor.set_pixformat(sensor.RGB565)  # or GRAYSCALE...
sensor.set_framesize(sensor.QVGA)  # or QQVGA...
sensor.skip_frames(time = 2000)
sensor.set_pixformat(sensor.RGB565)  # or GRAYSCALE...
sensor.set_framesize(sensor.QVGA)  # or QQVGA...
sensor.skip_frames(time=2000)
clock = time.clock()

while(True):
while True:
    clock.tick()

    img = sensor.snapshot()

    for i in range(10):
        x0 = (pyb.rng() % (2*img.width())) - (img.width()//2)
        y0 = (pyb.rng() % (2*img.height())) - (img.height()//2)
        x1 = (pyb.rng() % (2*img.width())) - (img.width()//2)
        y1 = (pyb.rng() % (2*img.height())) - (img.height()//2)
        x0 = (pyb.rng() % (2 * img.width())) - (img.width() // 2)
        y0 = (pyb.rng() % (2 * img.height())) - (img.height() // 2)
        x1 = (pyb.rng() % (2 * img.width())) - (img.width() // 2)
        y1 = (pyb.rng() % (2 * img.height())) - (img.height() // 2)
        r = (pyb.rng() % 127) + 128
        g = (pyb.rng() % 127) + 128
        b = (pyb.rng() % 127) + 128

        # If the first argument is a scalar then this method expects
        # to see x0, y0, x1, and y1. Otherwise, it expects a (x0,y0,x1,y1) tuple.
        img.draw_arrow(x0, y0, x1, y1, color = (r, g, b), size = 30, thickness = 2)
        img.draw_arrow(x0, y0, x1, y1, color=(r, g, b), size=30, thickness=2)

    print(clock.fps())

@ -2,23 +2,25 @@
#
# This example shows off drawing circles on the OpenMV Cam.

import sensor, image, time, pyb
import sensor
import time
import pyb

sensor.reset()
sensor.set_pixformat(sensor.RGB565)  # or GRAYSCALE...
sensor.set_framesize(sensor.QVGA)  # or QQVGA...
sensor.skip_frames(time = 2000)
sensor.set_pixformat(sensor.RGB565)  # or GRAYSCALE...
sensor.set_framesize(sensor.QVGA)  # or QQVGA...
sensor.skip_frames(time=2000)
clock = time.clock()

while(True):
while True:
    clock.tick()

    img = sensor.snapshot()

    for i in range(10):
        x = (pyb.rng() % (2*img.width())) - (img.width()//2)
        y = (pyb.rng() % (2*img.height())) - (img.height()//2)
        radius = pyb.rng() % (max(img.height(), img.width())//2)
        x = (pyb.rng() % (2 * img.width())) - (img.width() // 2)
        y = (pyb.rng() % (2 * img.height())) - (img.height() // 2)
        radius = pyb.rng() % (max(img.height(), img.width()) // 2)

        r = (pyb.rng() % 127) + 128
        g = (pyb.rng() % 127) + 128
@ -26,6 +28,6 @@ while(True):

        # If the first argument is a scalar then this method expects
        # to see x, y, and radius. Otherwise, it expects a (x,y,radius) tuple.
        img.draw_circle(x, y, radius, color = (r, g, b), thickness = 2, fill = False)
        img.draw_circle(x, y, radius, color=(r, g, b), thickness=2, fill=False)

    print(clock.fps())

@ -1,8 +1,10 @@
# Copy image to framebuffer.
# Copy image to framebuffer.
#
# This example shows how to load and copy an image to framebuffer for testing.

import sensor, image, time
import sensor
import image
import time

# Still need to init sensor
sensor.reset()

@ -2,28 +2,30 @@
#
# This example shows off drawing crosses on the OpenMV Cam.

import sensor, image, time, pyb
import sensor
import time
import pyb

sensor.reset()
sensor.set_pixformat(sensor.RGB565)  # or GRAYSCALE...
sensor.set_framesize(sensor.QVGA)  # or QQVGA...
sensor.skip_frames(time = 2000)
sensor.set_pixformat(sensor.RGB565)  # or GRAYSCALE...
sensor.set_framesize(sensor.QVGA)  # or QQVGA...
sensor.skip_frames(time=2000)
clock = time.clock()

while(True):
while True:
    clock.tick()

    img = sensor.snapshot()

    for i in range(10):
        x = (pyb.rng() % (2*img.width())) - (img.width()//2)
        y = (pyb.rng() % (2*img.height())) - (img.height()//2)
        x = (pyb.rng() % (2 * img.width())) - (img.width() // 2)
        y = (pyb.rng() % (2 * img.height())) - (img.height() // 2)
        r = (pyb.rng() % 127) + 128
        g = (pyb.rng() % 127) + 128
        b = (pyb.rng() % 127) + 128

        # If the first argument is a scalar then this method expects
        # to see x and y. Otherwise, it expects a (x,y) tuple.
        img.draw_cross(x, y, color = (r, g, b), size = 10, thickness = 2)
        img.draw_cross(x, y, color=(r, g, b), size=10, thickness=2)

    print(clock.fps())

@ -2,24 +2,26 @@
#
# This example shows off drawing ellipses on the OpenMV Cam.

import sensor, image, time, pyb
import sensor
import time
import pyb

sensor.reset()
sensor.set_pixformat(sensor.RGB565)  # or GRAYSCALE...
sensor.set_framesize(sensor.QVGA)  # or QQVGA...
sensor.skip_frames(time = 2000)
sensor.set_pixformat(sensor.RGB565)  # or GRAYSCALE...
sensor.set_framesize(sensor.QVGA)  # or QQVGA...
sensor.skip_frames(time=2000)
clock = time.clock()

while(True):
while True:
    clock.tick()

    img = sensor.snapshot()

    for i in range(10):
        x = (pyb.rng() % (2*img.width())) - (img.width()//2)
        y = (pyb.rng() % (2*img.height())) - (img.height()//2)
        radius_x = pyb.rng() % (max(img.height(), img.width())//2)
        radius_y = pyb.rng() % (max(img.height(), img.width())//2)
        x = (pyb.rng() % (2 * img.width())) - (img.width() // 2)
        y = (pyb.rng() % (2 * img.height())) - (img.height() // 2)
        radius_x = pyb.rng() % (max(img.height(), img.width()) // 2)
        radius_y = pyb.rng() % (max(img.height(), img.width()) // 2)
        rot = pyb.rng()

        r = (pyb.rng() % 127) + 128
@ -29,7 +31,8 @@ while(True):
        # If the first argument is a scalar then this method expects
        # to see x, y, radius x, and radius y.
        # Otherwise, it expects a (x,y,radius_x,radius_y) tuple.
        img.draw_ellipse(x, y, radius_x, radius_y, rot,
            color = (r, g, b), thickness = 2, fill = False)
        img.draw_ellipse(
            x, y, radius_x, radius_y, rot, color=(r, g, b), thickness=2, fill=False
        )

    print(clock.fps())

@ -2,15 +2,16 @@
#
# This example shows off flood filling areas in the image.

import sensor, image, time
import sensor
import time

sensor.reset()
sensor.set_pixformat(sensor.RGB565)  # or GRAYSCALE...
sensor.set_framesize(sensor.QVGA)  # or QQVGA...
sensor.skip_frames(time = 2000)
sensor.set_pixformat(sensor.RGB565)  # or GRAYSCALE...
sensor.set_framesize(sensor.QVGA)  # or QQVGA...
sensor.skip_frames(time=2000)
clock = time.clock()

while(True):
while True:
    clock.tick()

    # seed_threshold controls the maximum allowed difference between
@ -28,8 +29,14 @@ while(True):

    x = sensor.width() // 2
    y = sensor.height() // 2
    img = sensor.snapshot().flood_fill(x, y, \
        seed_threshold=0.05, floating_thresholds=0.05, \
        color=(255, 0, 0), invert=False, clear_background=False)
    img = sensor.snapshot().flood_fill(
        x,
        y,
        seed_threshold=0.05,
        floating_thresholds=0.05,
        color=(255, 0, 0),
        invert=False,
        clear_background=False,
    )

    print(clock.fps())

@ -2,22 +2,23 @@
#
# This example shows off how to draw images in the frame buffer.

import sensor, image, time, pyb
import sensor
import time

sensor.reset()
sensor.set_pixformat(sensor.RGB565)  # or GRAYSCALE...
sensor.set_framesize(sensor.QVGA)  # or QQVGA...
sensor.skip_frames(time = 2000)
sensor.set_pixformat(sensor.RGB565)  # or GRAYSCALE...
sensor.set_framesize(sensor.QVGA)  # or QQVGA...
sensor.skip_frames(time=2000)
clock = time.clock()

while(True):
while True:
    clock.tick()

    img = sensor.snapshot()
    small_img = img.mean_pooled(4, 4)  # Makes a copy.
    small_img = img.mean_pooled(4, 4)  # Makes a copy.

    x = (img.width()//2)-(small_img.width()//2)
    y = (img.height()//2)-(small_img.height()//2)
    x = (img.width() // 2) - (small_img.width() // 2)
    y = (img.height() // 2) - (small_img.height() // 2)
    # Draws an image in the frame buffer. Pass an optional
    # mask image to control what pixels are drawn.
    img.draw_image(small_img, x, y, x_scale=1, y_scale=1)

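The mask argument mentioned in the comment above takes a 1-bit image whose set pixels select what gets drawn; a minimal sketch, assuming a bitmap derived from the pooled copy itself:

mask = small_img.to_bitmap(copy=True)  # Non-zero pixels will be drawn.
img.draw_image(small_img, x, y, mask=mask)
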
@ -2,12 +2,14 @@
#
# Exercise draw image with many different values for testing

import sensor, image, time, pyb
import sensor
import image
import time

sensor.reset()
sensor.set_pixformat(sensor.RGB565)  # or GRAYSCALE...
sensor.set_framesize(sensor.QVGA)  # or QQVGA...
sensor.skip_frames(time = 2000)
sensor.set_pixformat(sensor.RGB565)  # or GRAYSCALE...
sensor.set_framesize(sensor.QVGA)  # or QQVGA...
sensor.skip_frames(time=2000)
clock = time.clock()

BOUNCE = True
@ -41,7 +43,7 @@ ymin = -sensor.height() / SMALL_IMAGE_SCALE - 8
xmax = sensor.width() + 8
ymax = sensor.height() + 8

while(True):
while True:
    clock.tick()

    status = ""
@ -51,43 +53,66 @@ while(True):
    # Makes a scaled copy of the sensor
    small_img = img.mean_pooled(SMALL_IMAGE_SCALE, SMALL_IMAGE_SCALE)

    status = 'rgb565 '
    status = "rgb565 "
    if CYCLE_FORMATS:
        image_format = (value_mixer >> 8) & 3
        # To test combining different formats
        if (image_format==1): small_img = small_img.to_bitmap(copy=True); status = 'bitmap '
        if (image_format==2): small_img = small_img.to_grayscale(copy=True); status = 'grayscale '
        if (image_format==3): small_img = small_img.to_rgb565(copy=True); status = 'rgb565 '
        if image_format == 1:
            small_img = small_img.to_bitmap(copy=True)
            status = "bitmap "
        if image_format == 2:
            small_img = small_img.to_grayscale(copy=True)
            status = "grayscale "
        if image_format == 3:
            small_img = small_img.to_rgb565(copy=True)
            status = "rgb565 "

    # update small image location
    if BOUNCE:
        x = x + xd
        if (x<xmin or x>xmax):
        if x < xmin or x > xmax:
            xd = -xd

        y = y + yd
        if (y<ymin or y>ymax):
        if y < ymin or y > ymax:
            yd = -yd

    # Update small image scale
    if RESCALE:
        rescale = rescale + rd
        if (rescale<min_rescale or rescale>max_rescale):
        if rescale < min_rescale or rescale > max_rescale:
            rd = -rd

    # Find the center of the image
    scaled_width = int(small_img.width() * abs(rescale))
    scaled_height= int(small_img.height() * abs(rescale))
    scaled_height = int(small_img.height() * abs(rescale))

    apply_mask = CYCLE_MASK and ((value_mixer >> 9) & 1)
    if apply_mask:
        img.draw_image(small_img, int(x), int(y), mask=small_img.to_bitmap(copy=True), x_scale=rescale, y_scale=rescale, alpha=240, hint=image.BILINEAR | image.CENTER)
        status += 'alpha:240 '
        status += '+mask '
        img.draw_image(
            small_img,
            int(x),
            int(y),
            mask=small_img.to_bitmap(copy=True),
            x_scale=rescale,
            y_scale=rescale,
            alpha=240,
            hint=image.BILINEAR | image.CENTER,
        )
        status += "alpha:240 "
        status += "+mask "
    else:
        img.draw_image(small_img, int(x), int(y), x_scale=rescale, y_scale=rescale, alpha=128, hint=image.BILINEAR | image.CENTER)
        status += 'alpha:128 '
        img.draw_image(
            small_img,
            int(x),
            int(y),
            x_scale=rescale,
            y_scale=rescale,
            alpha=128,
            hint=image.BILINEAR | image.CENTER,
        )
        status += "alpha:128 "

    img.draw_string(8, 0, status, mono_space = False)
    img.draw_string(8, 0, status, mono_space=False)

    print(clock.fps())

@ -5,67 +5,77 @@
# area scaling along with color channel extraction, alpha blending,
# color palette application, and alpha palette application.

import sensor, image, time
import sensor
import image
import time

sensor.reset()
sensor.set_pixformat(sensor.RGB565)
sensor.set_framesize(sensor.QVGA)

hint = image.BICUBIC  # image.BILINEAR image.BICUBIC
hint = image.BICUBIC  # image.BILINEAR image.BICUBIC

small_img = image.Image(4, 4, sensor.RGB565)
small_img.set_pixel(0, 0, (0, 0, 127))
small_img.set_pixel(1, 0, (47, 255, 199))
small_img.set_pixel(2, 0, (0, 188, 255))
small_img.set_pixel(3, 0, (0, 0, 127))
small_img.set_pixel(0, 1, (0, 176, 255))
small_img.set_pixel(1, 1, (222, 0, 0 ))
small_img.set_pixel(2, 1, (50, 255, 195))
small_img.set_pixel(3, 1, (86, 255, 160))
small_img.set_pixel(0, 2, (255, 211, 0 ))
small_img.set_pixel(1, 2, (83, 255, 163))
small_img.set_pixel(0, 0, (0, 0, 127))
small_img.set_pixel(1, 0, (47, 255, 199))
small_img.set_pixel(2, 0, (0, 188, 255))
small_img.set_pixel(3, 0, (0, 0, 127))
small_img.set_pixel(0, 1, (0, 176, 255))
small_img.set_pixel(1, 1, (222, 0, 0))
small_img.set_pixel(2, 1, (50, 255, 195))
small_img.set_pixel(3, 1, (86, 255, 160))
small_img.set_pixel(0, 2, (255, 211, 0))
small_img.set_pixel(1, 2, (83, 255, 163))
small_img.set_pixel(2, 2, (255, 211, 0))
small_img.set_pixel(3, 2, (0, 80, 255))
small_img.set_pixel(0, 3, (255, 118, 0 ))
small_img.set_pixel(1, 3, (127, 0, 0 ))
small_img.set_pixel(2, 3, (0, 144, 255))
small_img.set_pixel(3, 3, (50, 255, 195))
#small_img.to_grayscale()
#small_img.to_bitmap()
small_img.set_pixel(3, 2, (0, 80, 255))
small_img.set_pixel(0, 3, (255, 118, 0))
small_img.set_pixel(1, 3, (127, 0, 0))
small_img.set_pixel(2, 3, (0, 144, 255))
small_img.set_pixel(3, 3, (50, 255, 195))
# small_img.to_grayscale()
# small_img.to_bitmap()

big_img = image.Image(128, 128, sensor.RGB565)
big_img.draw_image(small_img, 0, 0, x_scale=32, y_scale=32, hint=hint)
#big_img.to_grayscale()
#big_img.to_bitmap()
# big_img.to_grayscale()
# big_img.to_bitmap()

alpha_div = 1
alpha_value = 0
alpha_step = 2

x_bounce = sensor.width()//2
x_bounce = sensor.width() // 2
x_bounce_toggle = 1

y_bounce = sensor.height()//2
y_bounce = sensor.height() // 2
y_bounce_toggle = 1

clock = time.clock()
while(True):
while True:
    clock.tick()

    img = sensor.snapshot()
    #img.to_grayscale()
    #img.to_bitmap()
    img.draw_image(big_img, x_bounce, y_bounce,
        rgb_channel=-1, alpha=alpha_value//alpha_div,
        hint=hint|image.CENTER)
    # img.to_grayscale()
    # img.to_bitmap()
    img.draw_image(
        big_img,
        x_bounce,
        y_bounce,
        rgb_channel=-1,
        alpha=alpha_value // alpha_div,
        hint=hint | image.CENTER,
    )

    x_bounce += x_bounce_toggle
    if abs(x_bounce-(img.width()//2)) >= (img.width()//2): x_bounce_toggle = -x_bounce_toggle
    if abs(x_bounce - (img.width() // 2)) >= (img.width() // 2):
        x_bounce_toggle = -x_bounce_toggle

    y_bounce += y_bounce_toggle
    if abs(y_bounce-(img.height()//2)) >= (img.height()//2): y_bounce_toggle = -y_bounce_toggle
    if abs(y_bounce - (img.height() // 2)) >= (img.height() // 2):
        y_bounce_toggle = -y_bounce_toggle

    alpha_value += alpha_step
    if not alpha_value or alpha_value//alpha_div == 256: alpha_step = -alpha_step
    if not alpha_value or alpha_value // alpha_div == 256:
        alpha_step = -alpha_step

    print(clock.fps())

@ -5,77 +5,88 @@
# area scaling along with color channel extraction, alpha blending,
# color palette application, and alpha palette application.

import sensor, image, time
import sensor
import image
import time

sensor.reset()
sensor.set_pixformat(sensor.RGB565)
sensor.set_framesize(sensor.QVGA)

hint = image.BICUBIC  # image.BILINEAR image.BICUBIC
hint = image.BICUBIC  # image.BILINEAR image.BICUBIC

# RGB channel extraction is done after scaling normally, this
# may produce false colors. Set this flag to do it before.
#
hint |= 0  # image.EXTRACT_RGB_CHANNEL_FIRST
hint |= 0  # image.EXTRACT_RGB_CHANNEL_FIRST

# Color table application is done after scaling normally, this
# may produce false colors. Set this flag to do it before.
#
hint |= 0  # image.APPLY_COLOR_PALETTE_FIRST
hint |= 0  # image.APPLY_COLOR_PALETTE_FIRST

small_img = image.Image(4, 4, sensor.RGB565)
small_img.set_pixel(0, 0, (0, 0, 127))
small_img.set_pixel(1, 0, (47, 255, 199))
small_img.set_pixel(2, 0, (0, 188, 255))
small_img.set_pixel(3, 0, (0, 0, 127))
small_img.set_pixel(0, 1, (0, 176, 255))
small_img.set_pixel(1, 1, (222, 0, 0 ))
small_img.set_pixel(2, 1, (50, 255, 195))
small_img.set_pixel(3, 1, (86, 255, 160))
small_img.set_pixel(0, 2, (255, 211, 0 ))
small_img.set_pixel(1, 2, (83, 255, 163))
small_img.set_pixel(0, 0, (0, 0, 127))
small_img.set_pixel(1, 0, (47, 255, 199))
small_img.set_pixel(2, 0, (0, 188, 255))
small_img.set_pixel(3, 0, (0, 0, 127))
small_img.set_pixel(0, 1, (0, 176, 255))
small_img.set_pixel(1, 1, (222, 0, 0))
small_img.set_pixel(2, 1, (50, 255, 195))
small_img.set_pixel(3, 1, (86, 255, 160))
small_img.set_pixel(0, 2, (255, 211, 0))
small_img.set_pixel(1, 2, (83, 255, 163))
small_img.set_pixel(2, 2, (255, 211, 0))
small_img.set_pixel(3, 2, (0, 80, 255))
small_img.set_pixel(0, 3, (255, 118, 0 ))
small_img.set_pixel(1, 3, (127, 0, 0 ))
small_img.set_pixel(2, 3, (0, 144, 255))
small_img.set_pixel(3, 3, (50, 255, 195))
#small_img.to_grayscale()
#small_img.to_bitmap()
small_img.set_pixel(3, 2, (0, 80, 255))
small_img.set_pixel(0, 3, (255, 118, 0))
small_img.set_pixel(1, 3, (127, 0, 0))
small_img.set_pixel(2, 3, (0, 144, 255))
small_img.set_pixel(3, 3, (50, 255, 195))
# small_img.to_grayscale()
# small_img.to_bitmap()

big_img = image.Image(128, 128, sensor.RGB565)
big_img.draw_image(small_img, 0, 0, x_scale=32, y_scale=32, hint=hint)
#big_img.to_grayscale()
#big_img.to_bitmap()
# big_img.to_grayscale()
# big_img.to_bitmap()

alpha_div = 1
alpha_value = 0
alpha_step = 2

x_bounce = sensor.width()//2
x_bounce = sensor.width() // 2
x_bounce_toggle = 1

y_bounce = sensor.height()//2
y_bounce = sensor.height() // 2
y_bounce_toggle = 1

clock = time.clock()
while(True):
while True:
    clock.tick()

    img = sensor.snapshot()
    #img.to_grayscale()
    #img.to_bitmap()
    img.draw_image(big_img, x_bounce, y_bounce,
        rgb_channel=-1, alpha=alpha_value//alpha_div,
        color_palette=sensor.PALETTE_IRONBOW, hint=hint|image.CENTER)
    # img.to_grayscale()
    # img.to_bitmap()
    img.draw_image(
        big_img,
        x_bounce,
        y_bounce,
        rgb_channel=-1,
        alpha=alpha_value // alpha_div,
        color_palette=sensor.PALETTE_IRONBOW,
        hint=hint | image.CENTER,
    )

    x_bounce += x_bounce_toggle
    if abs(x_bounce-(img.width()//2)) >= (img.width()//2): x_bounce_toggle = -x_bounce_toggle
    if abs(x_bounce - (img.width() // 2)) >= (img.width() // 2):
        x_bounce_toggle = -x_bounce_toggle

    y_bounce += y_bounce_toggle
    if abs(y_bounce-(img.height()//2)) >= (img.height()//2): y_bounce_toggle = -y_bounce_toggle
    if abs(y_bounce - (img.height() // 2)) >= (img.height() // 2):
        y_bounce_toggle = -y_bounce_toggle

    alpha_value += alpha_step
    if not alpha_value or alpha_value//alpha_div == 256: alpha_step = -alpha_step
    if not alpha_value or alpha_value // alpha_div == 256:
        alpha_step = -alpha_step

    print(clock.fps())

@ -5,38 +5,40 @@
# area scaling along with color channel extraction, alpha blending,
# color palette application, and alpha palette application.

import sensor, image, time
import sensor
import image
import time

sensor.reset()
sensor.set_pixformat(sensor.RGB565)
sensor.set_framesize(sensor.QVGA)

hint = image.BICUBIC  # image.BILINEAR image.BICUBIC
hint = image.BICUBIC  # image.BILINEAR image.BICUBIC

small_img = image.Image(4, 4, sensor.RGB565)
small_img.set_pixel(0, 0, (0, 0, 127))
small_img.set_pixel(1, 0, (47, 255, 199))
small_img.set_pixel(2, 0, (0, 188, 255))
small_img.set_pixel(3, 0, (0, 0, 127))
small_img.set_pixel(0, 1, (0, 176, 255))
small_img.set_pixel(1, 1, (222, 0, 0 ))
small_img.set_pixel(2, 1, (50, 255, 195))
small_img.set_pixel(3, 1, (86, 255, 160))
small_img.set_pixel(0, 2, (255, 211, 0 ))
small_img.set_pixel(1, 2, (83, 255, 163))
small_img.set_pixel(0, 0, (0, 0, 127))
small_img.set_pixel(1, 0, (47, 255, 199))
small_img.set_pixel(2, 0, (0, 188, 255))
small_img.set_pixel(3, 0, (0, 0, 127))
small_img.set_pixel(0, 1, (0, 176, 255))
small_img.set_pixel(1, 1, (222, 0, 0))
small_img.set_pixel(2, 1, (50, 255, 195))
small_img.set_pixel(3, 1, (86, 255, 160))
small_img.set_pixel(0, 2, (255, 211, 0))
small_img.set_pixel(1, 2, (83, 255, 163))
small_img.set_pixel(2, 2, (255, 211, 0))
small_img.set_pixel(3, 2, (0, 80, 255))
small_img.set_pixel(0, 3, (255, 118, 0 ))
small_img.set_pixel(1, 3, (127, 0, 0 ))
small_img.set_pixel(2, 3, (0, 144, 255))
small_img.set_pixel(3, 3, (50, 255, 195))
#small_img.to_grayscale()
#small_img.to_bitmap()
small_img.set_pixel(3, 2, (0, 80, 255))
small_img.set_pixel(0, 3, (255, 118, 0))
small_img.set_pixel(1, 3, (127, 0, 0))
small_img.set_pixel(2, 3, (0, 144, 255))
small_img.set_pixel(3, 3, (50, 255, 195))
# small_img.to_grayscale()
# small_img.to_bitmap()

big_img = image.Image(128, 128, sensor.RGB565)
big_img.draw_image(small_img, 0, 0, x_scale=32, y_scale=32, hint=hint)
#big_img.to_grayscale()
#big_img.to_bitmap()
# big_img.to_grayscale()
# big_img.to_bitmap()

alpha_lut = image.Image(256, 1, sensor.GRAYSCALE)
for i in range(256):
@ -46,30 +48,39 @@ alpha_div = 1
alpha_value = 0
alpha_step = 2

x_bounce = sensor.width()//2
x_bounce = sensor.width() // 2
x_bounce_toggle = 1

y_bounce = sensor.height()//2
y_bounce = sensor.height() // 2
y_bounce_toggle = 1

clock = time.clock()
while(True):
while True:
    clock.tick()

    img = sensor.snapshot()
    #img.to_grayscale()
    #img.to_bitmap()
    img.draw_image(big_img, x_bounce, y_bounce,
        rgb_channel=-1, alpha=alpha_value//alpha_div,
        alpha_palette=alpha_lut, hint=hint|image.CENTER)
    # img.to_grayscale()
    # img.to_bitmap()
    img.draw_image(
        big_img,
        x_bounce,
        y_bounce,
        rgb_channel=-1,
        alpha=alpha_value // alpha_div,
        alpha_palette=alpha_lut,
        hint=hint | image.CENTER,
    )

    x_bounce += x_bounce_toggle
    if abs(x_bounce-(img.width()//2)) >= (img.width()//2): x_bounce_toggle = -x_bounce_toggle
    if abs(x_bounce - (img.width() // 2)) >= (img.width() // 2):
        x_bounce_toggle = -x_bounce_toggle

    y_bounce += y_bounce_toggle
    if abs(y_bounce-(img.height()//2)) >= (img.height()//2): y_bounce_toggle = -y_bounce_toggle
    if abs(y_bounce - (img.height() // 2)) >= (img.height() // 2):
        y_bounce_toggle = -y_bounce_toggle

    alpha_value += alpha_step
    if not alpha_value or alpha_value//alpha_div == 256: alpha_step = -alpha_step
    if not alpha_value or alpha_value // alpha_div == 256:
        alpha_step = -alpha_step

    print(clock.fps())

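The body of the alpha_lut loop is elided by the hunk above. Purely as an illustration of the idea (the committed example may use a different curve), a linear ramp would make dark palette entries transparent and bright ones opaque:

alpha_lut = image.Image(256, 1, sensor.GRAYSCALE)
for i in range(256):
    alpha_lut.set_pixel(i, 0, i)  # Illustrative linear ramp, not the committed values.
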
@ -5,48 +5,50 @@
# area scaling along with color channel extraction, alpha blending,
# color palette application, and alpha palette application.

import sensor, image, time
import sensor
import image
import time

sensor.reset()
sensor.set_pixformat(sensor.RGB565)
sensor.set_framesize(sensor.QVGA)

hint = image.BICUBIC  # image.BILINEAR image.BICUBIC
hint = image.BICUBIC  # image.BILINEAR image.BICUBIC

# RGB channel extraction is done after scaling normally, this
# may produce false colors. Set this flag to do it before.
#
hint |= 0  # image.EXTRACT_RGB_CHANNEL_FIRST
hint |= 0  # image.EXTRACT_RGB_CHANNEL_FIRST

# Color table application is done after scaling normally, this
# may produce false colors. Set this flag to do it before.
#
hint |= 0  # image.APPLY_COLOR_PALETTE_FIRST
hint |= 0  # image.APPLY_COLOR_PALETTE_FIRST

small_img = image.Image(4, 4, sensor.RGB565)
small_img.set_pixel(0, 0, (0, 0, 127))
small_img.set_pixel(1, 0, (47, 255, 199))
small_img.set_pixel(2, 0, (0, 188, 255))
small_img.set_pixel(3, 0, (0, 0, 127))
small_img.set_pixel(0, 1, (0, 176, 255))
small_img.set_pixel(1, 1, (222, 0, 0 ))
small_img.set_pixel(2, 1, (50, 255, 195))
small_img.set_pixel(3, 1, (86, 255, 160))
small_img.set_pixel(0, 2, (255, 211, 0 ))
small_img.set_pixel(1, 2, (83, 255, 163))
small_img.set_pixel(0, 0, (0, 0, 127))
small_img.set_pixel(1, 0, (47, 255, 199))
small_img.set_pixel(2, 0, (0, 188, 255))
small_img.set_pixel(3, 0, (0, 0, 127))
small_img.set_pixel(0, 1, (0, 176, 255))
small_img.set_pixel(1, 1, (222, 0, 0))
small_img.set_pixel(2, 1, (50, 255, 195))
small_img.set_pixel(3, 1, (86, 255, 160))
small_img.set_pixel(0, 2, (255, 211, 0))
small_img.set_pixel(1, 2, (83, 255, 163))
small_img.set_pixel(2, 2, (255, 211, 0))
small_img.set_pixel(3, 2, (0, 80, 255))
small_img.set_pixel(0, 3, (255, 118, 0 ))
small_img.set_pixel(1, 3, (127, 0, 0 ))
small_img.set_pixel(2, 3, (0, 144, 255))
small_img.set_pixel(3, 3, (50, 255, 195))
#small_img.to_grayscale()
#small_img.to_bitmap()
small_img.set_pixel(3, 2, (0, 80, 255))
small_img.set_pixel(0, 3, (255, 118, 0))
small_img.set_pixel(1, 3, (127, 0, 0))
small_img.set_pixel(2, 3, (0, 144, 255))
small_img.set_pixel(3, 3, (50, 255, 195))
# small_img.to_grayscale()
# small_img.to_bitmap()

big_img = image.Image(128, 128, sensor.RGB565)
big_img.draw_image(small_img, 0, 0, x_scale=32, y_scale=32, hint=hint)
#big_img.to_grayscale()
#big_img.to_bitmap()
# big_img.to_grayscale()
# big_img.to_bitmap()

alpha_lut = image.Image(256, 1, sensor.GRAYSCALE)
for i in range(256):
@ -56,30 +58,40 @@ alpha_div = 1
alpha_value = 0
alpha_step = 2

x_bounce = sensor.width()//2
x_bounce = sensor.width() // 2
x_bounce_toggle = 1

y_bounce = sensor.height()//2
y_bounce = sensor.height() // 2
y_bounce_toggle = 1

clock = time.clock()
while(True):
while True:
    clock.tick()

    img = sensor.snapshot()
    #img.to_grayscale()
    #img.to_bitmap()
    img.draw_image(big_img, x_bounce, y_bounce,
        rgb_channel=-1, alpha=alpha_value//alpha_div,
        color_palette=sensor.PALETTE_IRONBOW, alpha_palette=alpha_lut, hint=hint|image.CENTER)
    # img.to_grayscale()
    # img.to_bitmap()
    img.draw_image(
        big_img,
        x_bounce,
        y_bounce,
        rgb_channel=-1,
        alpha=alpha_value // alpha_div,
        color_palette=sensor.PALETTE_IRONBOW,
        alpha_palette=alpha_lut,
        hint=hint | image.CENTER,
    )

    x_bounce += x_bounce_toggle
    if abs(x_bounce-(img.width()//2)) >= (img.width()//2): x_bounce_toggle = -x_bounce_toggle
    if abs(x_bounce - (img.width() // 2)) >= (img.width() // 2):
        x_bounce_toggle = -x_bounce_toggle

    y_bounce += y_bounce_toggle
    if abs(y_bounce-(img.height()//2)) >= (img.height()//2): y_bounce_toggle = -y_bounce_toggle
    if abs(y_bounce - (img.height() // 2)) >= (img.height() // 2):
        y_bounce_toggle = -y_bounce_toggle

    alpha_value += alpha_step
    if not alpha_value or alpha_value//alpha_div == 256: alpha_step = -alpha_step
    if not alpha_value or alpha_value // alpha_div == 256:
        alpha_step = -alpha_step

    print(clock.fps())

@ -7,41 +7,43 @@

# DISABLE THE FRAME BUFFER TO SEE THE REAL FPS

import sensor, image, time
import sensor
import image
import time

up_hint = 0  # image.BILINEAR image.BICUBIC
down_hint = image.AREA  # image.BILINEAR image.BICUBIC image.AREA
up_hint = 0  # image.BILINEAR image.BICUBIC
down_hint = image.AREA  # image.BILINEAR image.BICUBIC image.AREA

bounce_div = 128

medium_img = image.Image(32, 32, sensor.RGB565, copy_to_fb=True)
#medium_img.to_grayscale()
#medium_img.to_bitmap()
# medium_img.to_grayscale()
# medium_img.to_bitmap()

small_img = image.Image(4, 4, sensor.RGB565)
small_img.set_pixel(0, 0, (0, 0, 127))
small_img.set_pixel(1, 0, (47, 255, 199))
small_img.set_pixel(2, 0, (0, 188, 255))
small_img.set_pixel(3, 0, (0, 0, 127))
small_img.set_pixel(0, 1, (0, 176, 255))
small_img.set_pixel(1, 1, (222, 0, 0 ))
small_img.set_pixel(2, 1, (50, 255, 195))
small_img.set_pixel(3, 1, (86, 255, 160))
small_img.set_pixel(0, 2, (255, 211, 0 ))
small_img.set_pixel(1, 2, (83, 255, 163))
small_img.set_pixel(0, 0, (0, 0, 127))
small_img.set_pixel(1, 0, (47, 255, 199))
small_img.set_pixel(2, 0, (0, 188, 255))
small_img.set_pixel(3, 0, (0, 0, 127))
small_img.set_pixel(0, 1, (0, 176, 255))
small_img.set_pixel(1, 1, (222, 0, 0))
small_img.set_pixel(2, 1, (50, 255, 195))
small_img.set_pixel(3, 1, (86, 255, 160))
small_img.set_pixel(0, 2, (255, 211, 0))
small_img.set_pixel(1, 2, (83, 255, 163))
small_img.set_pixel(2, 2, (255, 211, 0))
small_img.set_pixel(3, 2, (0, 80, 255))
small_img.set_pixel(0, 3, (255, 118, 0 ))
small_img.set_pixel(1, 3, (127, 0, 0 ))
small_img.set_pixel(2, 3, (0, 144, 255))
small_img.set_pixel(3, 3, (50, 255, 195))
#small_img.to_grayscale()
#small_img.to_bitmap()
small_img.set_pixel(3, 2, (0, 80, 255))
small_img.set_pixel(0, 3, (255, 118, 0))
small_img.set_pixel(1, 3, (127, 0, 0))
small_img.set_pixel(2, 3, (0, 144, 255))
small_img.set_pixel(3, 3, (50, 255, 195))
# small_img.to_grayscale()
# small_img.to_bitmap()

big_img = image.Image(128, 128, sensor.RGB565)
big_img.draw_image(small_img, 0, 0, x_scale=32, y_scale=32, hint=up_hint)
#big_img.to_grayscale()
#big_img.to_bitmap()
# big_img.to_grayscale()
# big_img.to_bitmap()

x_bounce = 0
x_bounce_toggle = 0
@ -50,20 +52,26 @@ y_bounce = 0
y_bounce_toggle = 0

clock = time.clock()
while(True):
while True:
    clock.tick()

    medium_img.clear()
    medium_img.draw_image(big_img,
        x_bounce // bounce_div, y_bounce // bounce_div,
        x_scale=0.25, y_scale=0.25,
        hint=down_hint)
    medium_img.draw_image(
        big_img,
        x_bounce // bounce_div,
        y_bounce // bounce_div,
        x_scale=0.25,
        y_scale=0.25,
        hint=down_hint,
    )
    sensor.flush()

    x_bounce += x_bounce_toggle
    if abs(x_bounce // bounce_div) >= (medium_img.width()*1.1): x_bounce_toggle = -x_bounce_toggle
    if abs(x_bounce // bounce_div) >= (medium_img.width() * 1.1):
        x_bounce_toggle = -x_bounce_toggle

    y_bounce += y_bounce_toggle
    if abs(y_bounce // bounce_div) >= (medium_img.height()*1.1): y_bounce_toggle = -y_bounce_toggle
    if abs(y_bounce // bounce_div) >= (medium_img.height() * 1.1):
        y_bounce_toggle = -y_bounce_toggle

    print(clock.fps())

@ -7,35 +7,37 @@

# DISABLE THE FRAME BUFFER TO SEE THE REAL FPS

import sensor, image, time
import sensor
import image
import time

hint = 0  # image.BILINEAR image.BICUBIC
hint = 0  # image.BILINEAR image.BICUBIC

bounce_div = 32

big_img = image.Image(128, 128, sensor.RGB565, copy_to_fb=True)
#big_img.to_grayscale()
#big_img.to_bitmap()
# big_img.to_grayscale()
# big_img.to_bitmap()

small_img = image.Image(4, 4, sensor.RGB565)
small_img.set_pixel(0, 0, (0, 0, 127))
small_img.set_pixel(1, 0, (47, 255, 199))
small_img.set_pixel(2, 0, (0, 188, 255))
small_img.set_pixel(3, 0, (0, 0, 127))
small_img.set_pixel(0, 1, (0, 176, 255))
small_img.set_pixel(1, 1, (222, 0, 0 ))
small_img.set_pixel(2, 1, (50, 255, 195))
small_img.set_pixel(3, 1, (86, 255, 160))
small_img.set_pixel(0, 2, (255, 211, 0 ))
small_img.set_pixel(1, 2, (83, 255, 163))
small_img.set_pixel(0, 0, (0, 0, 127))
small_img.set_pixel(1, 0, (47, 255, 199))
small_img.set_pixel(2, 0, (0, 188, 255))
small_img.set_pixel(3, 0, (0, 0, 127))
small_img.set_pixel(0, 1, (0, 176, 255))
small_img.set_pixel(1, 1, (222, 0, 0))
small_img.set_pixel(2, 1, (50, 255, 195))
small_img.set_pixel(3, 1, (86, 255, 160))
small_img.set_pixel(0, 2, (255, 211, 0))
small_img.set_pixel(1, 2, (83, 255, 163))
small_img.set_pixel(2, 2, (255, 211, 0))
small_img.set_pixel(3, 2, (0, 80, 255))
small_img.set_pixel(0, 3, (255, 118, 0 ))
small_img.set_pixel(1, 3, (127, 0, 0 ))
small_img.set_pixel(2, 3, (0, 144, 255))
small_img.set_pixel(3, 3, (50, 255, 195))
#small_img.to_grayscale()
#small_img.to_bitmap()
small_img.set_pixel(3, 2, (0, 80, 255))
small_img.set_pixel(0, 3, (255, 118, 0))
small_img.set_pixel(1, 3, (127, 0, 0))
small_img.set_pixel(2, 3, (0, 144, 255))
small_img.set_pixel(3, 3, (50, 255, 195))
# small_img.to_grayscale()
# small_img.to_bitmap()

x_bounce = 0
x_bounce_toggle = 0
@ -44,20 +46,26 @@ y_bounce = 0
y_bounce_toggle = 0

clock = time.clock()
while(True):
while True:
    clock.tick()

    big_img.clear()
    big_img.draw_image(small_img,
        x_bounce // bounce_div, y_bounce // bounce_div,
        x_scale=32, y_scale=32,
        hint=hint)
    big_img.draw_image(
        small_img,
        x_bounce // bounce_div,
        y_bounce // bounce_div,
        x_scale=32,
        y_scale=32,
        hint=hint,
    )
    sensor.flush()

    x_bounce += x_bounce_toggle
    if abs(x_bounce // bounce_div) >= (big_img.width()*1.1): x_bounce_toggle = -x_bounce_toggle
    if abs(x_bounce // bounce_div) >= (big_img.width() * 1.1):
        x_bounce_toggle = -x_bounce_toggle

    y_bounce += y_bounce_toggle
    if abs(y_bounce // bounce_div) >= (big_img.height()*1.1): y_bounce_toggle = -y_bounce_toggle
    if abs(y_bounce // bounce_div) >= (big_img.height() * 1.1):
        y_bounce_toggle = -y_bounce_toggle

    print(clock.fps())

@ -2,12 +2,14 @@
#
# This example shows off how to draw images in the frame buffer with a custom generated color palette.

import sensor, image, time, pyb
import sensor
import image
import time

sensor.reset()
sensor.set_pixformat(sensor.GRAYSCALE)  # or GRAYSCALE...
sensor.set_framesize(sensor.QQVGA)  # or QQVGA...
sensor.skip_frames(time = 2000)
sensor.set_pixformat(sensor.GRAYSCALE)  # or GRAYSCALE...
sensor.set_framesize(sensor.QQVGA)  # or QQVGA...
sensor.skip_frames(time=2000)
clock = time.clock()

# the color palette is actually an image, this allows you to use image ops to create palettes
@ -20,11 +22,16 @@ for i, color in enumerate(palette_source_colors):
    palette_source_color_image[i] = color

# Scale the image to palette width and smooth it
palette = image.Image(256,1, sensor.RGB565)
palette.draw_image(palette_source_color_image, 0, 0, x_scale=palette.width() / palette_source_color_image.width())
palette.mean(int(palette.width() / palette_source_color_image.width()/2))
palette = image.Image(256, 1, sensor.RGB565)
palette.draw_image(
    palette_source_color_image,
    0,
    0,
    x_scale=palette.width() / palette_source_color_image.width(),
)
palette.mean(int(palette.width() / palette_source_color_image.width() / 2))

while(True):
while True:
    clock.tick()

    img = sensor.snapshot()
@ -37,7 +44,20 @@ while(True):
    palette_scale_x = (sensor.width() - palette_boundary_inset * 2) / palette.width()

    img.draw_image(img_copy, 0, 0, color_palette=palette)
    img.draw_image(palette, palette_boundary_inset, palette_boundary_inset, x_scale=palette_scale_x, y_scale=8)
    img.draw_rectangle(palette_boundary_inset, palette_boundary_inset, int(palette.width()*palette_scale_x), 8, color=(255,255,255), thickness=1)
    img.draw_image(
        palette,
        palette_boundary_inset,
        palette_boundary_inset,
        x_scale=palette_scale_x,
        y_scale=8,
    )
    img.draw_rectangle(
        palette_boundary_inset,
        palette_boundary_inset,
        int(palette.width() * palette_scale_x),
        8,
        color=(255, 255, 255),
        thickness=1,
    )

    print(clock.fps())

@ -3,22 +3,24 @@
# This example shows off drawing keypoints on the OpenMV Cam. Usually you call draw_keypoints()
# on a keypoints object but you can also call it on a list of 3-value tuples...

import sensor, image, time, pyb
import sensor
import time
import pyb

sensor.reset()
sensor.set_pixformat(sensor.RGB565)  # or GRAYSCALE...
sensor.set_framesize(sensor.QVGA)  # or QQVGA...
sensor.skip_frames(time = 2000)
sensor.set_pixformat(sensor.RGB565)  # or GRAYSCALE...
sensor.set_framesize(sensor.QVGA)  # or QQVGA...
sensor.skip_frames(time=2000)
clock = time.clock()

while(True):
while True:
    clock.tick()

    img = sensor.snapshot()

    for i in range(20):
        x = (pyb.rng() % (2*img.width())) - (img.width()//2)
        y = (pyb.rng() % (2*img.height())) - (img.height()//2)
        x = (pyb.rng() % (2 * img.width())) - (img.width() // 2)
        y = (pyb.rng() % (2 * img.height())) - (img.height() // 2)
        rot = pyb.rng() % 360

        r = (pyb.rng() % 127) + 128
@ -26,6 +28,8 @@ while(True):
        b = (pyb.rng() % 127) + 128

        # This method draws a keypoints object or a list of (x, y, rot) tuples...
        img.draw_keypoints([(x, y, rot)], color = (r, g, b), size = 20, thickness = 2, fill = False)
        img.draw_keypoints(
            [(x, y, rot)], color=(r, g, b), size=20, thickness=2, fill=False
        )

    print(clock.fps())

@ -2,30 +2,32 @@
#
# This example shows off drawing lines on the OpenMV Cam.

import sensor, image, time, pyb
import sensor
import time
import pyb

sensor.reset()
sensor.set_pixformat(sensor.RGB565) # or GRAYSCALE...
sensor.set_framesize(sensor.QVGA) # or QQVGA...
sensor.skip_frames(time = 2000)
sensor.set_pixformat(sensor.RGB565)  # or GRAYSCALE...
sensor.set_framesize(sensor.QVGA)  # or QQVGA...
sensor.skip_frames(time=2000)
clock = time.clock()

while(True):
while True:
    clock.tick()

    img = sensor.snapshot()

    for i in range(10):
        x0 = (pyb.rng() % (2*img.width())) - (img.width()//2)
        y0 = (pyb.rng() % (2*img.height())) - (img.height()//2)
        x1 = (pyb.rng() % (2*img.width())) - (img.width()//2)
        y1 = (pyb.rng() % (2*img.height())) - (img.height()//2)
        x0 = (pyb.rng() % (2 * img.width())) - (img.width() // 2)
        y0 = (pyb.rng() % (2 * img.height())) - (img.height() // 2)
        x1 = (pyb.rng() % (2 * img.width())) - (img.width() // 2)
        y1 = (pyb.rng() % (2 * img.height())) - (img.height() // 2)
        r = (pyb.rng() % 127) + 128
        g = (pyb.rng() % 127) + 128
        b = (pyb.rng() % 127) + 128

        # If the first argument is a scalar then this method expects
        # to see x0, y0, x1, and y1. Otherwise, it expects a (x0,y0,x1,y1) tuple.
        img.draw_line(x0, y0, x1, y1, color = (r, g, b), thickness = 2)
        img.draw_line(x0, y0, x1, y1, color=(r, g, b), thickness=2)

    print(clock.fps())
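
As the comment above says, draw_line() also accepts a single (x0,y0,x1,y1) tuple in place of four scalars. A minimal sketch:

    line = (0, 0, img.width() - 1, img.height() - 1)
    img.draw_line(line, color=(255, 0, 0), thickness=2)  # equivalent to draw_line(x0, y0, x1, y1, ...)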
@ -2,30 +2,32 @@
#
# This example shows off drawing rectangles on the OpenMV Cam.

import sensor, image, time, pyb
import sensor
import time
import pyb

sensor.reset()
sensor.set_pixformat(sensor.RGB565) # or GRAYSCALE...
sensor.set_framesize(sensor.QVGA) # or QQVGA...
sensor.skip_frames(time = 2000)
sensor.set_pixformat(sensor.RGB565)  # or GRAYSCALE...
sensor.set_framesize(sensor.QVGA)  # or QQVGA...
sensor.skip_frames(time=2000)
clock = time.clock()

while(True):
while True:
    clock.tick()

    img = sensor.snapshot()

    for i in range(10):
        x = (pyb.rng() % (2*img.width())) - (img.width()//2)
        y = (pyb.rng() % (2*img.height())) - (img.height()//2)
        w = (pyb.rng() % (img.width()//2))
        h = (pyb.rng() % (img.height()//2))
        x = (pyb.rng() % (2 * img.width())) - (img.width() // 2)
        y = (pyb.rng() % (2 * img.height())) - (img.height() // 2)
        w = pyb.rng() % (img.width() // 2)
        h = pyb.rng() % (img.height() // 2)
        r = (pyb.rng() % 127) + 128
        g = (pyb.rng() % 127) + 128
        b = (pyb.rng() % 127) + 128

        # If the first argument is a scalar then this method expects
        # to see x, y, w, and h. Otherwise, it expects a (x,y,w,h) tuple.
        img.draw_rectangle(x, y, w, h, color = (r, g, b), thickness = 2, fill = False)
        img.draw_rectangle(x, y, w, h, color=(r, g, b), thickness=2, fill=False)

    print(clock.fps())
@ -2,22 +2,24 @@
#
# This example shows off drawing text on the OpenMV Cam.

import sensor, image, time, pyb
import sensor
import time
import pyb

sensor.reset()
sensor.set_pixformat(sensor.RGB565) # or GRAYSCALE...
sensor.set_framesize(sensor.QVGA) # or QQVGA...
sensor.skip_frames(time = 2000)
sensor.set_pixformat(sensor.RGB565)  # or GRAYSCALE...
sensor.set_framesize(sensor.QVGA)  # or QQVGA...
sensor.skip_frames(time=2000)
clock = time.clock()

while(True):
while True:
    clock.tick()

    img = sensor.snapshot()

    for i in range(10):
        x = (pyb.rng() % (2*img.width())) - (img.width()//2)
        y = (pyb.rng() % (2*img.height())) - (img.height()//2)
        x = (pyb.rng() % (2 * img.width())) - (img.width() // 2)
        y = (pyb.rng() % (2 * img.height())) - (img.height() // 2)
        r = (pyb.rng() % 127) + 128
        g = (pyb.rng() % 127) + 128
        b = (pyb.rng() % 127) + 128
@ -26,8 +28,19 @@ while(True):
        # to see x, y, and text. Otherwise, it expects a (x,y,text) tuple.

        # Character and string rotation can be done at 0, 90, 180, 270, etc. degrees.
        img.draw_string(x, y, "Hello World!", color = (r, g, b), scale = 2, mono_space = False,
                        char_rotation = 0, char_hmirror = False, char_vflip = False,
                        string_rotation = 0, string_hmirror = False, string_vflip = False)
        img.draw_string(
            x,
            y,
            "Hello World!",
            color=(r, g, b),
            scale=2,
            mono_space=False,
            char_rotation=0,
            char_hmirror=False,
            char_vflip=False,
            string_rotation=0,
            string_hmirror=False,
            string_vflip=False,
        )

    print(clock.fps())
@ -6,15 +6,16 @@
# the image contrast versus a global histogram equalization. Additionally,
# you may specify a clip limit to prevent the contrast from going wild.

import sensor, image, time
import sensor
import time

sensor.reset()
sensor.set_pixformat(sensor.RGB565)
sensor.set_framesize(sensor.QQVGA)
sensor.skip_frames(time = 2000)
sensor.skip_frames(time=2000)
clock = time.clock()

while(True):
while True:
    clock.tick()

    # A clip_limit of < 0 gives you normal adaptive histogram equalization
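
The hunk cuts off just before the call this comment leads into; for context, adaptive histogram equalization is a single histeq() call. A minimal sketch, with clip_limit=3 as an arbitrary starting value:

    img = sensor.snapshot().histeq(adaptive=True, clip_limit=3)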
@ -2,20 +2,21 @@
#
# This example shows off using the gaussian filter to blur images.

import sensor, image, time
import sensor
import time

sensor.reset() # Initialize the camera sensor.
sensor.set_pixformat(sensor.GRAYSCALE) # or sensor.RGB565
sensor.set_framesize(sensor.QQVGA) # or sensor.QVGA (or others)
sensor.skip_frames(time = 2000) # Let new settings take effect.
clock = time.clock() # Tracks FPS.
sensor.reset()  # Initialize the camera sensor.
sensor.set_pixformat(sensor.GRAYSCALE)  # or sensor.RGB565
sensor.set_framesize(sensor.QQVGA)  # or sensor.QVGA (or others)
sensor.skip_frames(time=2000)  # Let new settings take effect.
clock = time.clock()  # Tracks FPS.

while(True):
    clock.tick() # Track elapsed milliseconds between snapshots().
    img = sensor.snapshot() # Take a picture and return the image.
while True:
    clock.tick()  # Track elapsed milliseconds between snapshots().
    img = sensor.snapshot()  # Take a picture and return the image.

    # Run the kernel on every pixel of the image.
    img.gaussian(1)

    print(clock.fps()) # Note: Your OpenMV Cam runs about half as fast while
    print(clock.fps())  # Note: Your OpenMV Cam runs about half as fast while
    # connected to your computer. The FPS should increase once disconnected.
@ -4,15 +4,16 @@
# filter works by joining similar pixel areas of an image and replacing
# the pixels in those areas with the area mean.

import sensor, image, time
import sensor
import time

sensor.reset()
sensor.set_pixformat(sensor.RGB565) # or GRAYSCALE...
sensor.set_framesize(sensor.QVGA) # or QQVGA...
sensor.skip_frames(time = 2000)
sensor.set_pixformat(sensor.RGB565)  # or GRAYSCALE...
sensor.set_framesize(sensor.QVGA)  # or QQVGA...
sensor.skip_frames(time=2000)
clock = time.clock()

while(True):
while True:
    clock.tick()

    # seed_threshold controls the maximum area growth of a colored
@ -2,17 +2,18 @@
#
# This example shows off using the bilateral filter on color images.

import sensor, image, time
import sensor
import time

sensor.reset() # Initialize the camera sensor.
sensor.set_pixformat(sensor.RGB565) # or sensor.GRAYSCALE
sensor.set_framesize(sensor.QQVGA) # or sensor.QVGA (or others)
sensor.skip_frames(time = 2000) # Let new settings take effect.
clock = time.clock() # Tracks FPS.
sensor.reset()  # Initialize the camera sensor.
sensor.set_pixformat(sensor.RGB565)  # or sensor.GRAYSCALE
sensor.set_framesize(sensor.QQVGA)  # or sensor.QVGA (or others)
sensor.skip_frames(time=2000)  # Let new settings take effect.
clock = time.clock()  # Tracks FPS.

while(True):
    clock.tick() # Track elapsed milliseconds between snapshots().
    img = sensor.snapshot() # Take a picture and return the image.
while True:
    clock.tick()  # Track elapsed milliseconds between snapshots().
    img = sensor.snapshot()  # Take a picture and return the image.

    # color_sigma controls how close pixels have to be in color to be
    # blurred together. A smaller value means they have to be closer.
@ -29,5 +30,5 @@ while(True):
    # color_sigma/space_sigma too aggressively. Increase the sigma values until
    # the defects go away if you see them.

    print(clock.fps()) # Note: Your OpenMV Cam runs about half as fast while
    print(clock.fps())  # Note: Your OpenMV Cam runs about half as fast while
    # connected to your computer. The FPS should increase once disconnected.
@ -3,21 +3,21 @@
# This script shows off the binary image filter. You may pass binary any
# number of thresholds to segment the image by.

import sensor, image, time
import sensor
import time

sensor.reset()
sensor.set_framesize(sensor.QVGA)
sensor.set_pixformat(sensor.RGB565)
sensor.skip_frames(time = 2000)
sensor.skip_frames(time=2000)
clock = time.clock()

# Use the Tools -> Machine Vision -> Threshold Editor to pick better thresholds.
red_threshold = (0,100, 0,127, 0,127) # L A B
green_threshold = (0,100, -128,0, 0,127) # L A B
blue_threshold = (0,100, -128,127, -128,0) # L A B

while(True):
red_threshold = (0, 100, 0, 127, 0, 127)  # L A B
green_threshold = (0, 100, -128, 0, 0, 127)  # L A B
blue_threshold = (0, 100, -128, 127, -128, 0)  # L A B

while True:
    # Test red threshold
    for i in range(100):
        clock.tick()
@ -43,19 +43,19 @@ while(True):
    for i in range(100):
        clock.tick()
        img = sensor.snapshot()
        img.binary([red_threshold], invert = 1)
        img.binary([red_threshold], invert=1)
        print(clock.fps())

    # Test not green threshold
    for i in range(100):
        clock.tick()
        img = sensor.snapshot()
        img.binary([green_threshold], invert = 1)
        img.binary([green_threshold], invert=1)
        print(clock.fps())

    # Test not blue threshold
    for i in range(100):
        clock.tick()
        img = sensor.snapshot()
        img.binary([blue_threshold], invert = 1)
        img.binary([blue_threshold], invert=1)
        print(clock.fps())
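
Because binary() takes a list, several ranges can be segmented in one call. A minimal sketch using the thresholds defined above:

        img.binary([red_threshold, blue_threshold])  # pixels matching either range become white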
@ -7,19 +7,20 @@
# histeq() on the image without outliers from oversaturated
# parts of the image breaking the algorithm...

import sensor, image, time
import sensor
import time

sensor.reset() # Initialize the camera sensor.
sensor.set_pixformat(sensor.RGB565) # or sensor.GRAYSCALE
sensor.set_framesize(sensor.QQVGA) # or sensor.QVGA (or others)
sensor.skip_frames(time = 2000) # Let new settings take effect.
clock = time.clock() # Tracks FPS.
sensor.reset()  # Initialize the camera sensor.
sensor.set_pixformat(sensor.RGB565)  # or sensor.GRAYSCALE
sensor.set_framesize(sensor.QQVGA)  # or sensor.QVGA (or others)
sensor.skip_frames(time=2000)  # Let new settings take effect.
clock = time.clock()  # Tracks FPS.

thresholds = (90, 100, -128, 127, -128, 127)

while(True):
    clock.tick() # Track elapsed milliseconds between snapshots().
while True:
    clock.tick()  # Track elapsed milliseconds between snapshots().
    img = sensor.snapshot().binary([thresholds], invert=False, zero=True)

    print(clock.fps()) # Note: Your OpenMV Cam runs about half as fast while
    print(clock.fps())  # Note: Your OpenMV Cam runs about half as fast while
    # connected to your computer. The FPS should increase once disconnected.
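
A note on the call above: with zero=True the matching pixels (here, the brightest values) are zeroed instead of the image being binarized, which is what lets a later histeq() run without oversaturated outliers. A hedged sketch of that follow-up:

    img = sensor.snapshot().binary([thresholds], zero=True)  # black out oversaturated pixels
    img.histeq()  # equalize what remains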
@ -2,20 +2,21 @@
#
# This example shows off using the laplacian filter to detect edges.

import sensor, image, time
import sensor
import time

sensor.reset() # Initialize the camera sensor.
sensor.set_pixformat(sensor.GRAYSCALE) # or sensor.RGB565
sensor.set_framesize(sensor.QQVGA) # or sensor.QVGA (or others)
sensor.skip_frames(time = 2000) # Let new settings take effect.
clock = time.clock() # Tracks FPS.
sensor.reset()  # Initialize the camera sensor.
sensor.set_pixformat(sensor.GRAYSCALE)  # or sensor.RGB565
sensor.set_framesize(sensor.QQVGA)  # or sensor.QVGA (or others)
sensor.skip_frames(time=2000)  # Let new settings take effect.
clock = time.clock()  # Tracks FPS.

while(True):
    clock.tick() # Track elapsed milliseconds between snapshots().
    img = sensor.snapshot() # Take a picture and return the image.
while True:
    clock.tick()  # Track elapsed milliseconds between snapshots().
    img = sensor.snapshot()  # Take a picture and return the image.

    # Run the kernel on every pixel of the image.
    img.laplacian(1)

    print(clock.fps()) # Note: Your OpenMV Cam runs about half as fast while
    print(clock.fps())  # Note: Your OpenMV Cam runs about half as fast while
    # connected to your computer. The FPS should increase once disconnected.
@ -4,7 +4,7 @@
# a binary image to remove noise. This example was originally a test but it's
# useful for showing off how these functions work.

import pyb, sensor, image
import sensor

sensor.reset()
sensor.set_framesize(sensor.QVGA)
@ -12,8 +12,7 @@ sensor.set_framesize(sensor.QVGA)
grayscale_thres = (170, 255)
rgb565_thres = (70, 100, -128, 127, -128, 127)

while(True):

while True:
    sensor.set_pixformat(sensor.GRAYSCALE)
    for i in range(20):
        img = sensor.snapshot()
@ -3,19 +3,20 @@
# This example shows off gamma correction to make the image brighter. The gamma
# correction method can also fix contrast and brightness.

import sensor, image, time
import sensor
import time

sensor.reset()
sensor.set_pixformat(sensor.RGB565)
sensor.set_framesize(sensor.QVGA)
sensor.skip_frames(time = 2000)
sensor.skip_frames(time=2000)
clock = time.clock()

while(True):
while True:
    clock.tick()

    # Gamma, contrast, and brightness correction are applied to each color channel. The
    # values are scaled to the range per color channel per image type...
    img = sensor.snapshot().gamma_corr(gamma = 0.5, contrast = 1.0, brightness = 0.0)
    img = sensor.snapshot().gamma_corr(gamma=0.5, contrast=1.0, brightness=0.0)

    print(clock.fps())
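
For orientation: gamma values below 1.0 brighten the image (as the 0.5 above does) while values above 1.0 darken it. A minimal sketch of the darkening direction:

    img = sensor.snapshot().gamma_corr(gamma=2.0)  # > 1.0 darkens; contrast/brightness left at defaults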
@ -2,17 +2,18 @@
#
# This example shows off using the bilateral filter on grayscale images.

import sensor, image, time
import sensor
import time

sensor.reset() # Initialize the camera sensor.
sensor.set_pixformat(sensor.GRAYSCALE) # or sensor.RGB565
sensor.set_framesize(sensor.QQVGA) # or sensor.QVGA (or others)
sensor.skip_frames(time = 2000) # Let new settings take effect.
clock = time.clock() # Tracks FPS.
sensor.reset()  # Initialize the camera sensor.
sensor.set_pixformat(sensor.GRAYSCALE)  # or sensor.RGB565
sensor.set_framesize(sensor.QQVGA)  # or sensor.QVGA (or others)
sensor.skip_frames(time=2000)  # Let new settings take effect.
clock = time.clock()  # Tracks FPS.

while(True):
    clock.tick() # Track elapsed milliseconds between snapshots().
    img = sensor.snapshot() # Take a picture and return the image.
while True:
    clock.tick()  # Track elapsed milliseconds between snapshots().
    img = sensor.snapshot()  # Take a picture and return the image.

    # color_sigma controls how close pixels have to be in color to be
    # blurred together. A smaller value means they have to be closer.
@ -29,5 +30,5 @@ while(True):
    # color_sigma/space_sigma too aggressively. Increase the sigma values until
    # the defects go away if you see them.

    print(clock.fps()) # Note: Your OpenMV Cam runs about half as fast while
    print(clock.fps())  # Note: Your OpenMV Cam runs about half as fast while
    # connected to your computer. The FPS should increase once disconnected.
@ -3,19 +3,19 @@
# This script shows off the binary image filter. You may pass binary any
# number of thresholds to segment the image by.

import sensor, image, time
import sensor
import time

sensor.reset()
sensor.set_framesize(sensor.QVGA)
sensor.set_pixformat(sensor.GRAYSCALE)
sensor.skip_frames(time = 2000)
sensor.skip_frames(time=2000)
clock = time.clock()

low_threshold = (0, 50)
high_threshold = (205, 255)

while(True):

while True:
    # Test low threshold
    for i in range(100):
        clock.tick()
@ -34,12 +34,12 @@ while(True):
    for i in range(100):
        clock.tick()
        img = sensor.snapshot()
        img.binary([low_threshold], invert = 1)
        img.binary([low_threshold], invert=1)
        print(clock.fps())

    # Test not high threshold
    for i in range(100):
        clock.tick()
        img = sensor.snapshot()
        img.binary([high_threshold], invert = 1)
        img.binary([high_threshold], invert=1)
        print(clock.fps())
@ -7,19 +7,20 @@
# histeq() on the image without outliers from oversaturated
# parts of the image breaking the algorithm...

import sensor, image, time
import sensor
import time

sensor.reset() # Initialize the camera sensor.
sensor.set_pixformat(sensor.GRAYSCALE) # or sensor.RGB565
sensor.set_framesize(sensor.QQVGA) # or sensor.QVGA (or others)
sensor.skip_frames(time = 2000) # Let new settings take effect.
clock = time.clock() # Tracks FPS.
sensor.reset()  # Initialize the camera sensor.
sensor.set_pixformat(sensor.GRAYSCALE)  # or sensor.RGB565
sensor.set_framesize(sensor.QQVGA)  # or sensor.QVGA (or others)
sensor.skip_frames(time=2000)  # Let new settings take effect.
clock = time.clock()  # Tracks FPS.

thresholds = (220, 255)

while(True):
    clock.tick() # Track elapsed milliseconds between snapshots().
while True:
    clock.tick()  # Track elapsed milliseconds between snapshots().
    img = sensor.snapshot().binary([thresholds], invert=False, zero=True)

    print(clock.fps()) # Note: Your OpenMV Cam runs about half as fast while
    print(clock.fps())  # Note: Your OpenMV Cam runs about half as fast while
    # connected to your computer. The FPS should increase once disconnected.
@ -3,15 +3,16 @@
# This example shows off how to use histogram equalization to improve
# the contrast in the image.

import sensor, image, time
import sensor
import time

sensor.reset()
sensor.set_pixformat(sensor.RGB565)
sensor.set_framesize(sensor.QQVGA)
sensor.skip_frames(time = 2000)
sensor.skip_frames(time=2000)
clock = time.clock()

while(True):
while True:
    clock.tick()

    img = sensor.snapshot().histeq()
@ -2,26 +2,25 @@
#
# This example shows off how to use a generic kernel filter.

import sensor, image, time
import sensor
import time

sensor.reset() # Initialize the camera sensor.
sensor.set_pixformat(sensor.GRAYSCALE) # or sensor.RGB565
sensor.set_framesize(sensor.QVGA) # or sensor.QQVGA (or others)
sensor.skip_frames(time = 2000) # Let new settings take effect.
clock = time.clock() # Tracks FPS.
sensor.reset()  # Initialize the camera sensor.
sensor.set_pixformat(sensor.GRAYSCALE)  # or sensor.RGB565
sensor.set_framesize(sensor.QVGA)  # or sensor.QQVGA (or others)
sensor.skip_frames(time=2000)  # Let new settings take effect.
clock = time.clock()  # Tracks FPS.

kernel_size = 1 # 3x3==1, 5x5==2, 7x7==3, etc.
kernel_size = 1  # 3x3==1, 5x5==2, 7x7==3, etc.

kernel = [-2, -1, 0, \
          -1,  1, 1, \
           0,  1, 2]
kernel = [-2, -1, 0, -1, 1, 1, 0, 1, 2]

while(True):
    clock.tick() # Track elapsed milliseconds between snapshots().
    img = sensor.snapshot() # Take a picture and return the image.
while True:
    clock.tick()  # Track elapsed milliseconds between snapshots().
    img = sensor.snapshot()  # Take a picture and return the image.

    # Run the kernel on every pixel of the image.
    img.morph(kernel_size, kernel)

    print(clock.fps()) # Note: Your OpenMV Cam runs about half as fast while
    print(clock.fps())  # Note: Your OpenMV Cam runs about half as fast while
    # connected to your computer. The FPS should increase once disconnected.
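
Following the size convention in the comment (1 is 3x3, 2 is 5x5), a hedged sketch of a 5x5 box blur; the explicit mul=1/25 normalization is an assumption made to keep overall brightness roughly constant:

    blur_kernel = [1] * 25  # 25 taps for a 5x5 kernel
    img.morph(2, blur_kernel, mul=1 / 25)  # kernel_size=2 selects the 5x5 neighborhood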
@ -5,17 +5,18 @@
# detection. Increase the strength below until lines are straight in the view.
# Zoom in (higher) or out (lower) until you see enough of the image.

import sensor, image, time
import sensor
import time

sensor.reset()
sensor.set_pixformat(sensor.RGB565)
sensor.set_framesize(sensor.QVGA)
sensor.skip_frames(time = 2000)
sensor.skip_frames(time=2000)
clock = time.clock()

while(True):
while True:
    clock.tick()

    img = sensor.snapshot().lens_corr(strength = 1.8, zoom = 1.0)
    img = sensor.snapshot().lens_corr(strength=1.8, zoom=1.0)

    print(clock.fps())
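
To follow the advice to increase the strength until lines are straight, a quick sketch that sweeps a few values (the numbers are arbitrary starting points):

    for s in (1.2, 1.5, 1.8, 2.1):
        img = sensor.snapshot().lens_corr(strength=s, zoom=1.0)
        print("strength =", s)  # watch the frame buffer and keep the straightest result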
@ -5,17 +5,18 @@
# become translations in the X direction and linear changes
# in scale become linear translations in the Y direction.

import sensor, image, time
import sensor
import time

sensor.reset() # Initialize the camera sensor.
sensor.set_pixformat(sensor.RGB565) # or sensor.GRAYSCALE
sensor.set_framesize(sensor.QQVGA) # or sensor.QVGA (or others)
sensor.skip_frames(time = 2000) # Let new settings take effect.
clock = time.clock() # Tracks FPS.
sensor.reset()  # Initialize the camera sensor.
sensor.set_pixformat(sensor.RGB565)  # or sensor.GRAYSCALE
sensor.set_framesize(sensor.QQVGA)  # or sensor.QVGA (or others)
sensor.skip_frames(time=2000)  # Let new settings take effect.
clock = time.clock()  # Tracks FPS.

while(True):
    clock.tick() # Track elapsed milliseconds between snapshots().
while True:
    clock.tick()  # Track elapsed milliseconds between snapshots().
    img = sensor.snapshot().linpolar(reverse=False)

    print(clock.fps()) # Note: Your OpenMV Cam runs about half as fast while
    print(clock.fps())  # Note: Your OpenMV Cam runs about half as fast while
    # connected to your computer. The FPS should increase once disconnected.
@ -5,17 +5,18 @@
# become translations in the X direction and exponential changes
# in scale (x2, x4, etc.) become linear translations in the Y direction.

import sensor, image, time
import sensor
import time

sensor.reset() # Initialize the camera sensor.
sensor.set_pixformat(sensor.RGB565) # or sensor.GRAYSCALE
sensor.set_framesize(sensor.QQVGA) # or sensor.QVGA (or others)
sensor.skip_frames(time = 2000) # Let new settings take effect.
clock = time.clock() # Tracks FPS.
sensor.reset()  # Initialize the camera sensor.
sensor.set_pixformat(sensor.RGB565)  # or sensor.GRAYSCALE
sensor.set_framesize(sensor.QQVGA)  # or sensor.QVGA (or others)
sensor.skip_frames(time=2000)  # Let new settings take effect.
clock = time.clock()  # Tracks FPS.

while(True):
    clock.tick() # Track elapsed milliseconds between snapshots().
while True:
    clock.tick()  # Track elapsed milliseconds between snapshots().
    img = sensor.snapshot().logpolar(reverse=False)

    print(clock.fps()) # Note: Your OpenMV Cam runs about half as fast while
    print(clock.fps())  # Note: Your OpenMV Cam runs about half as fast while
    # connected to your computer. The FPS should increase once disconnected.
@ -4,22 +4,23 @@
# When mean(threshold=True) is used, the mean() method adaptively thresholds the image
# by comparing the mean of the pixels around a pixel, minus an offset, with that pixel.

import sensor, image, time
import sensor
import time

sensor.reset() # Initialize the camera sensor.
sensor.set_pixformat(sensor.RGB565) # or sensor.GRAYSCALE
sensor.set_framesize(sensor.QQVGA) # or sensor.QVGA (or others)
sensor.skip_frames(time = 2000) # Let new settings take effect.
clock = time.clock() # Tracks FPS.
sensor.reset()  # Initialize the camera sensor.
sensor.set_pixformat(sensor.RGB565)  # or sensor.GRAYSCALE
sensor.set_framesize(sensor.QQVGA)  # or sensor.QVGA (or others)
sensor.skip_frames(time=2000)  # Let new settings take effect.
clock = time.clock()  # Tracks FPS.

while(True):
    clock.tick() # Track elapsed milliseconds between snapshots().
    img = sensor.snapshot() # Take a picture and return the image.
while True:
    clock.tick()  # Track elapsed milliseconds between snapshots().
    img = sensor.snapshot()  # Take a picture and return the image.

    # The first argument is the kernel size. N corresponds to a ((N*2)+1)^2
    # kernel size. E.g. 1 == 3x3 kernel, 2 == 5x5 kernel, etc. Note: You
    # shouldn't ever need to use a value bigger than 2.
    img.mean(1, threshold=True, offset=5, invert=True)

    print(clock.fps()) # Note: Your OpenMV Cam runs about half as fast while
    print(clock.fps())  # Note: Your OpenMV Cam runs about half as fast while
    # connected to your computer. The FPS should increase once disconnected.
@ -4,22 +4,23 @@
# filter in an NxN neighborhood. Mean filtering removes noise in the image by
# blurring everything. But it's the fastest kernel filter operation.

import sensor, image, time
import sensor
import time

sensor.reset() # Initialize the camera sensor.
sensor.set_pixformat(sensor.RGB565) # or sensor.GRAYSCALE
sensor.set_framesize(sensor.QQVGA) # or sensor.QVGA (or others)
sensor.skip_frames(time = 2000) # Let new settings take effect.
clock = time.clock() # Tracks FPS.
sensor.reset()  # Initialize the camera sensor.
sensor.set_pixformat(sensor.RGB565)  # or sensor.GRAYSCALE
sensor.set_framesize(sensor.QQVGA)  # or sensor.QVGA (or others)
sensor.skip_frames(time=2000)  # Let new settings take effect.
clock = time.clock()  # Tracks FPS.

while(True):
    clock.tick() # Track elapsed milliseconds between snapshots().
    img = sensor.snapshot() # Take a picture and return the image.
while True:
    clock.tick()  # Track elapsed milliseconds between snapshots().
    img = sensor.snapshot()  # Take a picture and return the image.

    # The only argument is the kernel size. N corresponds to a ((N*2)+1)^2
    # kernel size. E.g. 1 == 3x3 kernel, 2 == 5x5 kernel, etc. Note: You
    # shouldn't ever need to use a value bigger than 2.
    img.mean(1)

    print(clock.fps()) # Note: Your OpenMV Cam runs about half as fast while
    print(clock.fps())  # Note: Your OpenMV Cam runs about half as fast while
    # connected to your computer. The FPS should increase once disconnected.
@ -4,17 +4,18 @@
# When median(threshold=True) is used, the median() method adaptively thresholds the image
# by comparing the median of the pixels around a pixel, minus an offset, with that pixel.

import sensor, image, time
import sensor
import time

sensor.reset() # Initialize the camera sensor.
sensor.set_pixformat(sensor.RGB565) # or sensor.GRAYSCALE
sensor.set_framesize(sensor.QQVGA) # or sensor.QVGA (or others)
sensor.skip_frames(time = 2000) # Let new settings take effect.
clock = time.clock() # Tracks FPS.
sensor.reset()  # Initialize the camera sensor.
sensor.set_pixformat(sensor.RGB565)  # or sensor.GRAYSCALE
sensor.set_framesize(sensor.QQVGA)  # or sensor.QVGA (or others)
sensor.skip_frames(time=2000)  # Let new settings take effect.
clock = time.clock()  # Tracks FPS.

while(True):
    clock.tick() # Track elapsed milliseconds between snapshots().
    img = sensor.snapshot() # Take a picture and return the image.
while True:
    clock.tick()  # Track elapsed milliseconds between snapshots().
    img = sensor.snapshot()  # Take a picture and return the image.

    # The first argument to the median filter is the kernel size; it can be
    # either 0, 1, or 2 for a 1x1, 3x3, or 5x5 kernel respectively. The second
@ -23,5 +24,5 @@ while(True):
    # would be the upper quartile.
    img.median(1, percentile=0.5, threshold=True, offset=5, invert=True)

    print(clock.fps()) # Note: Your OpenMV Cam runs about half as fast while
    print(clock.fps())  # Note: Your OpenMV Cam runs about half as fast while
    # connected to your computer. The FPS should increase once disconnected.
@ -4,17 +4,18 @@
# with the median value of its NxN neighborhood. Median filtering is good for
# removing noise in the image while preserving edges.

import sensor, image, time
import sensor
import time

sensor.reset() # Initialize the camera sensor.
sensor.set_pixformat(sensor.RGB565) # or sensor.GRAYSCALE
sensor.set_framesize(sensor.QQVGA) # or sensor.QVGA (or others)
sensor.skip_frames(time = 2000) # Let new settings take effect.
clock = time.clock() # Tracks FPS.
sensor.reset()  # Initialize the camera sensor.
sensor.set_pixformat(sensor.RGB565)  # or sensor.GRAYSCALE
sensor.set_framesize(sensor.QQVGA)  # or sensor.QVGA (or others)
sensor.skip_frames(time=2000)  # Let new settings take effect.
clock = time.clock()  # Tracks FPS.

while(True):
    clock.tick() # Track elapsed milliseconds between snapshots().
    img = sensor.snapshot() # Take a picture and return the image.
while True:
    clock.tick()  # Track elapsed milliseconds between snapshots().
    img = sensor.snapshot()  # Take a picture and return the image.

    # The first argument to the median filter is the kernel size; it can be
    # either 0, 1, or 2 for a 1x1, 3x3, or 5x5 kernel respectively. The second
@ -23,5 +24,5 @@ while(True):
    # would be the upper quartile.
    img.median(1, percentile=0.5)

    print(clock.fps()) # Note: Your OpenMV Cam runs about half as fast while
    print(clock.fps())  # Note: Your OpenMV Cam runs about half as fast while
    # connected to your computer. The FPS should increase once disconnected.
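
The percentile argument generalizes this filter: per the OpenMV docs the extremes behave roughly like min and max filters. A minimal sketch of either end (pick one per frame):

    img.median(1, percentile=0.0)  # ~min filter: erodes bright regions
    img.median(1, percentile=1.0)  # ~max filter: dilates bright regions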
@ -4,17 +4,18 @@
# When midpoint(threshold=True) is used, the midpoint() method adaptively thresholds the image
# by comparing the midpoint of the pixels around a pixel, minus an offset, with that pixel.

import sensor, image, time
import sensor
import time

sensor.reset() # Initialize the camera sensor.
sensor.set_pixformat(sensor.RGB565) # or sensor.GRAYSCALE
sensor.set_framesize(sensor.QQVGA) # or sensor.QVGA (or others)
sensor.skip_frames(time = 2000) # Let new settings take effect.
clock = time.clock() # Tracks FPS.
sensor.reset()  # Initialize the camera sensor.
sensor.set_pixformat(sensor.RGB565)  # or sensor.GRAYSCALE
sensor.set_framesize(sensor.QQVGA)  # or sensor.QVGA (or others)
sensor.skip_frames(time=2000)  # Let new settings take effect.
clock = time.clock()  # Tracks FPS.

while(True):
    clock.tick() # Track elapsed milliseconds between snapshots().
    img = sensor.snapshot() # Take a picture and return the image.
while True:
    clock.tick()  # Track elapsed milliseconds between snapshots().
    img = sensor.snapshot()  # Take a picture and return the image.

    # The first argument is the kernel size. N corresponds to a ((N*2)+1)^2
    # kernel size. E.g. 1 == 3x3 kernel, 2 == 5x5 kernel, etc. Note: You
@ -24,5 +25,5 @@ while(True):
    # makes images darker while the max filter makes images lighter.
    img.midpoint(1, bias=0.5, threshold=True, offset=5, invert=True)

    print(clock.fps()) # Note: Your OpenMV Cam runs about half as fast while
    print(clock.fps())  # Note: Your OpenMV Cam runs about half as fast while
    # connected to your computer. The FPS should increase once disconnected.
@ -3,17 +3,18 @@
# This example shows off midpoint filtering. Midpoint filtering replaces each
# pixel by the average of the min and max pixel values for an NxN neighborhood.

import sensor, image, time
import sensor
import time

sensor.reset() # Initialize the camera sensor.
sensor.set_pixformat(sensor.RGB565) # or sensor.GRAYSCALE
sensor.set_framesize(sensor.QQVGA) # or sensor.QVGA (or others)
sensor.skip_frames(time = 2000) # Let new settings take effect.
clock = time.clock() # Tracks FPS.
sensor.reset()  # Initialize the camera sensor.
sensor.set_pixformat(sensor.RGB565)  # or sensor.GRAYSCALE
sensor.set_framesize(sensor.QQVGA)  # or sensor.QVGA (or others)
sensor.skip_frames(time=2000)  # Let new settings take effect.
clock = time.clock()  # Tracks FPS.

while(True):
    clock.tick() # Track elapsed milliseconds between snapshots().
    img = sensor.snapshot() # Take a picture and return the image.
while True:
    clock.tick()  # Track elapsed milliseconds between snapshots().
    img = sensor.snapshot()  # Take a picture and return the image.

    # The first argument is the kernel size. N corresponds to a ((N*2)+1)^2
    # kernel size. E.g. 1 == 3x3 kernel, 2 == 5x5 kernel, etc. Note: You
@ -23,5 +24,5 @@ while(True):
    # makes images darker while the max filter makes images lighter.
    img.midpoint(1, bias=0.5)

    print(clock.fps()) # Note: Your OpenMV Cam runs about half as fast while
    print(clock.fps())  # Note: Your OpenMV Cam runs about half as fast while
    # connected to your computer. The FPS should increase once disconnected.
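
Per the comment about min/max behavior, bias picks where between the neighborhood minimum and maximum each output pixel lands. A minimal sketch of the two extremes (pick one per frame):

    img.midpoint(1, bias=0.0)  # pure min filter: darkens the image
    img.midpoint(1, bias=1.0)  # pure max filter: lightens the image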
@ -5,21 +5,22 @@
# by comparing the mode of the pixels around a pixel, minus an offset, with that pixel.
# Avoid using the mode filter on RGB565 images. It will cause artifacts on image edges...

import sensor, image, time
import sensor
import time

sensor.reset() # Initialize the camera sensor.
sensor.set_pixformat(sensor.GRAYSCALE) # or sensor.RGB565
sensor.set_framesize(sensor.QQVGA) # or sensor.QVGA (or others)
sensor.skip_frames(time = 2000) # Let new settings take effect.
clock = time.clock() # Tracks FPS.
sensor.reset()  # Initialize the camera sensor.
sensor.set_pixformat(sensor.GRAYSCALE)  # or sensor.RGB565
sensor.set_framesize(sensor.QQVGA)  # or sensor.QVGA (or others)
sensor.skip_frames(time=2000)  # Let new settings take effect.
clock = time.clock()  # Tracks FPS.

while(True):
    clock.tick() # Track elapsed milliseconds between snapshots().
    img = sensor.snapshot() # Take a picture and return the image.
while True:
    clock.tick()  # Track elapsed milliseconds between snapshots().
    img = sensor.snapshot()  # Take a picture and return the image.

    # The only argument to the mode filter is the kernel size; it can be
    # either 0, 1, or 2 for a 1x1, 3x3, or 5x5 kernel respectively.
    img.mode(1, threshold=True, offset=5, invert=True)

    print(clock.fps()) # Note: Your OpenMV Cam runs about half as fast while
    print(clock.fps())  # Note: Your OpenMV Cam runs about half as fast while
    # connected to your computer. The FPS should increase once disconnected.
@ -5,21 +5,22 @@
# of pixels around it. Avoid using the mode filter on RGB565 images. It will
# cause artifacts on image edges...

import sensor, image, time
import sensor
import time

sensor.reset() # Initialize the camera sensor.
sensor.set_pixformat(sensor.GRAYSCALE) # or sensor.RGB565
sensor.set_framesize(sensor.QQVGA) # or sensor.QVGA (or others)
sensor.skip_frames(time = 2000) # Let new settings take effect.
clock = time.clock() # Tracks FPS.
sensor.reset()  # Initialize the camera sensor.
sensor.set_pixformat(sensor.GRAYSCALE)  # or sensor.RGB565
sensor.set_framesize(sensor.QQVGA)  # or sensor.QVGA (or others)
sensor.skip_frames(time=2000)  # Let new settings take effect.
clock = time.clock()  # Tracks FPS.

while(True):
    clock.tick() # Track elapsed milliseconds between snapshots().
    img = sensor.snapshot() # Take a picture and return the image.
while True:
    clock.tick()  # Track elapsed milliseconds between snapshots().
    img = sensor.snapshot()  # Take a picture and return the image.

    # The only argument to the mode filter is the kernel size; it can be
    # either 0, 1, or 2 for a 1x1, 3x3, or 5x5 kernel respectively.
    img.mode(1)

    print(clock.fps()) # Note: Your OpenMV Cam runs about half as fast while
    print(clock.fps())  # Note: Your OpenMV Cam runs about half as fast while
    # connected to your computer. The FPS should increase once disconnected.
@ -3,17 +3,18 @@
# This example shows off negating the image. This is not a particularly
# useful method but it can come in handy once in a while.

import sensor, image, time
import sensor
import time

sensor.reset() # Initialize the camera sensor.
sensor.set_pixformat(sensor.RGB565) # or sensor.GRAYSCALE
sensor.set_framesize(sensor.QVGA) # or sensor.QQVGA (or others)
sensor.skip_frames(time = 2000) # Let new settings take effect.
clock = time.clock() # Tracks FPS.
sensor.reset()  # Initialize the camera sensor.
sensor.set_pixformat(sensor.RGB565)  # or sensor.GRAYSCALE
sensor.set_framesize(sensor.QVGA)  # or sensor.QQVGA (or others)
sensor.skip_frames(time=2000)  # Let new settings take effect.
clock = time.clock()  # Tracks FPS.

while(True):
    clock.tick() # Track elapsed milliseconds between snapshots().
while True:
    clock.tick()  # Track elapsed milliseconds between snapshots().
    img = sensor.snapshot().negate()

    print(clock.fps()) # Note: Your OpenMV Cam runs about half as fast while
    print(clock.fps())  # Note: Your OpenMV Cam runs about half as fast while
    # connected to your computer. The FPS should increase once disconnected.
@ -4,12 +4,13 @@
# perspective distortion and then to rotate the new corrected image in 3D
# space afterwards to handle movement.

import sensor, image, time
import sensor
import time

sensor.reset()
sensor.set_pixformat(sensor.RGB565)
sensor.set_framesize(sensor.QVGA)
sensor.skip_frames(time = 2000)
sensor.skip_frames(time=2000)
clock = time.clock()

# The image will be warped such that the following points become the new:
@ -27,10 +28,12 @@ clock = time.clock()
w = sensor.width()
h = sensor.height()

TARGET_POINTS = [(0, 0), # (x, y) CHANGE ME!
                 (w-1, 0), # (x, y) CHANGE ME!
                 (w-1, h-1), # (x, y) CHANGE ME!
                 (0, h-1)] # (x, y) CHANGE ME!
TARGET_POINTS = [
    (0, 0),  # (x, y) CHANGE ME!
    (w - 1, 0),  # (x, y) CHANGE ME!
    (w - 1, h - 1),  # (x, y) CHANGE ME!
    (0, h - 1),
]  # (x, y) CHANGE ME!

# Degrees per frame to rotate by...
X_ROTATION_DEGREE_RATE = 5
@ -39,30 +42,32 @@ Z_ROTATION_DEGREE_RATE = 0
X_OFFSET = 0
Y_OFFSET = 0

ZOOM_AMOUNT = 1 # Lower zooms out - Higher zooms in.
FOV_WINDOW = 25 # Between 0 and 180. Represents the field-of-view of the scene
                # window when rotating the image in 3D space. Values closer to
                # zero result in lines becoming straighter as the window
                # moves away from the image being rotated in 3D space. A large
                # value moves the window closer to the image in 3D space which
                # results in more perspective distortion and sometimes
                # the image in 3D intersecting the scene window.
ZOOM_AMOUNT = 1  # Lower zooms out - Higher zooms in.
FOV_WINDOW = 25  # Between 0 and 180. Represents the field-of-view of the scene
# window when rotating the image in 3D space. Values closer to
# zero result in lines becoming straighter as the window
# moves away from the image being rotated in 3D space. A large
# value moves the window closer to the image in 3D space which
# results in more perspective distortion and sometimes
# the image in 3D intersecting the scene window.

x_rotation_counter = 0
y_rotation_counter = 0
z_rotation_counter = 0

while(True):
while True:
    clock.tick()

    img = sensor.snapshot().rotation_corr(x_rotation = x_rotation_counter, \
                                          y_rotation = y_rotation_counter, \
                                          z_rotation = z_rotation_counter, \
                                          x_translation = X_OFFSET, \
                                          y_translation = Y_OFFSET, \
                                          zoom = ZOOM_AMOUNT, \
                                          fov = FOV_WINDOW, \
                                          corners = TARGET_POINTS)
    img = sensor.snapshot().rotation_corr(
        x_rotation=x_rotation_counter,
        y_rotation=y_rotation_counter,
        z_rotation=z_rotation_counter,
        x_translation=X_OFFSET,
        y_translation=Y_OFFSET,
        zoom=ZOOM_AMOUNT,
        fov=FOV_WINDOW,
        corners=TARGET_POINTS,
    )

    x_rotation_counter += X_ROTATION_DEGREE_RATE
    y_rotation_counter += Y_ROTATION_DEGREE_RATE
@ -3,12 +3,13 @@
# This example shows off how to use the rotation_corr() to fix perspective
# issues related to how your OpenMV Cam is mounted.

import sensor, image, time
import sensor
import time

sensor.reset()
sensor.set_pixformat(sensor.RGB565)
sensor.set_framesize(sensor.QVGA)
sensor.skip_frames(time = 2000)
sensor.skip_frames(time=2000)
clock = time.clock()

# The image will be warped such that the following points become the new:
@ -26,14 +27,16 @@ clock = time.clock()
w = sensor.width()
h = sensor.height()

TARGET_POINTS = [(0, 0), # (x, y) CHANGE ME!
                 (w-1, 0), # (x, y) CHANGE ME!
                 (w-1, h-1), # (x, y) CHANGE ME!
                 (0, h-1)] # (x, y) CHANGE ME!
TARGET_POINTS = [
    (0, 0),  # (x, y) CHANGE ME!
    (w - 1, 0),  # (x, y) CHANGE ME!
    (w - 1, h - 1),  # (x, y) CHANGE ME!
    (0, h - 1),
]  # (x, y) CHANGE ME!

while(True):
while True:
    clock.tick()

    img = sensor.snapshot().rotation_corr(corners = TARGET_POINTS)
    img = sensor.snapshot().rotation_corr(corners=TARGET_POINTS)

    print(clock.fps())
@ -3,7 +3,8 @@
# This example shows off how to use the rotation_corr() to play with the scene
# window your OpenMV Cam sees.

import sensor, image, time
import sensor
import time

# Degrees per frame to rotate by...
X_ROTATION_DEGREE_RATE = 5
@ -12,35 +13,37 @@ Z_ROTATION_DEGREE_RATE = 0
X_OFFSET = 0
Y_OFFSET = 0

ZOOM_AMOUNT = 1 # Lower zooms out - Higher zooms in.
FOV_WINDOW = 60 # Between 0 and 180. Represents the field-of-view of the scene
                # window when rotating the image in 3D space. Values closer to
                # zero result in lines becoming straighter as the window
                # moves away from the image being rotated in 3D space. A large
                # value moves the window closer to the image in 3D space which
                # results in more perspective distortion and sometimes
                # the image in 3D intersecting the scene window.
ZOOM_AMOUNT = 1  # Lower zooms out - Higher zooms in.
FOV_WINDOW = 60  # Between 0 and 180. Represents the field-of-view of the scene
# window when rotating the image in 3D space. Values closer to
# zero result in lines becoming straighter as the window
# moves away from the image being rotated in 3D space. A large
# value moves the window closer to the image in 3D space which
# results in more perspective distortion and sometimes
# the image in 3D intersecting the scene window.

sensor.reset()
sensor.set_pixformat(sensor.RGB565)
sensor.set_framesize(sensor.QVGA)
sensor.skip_frames(time = 2000)
sensor.skip_frames(time=2000)
clock = time.clock()

x_rotation_counter = 0
y_rotation_counter = 0
z_rotation_counter = 0

while(True):
while True:
    clock.tick()

    img = sensor.snapshot().rotation_corr(x_rotation = x_rotation_counter, \
                                          y_rotation = y_rotation_counter, \
                                          z_rotation = z_rotation_counter, \
                                          x_translation = X_OFFSET, \
                                          y_translation = Y_OFFSET, \
                                          zoom = ZOOM_AMOUNT, \
                                          fov = FOV_WINDOW)
    img = sensor.snapshot().rotation_corr(
        x_rotation=x_rotation_counter,
        y_rotation=y_rotation_counter,
        z_rotation=z_rotation_counter,
        x_translation=X_OFFSET,
        y_translation=Y_OFFSET,
        zoom=ZOOM_AMOUNT,
        fov=FOV_WINDOW,
    )

    x_rotation_counter += X_ROTATION_DEGREE_RATE
    y_rotation_counter += Y_ROTATION_DEGREE_RATE
@ -2,20 +2,21 @@
#
# This example shows off using the laplacian filter to sharpen images.

import sensor, image, time
import sensor
import time

sensor.reset() # Initialize the camera sensor.
sensor.set_pixformat(sensor.GRAYSCALE) # or sensor.RGB565
sensor.set_framesize(sensor.QQVGA) # or sensor.QVGA (or others)
sensor.skip_frames(time = 2000) # Let new settings take effect.
clock = time.clock() # Tracks FPS.
sensor.reset()  # Initialize the camera sensor.
sensor.set_pixformat(sensor.GRAYSCALE)  # or sensor.RGB565
sensor.set_framesize(sensor.QQVGA)  # or sensor.QVGA (or others)
sensor.skip_frames(time=2000)  # Let new settings take effect.
clock = time.clock()  # Tracks FPS.

while(True):
    clock.tick() # Track elapsed milliseconds between snapshots().
    img = sensor.snapshot() # Take a picture and return the image.
while True:
    clock.tick()  # Track elapsed milliseconds between snapshots().
    img = sensor.snapshot()  # Take a picture and return the image.

    # Run the kernel on every pixel of the image.
    img.laplacian(1, sharpen=True)

    print(clock.fps()) # Note: Your OpenMV Cam runs about half as fast while
    print(clock.fps())  # Note: Your OpenMV Cam runs about half as fast while
    # connected to your computer. The FPS should increase once disconnected.
@ -4,16 +4,16 @@
# NOTE: ndarrays cause the heap to be fragmented easily. If you run out of memory,
# there's not much that can be done about it; lowering the resolution might help.

import sensor, image, time
import sensor
import time
from ulab import numpy as np

sensor.reset() # Reset and initialize the sensor.
sensor.reset()  # Reset and initialize the sensor.
sensor.set_pixformat(sensor.GRAYSCALE)  # Set pixel format to GRAYSCALE
sensor.set_framesize(sensor.QQVGA) # Set frame size to QQVGA (160x120)
clock = time.clock() # Create a clock object to track the FPS.
sensor.set_framesize(sensor.QQVGA)  # Set frame size to QQVGA (160x120)
clock = time.clock()  # Create a clock object to track the FPS.

while (True):
    img = sensor.snapshot() # Take a picture and return the image.
while True:
    img = sensor.snapshot()  # Take a picture and return the image.
    a = np.array(img, dtype=np.uint8)
    print("mean: %d std:%d"%(np.mean(a), np.std(a)))

    print("mean: %d std:%d" % (np.mean(a), np.std(a)))
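
Beyond mean and standard deviation, other ulab reductions work on the same array. A minimal sketch using np.min/np.max from ulab.numpy:

    a = np.array(img, dtype=np.uint8)
    print("min: %d max: %d" % (np.min(a), np.max(a)))  # dynamic range of the frame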