mirror of
https://github.com/openmv/openmv.git
synced 2025-11-04 14:49:50 +08:00
scripts/examples: Add Nicla Vision examples.
This commit is contained in:
parent
c26036af23
commit
2c9a88c3e2
@ -0,0 +1,17 @@
# Hello World Example
#
# Welcome to the OpenMV IDE! Click on the green run arrow button below to run the script!

import sensor, image, time

sensor.reset()                      # Reset and initialize the sensor.
sensor.set_pixformat(sensor.RGB565) # Set pixel format to RGB565 (or GRAYSCALE)
sensor.set_framesize(sensor.QVGA)   # Set frame size to QVGA (320x240)
sensor.skip_frames(time = 2000)     # Wait for settings to take effect.
clock = time.clock()                # Create a clock object to track the FPS.

while(True):
    clock.tick()                    # Update the FPS clock.
    img = sensor.snapshot()         # Take a picture and return the image.
    print(clock.fps())              # Note: OpenMV Cam runs about half as fast when connected
                                    # to the IDE. The FPS should increase once disconnected.
scripts/examples/Arduino/Nicla-Vision/00-Basics/main.py (33 lines, new file)
@ -0,0 +1,33 @@
# Main Module Example
#
# When your OpenMV Cam is disconnected from your computer it will either run the
# main.py script on the SD card (if attached) or the main.py script on
# your OpenMV Cam's internal flash drive.

import time, pyb

led = pyb.LED(3) # Red LED = 1, Green LED = 2, Blue LED = 3.
usb = pyb.USB_VCP() # This is a serial port object that allows you to communicate
                    # with your computer. While it is not open, the code below runs.

while(not usb.isconnected()):
    led.on()
    time.sleep_ms(150)
    led.off()
    time.sleep_ms(100)
    led.on()
    time.sleep_ms(150)
    led.off()
    time.sleep_ms(600)

led = pyb.LED(2) # Switch to using the green LED.

while(usb.isconnected()):
    led.on()
    time.sleep_ms(150)
    led.off()
    time.sleep_ms(100)
    led.on()
    time.sleep_ms(150)
    led.off()
    time.sleep_ms(600)
@ -0,0 +1,12 @@
# LSM6DSOX Gyro example.
import time
from lsm6dsox import LSM6DSOX
from machine import I2C, SPI, Pin

lsm = LSM6DSOX(SPI(5), cs_pin=Pin("PF6", Pin.OUT_PP, Pin.PULL_UP))

while (True):
    print('Accelerometer: x:{:>8.3f} y:{:>8.3f} z:{:>8.3f}'.format(*lsm.read_accel()))
    print('Gyroscope:     x:{:>8.3f} y:{:>8.3f} z:{:>8.3f}'.format(*lsm.read_gyro()))
    print("")
    time.sleep_ms(100)
@ -0,0 +1,46 @@
# LSM6DSOX IMU MLC (Machine Learning Core) Example.
# Download the raw UCF file, copy to storage and reset.

# NOTE: The pre-trained models (UCF files) for the examples can be found here:
# https://github.com/STMicroelectronics/STMems_Machine_Learning_Core/tree/master/application_examples/lsm6dsox

import time
from lsm6dsox import LSM6DSOX
from machine import I2C, SPI, Pin

INT_MODE = True   # Run in interrupt mode.
INT_FLAG = False  # Set True on interrupt.

def imu_int_handler(pin):
    global INT_FLAG
    INT_FLAG = True

if (INT_MODE == True):
    int_pin = Pin('PA1', mode=Pin.IN, pull=Pin.PULL_UP)
    int_pin.irq(handler=imu_int_handler, trigger=Pin.IRQ_RISING)

# Vibration detection example
UCF_FILE = "lsm6dsox_vibration_monitoring.ucf"
UCF_LABELS = {0:"no vibration", 1:"low vibration", 2:"high vibration"}
# NOTE: Selected data rate and scale must match the MLC data rate and scale.
lsm = LSM6DSOX(SPI(5), cs_pin=Pin("PF6", Pin.OUT_PP, Pin.PULL_UP),
               gyro_odr=26, accel_odr=26, gyro_scale=2000, accel_scale=4, ucf=UCF_FILE)

# Head gestures example
#UCF_FILE = "lsm6dsox_head_gestures.ucf"
#UCF_LABELS = {0:"Nod", 1:"Shake", 2:"Stationary", 3:"Swing", 4:"Walk"}
# NOTE: Selected data rate and scale must match the MLC data rate and scale.
#lsm = LSM6DSOX(SPI(5), cs_pin=Pin("PF6", Pin.OUT_PP, Pin.PULL_UP),
#               gyro_odr=26, accel_odr=26, gyro_scale=250, accel_scale=2, ucf=UCF_FILE)

print("MLC configured...")

while (True):
    if (INT_MODE):
        if (INT_FLAG):
            INT_FLAG = False
            print(UCF_LABELS[lsm.read_mlc_output()[0]])
    else:
        buf = lsm.read_mlc_output()
        if (buf != None):
            print(UCF_LABELS[buf[0]])
@ -0,0 +1,11 @@
# VL53L1X ToF sensor basic distance measurement example.
from machine import I2C
from vl53l1x import VL53L1X
import time

tof = VL53L1X(I2C(2))

while True:
    print(f"Distance: {tof.read()}mm")
    time.sleep_ms(50)
@ -0,0 +1,13 @@
# ADC Read Example.
#
# This example shows how to use the ADC to read an analog pin.

import time
from pyb import ADC

adc = ADC("A0")

while(True):
    # The ADC has 12-bits of resolution for 4096 values.
    print("ADC = %fv" % ((adc.read() * 3.3) / 4095))
    time.sleep_ms(100)
@ -0,0 +1,8 @@
# ADC Internal Channels Example
#
# This example shows how to read internal ADC channels.

import time, pyb

adc = pyb.ADCAll(12)
print("VREF = %.1fv VBAT = %.1fv Temp = %d" % (adc.read_core_vref(), adc.read_core_vbat(), adc.read_core_temp()))
@ -0,0 +1,36 @@
# CAN Example
#
# This example demonstrates CAN communications between two cameras.
# NOTE: you need two CAN transceiver shields and a DB9 cable to run this example.

import time, omv
from pyb import CAN

# NOTE: Set to False on the receiving node.
TRANSMITTER = True

can = CAN(1, CAN.NORMAL, baudrate=125_000, sample_point=75)
# NOTE: uncomment to set bit timing manually, for example:
#can.init(CAN.NORMAL, prescaler=32, sjw=1, bs1=8, bs2=3)
can.restart()

if (TRANSMITTER):
    while (True):
        # Send message with id 1
        can.send('Hello', 1)
        time.sleep_ms(1000)

else:
    # Runs on the receiving node.
    if (omv.board_type() == 'H7'): # FDCAN
        # Set a filter to receive messages with id=1 -> 4
        # Filter index, mode (RANGE, DUAL or MASK), FIFO (0 or 1), params
        can.setfilter(0, CAN.RANGE, 0, (1, 4))
    else:
        # Set a filter to receive messages with id=1, 2, 3 and 4
        # Filter index, mode (LIST16, etc..), FIFO (0 or 1), params
        can.setfilter(0, CAN.LIST16, 0, (1, 2, 3, 4))

    while (True):
        # Receive messages on FIFO 0
        print(can.recv(0, timeout=10000))
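Below is a minimal receive-side sketch, not part of the example above: it assumes pyb.CAN.recv() returns an (id, rtr, fmi, data) tuple as it does on other pyb targets, so the field order should be checked against the firmware documentation before relying on it.

frame = can.recv(0, timeout=10000)  # Block on FIFO 0 until a frame arrives or the timeout expires.
frame_id, rtr, fmi, data = frame    # Assumed field order: id, RTR flag, filter match index, payload.
print("id=%d rtr=%s data=%s" % (frame_id, rtr, bytes(data)))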
@ -0,0 +1,24 @@
# CPU frequency scaling example.
#
# This example shows how to use the cpufreq module to change the CPU frequency on the fly.
import sensor, image, time, cpufreq

sensor.reset()                         # Reset and initialize the sensor.
sensor.set_pixformat(sensor.GRAYSCALE) # Set pixel format to GRAYSCALE (or RGB565)
sensor.set_framesize(sensor.QVGA)      # Set frame size to QVGA (320x240)
clock = time.clock()                   # Create a clock object to track the FPS.

def test_image_processing():
    for i in range(0, 50):
        clock.tick()            # Update the FPS clock.
        img = sensor.snapshot() # Take a picture and return the image.
        img.find_edges(image.EDGE_CANNY, threshold=(50, 80))

print("\nFrequency Scaling Test...")
for f in cpufreq.get_supported_frequencies():
    print("Testing CPU Freq: %dMHz..." %(f))
    cpufreq.set_frequency(f)
    clock.reset()
    test_image_processing()
    freqs = cpufreq.get_current_frequencies()
    print("CPU Freq:%dMHz HCLK:%dMhz PCLK1:%dMhz PCLK2:%dMhz FPS:%.2f" %(freqs[0], freqs[1], freqs[2], freqs[3], clock.fps()))
@ -0,0 +1,19 @@
# I2C Control
#
# This example shows how to use the I2C bus on your OpenMV Cam by dumping the
# contents of a standard EEPROM. To run this example either connect the
# Thermopile Shield to your OpenMV Cam or an I2C EEPROM to your OpenMV Cam.

from pyb import I2C

i2c = I2C(1, I2C.MASTER)
mem = i2c.mem_read(256, 0x50, 0) # The EEPROM slave address is 0x50.

print("\n[")
for i in range(16):
    print("\t[", end='')
    for j in range(16):
        print("%03d" % mem[(i*16)+j], end='')
        if j != 15: print(", ", end='')
    print("]," if i != 15 else "]")
print("]")
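A write-back sketch to complement the dump above, assuming the same 8-bit-addressed EEPROM at address 0x50 and a roughly 10 ms internal write cycle (both are assumptions about the part, not something the example states):

import time
i2c.mem_write(0xAB, 0x50, 0)     # Write one byte to memory address 0.
time.sleep_ms(10)                # Wait out the EEPROM's internal write cycle (part-dependent).
print(i2c.mem_read(1, 0x50, 0))  # Read it back to verify.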
@ -0,0 +1,22 @@
# LED Control Example
#
# This example shows how to control the RGB LED.
import time
from pyb import LED

red_led   = LED(1)
green_led = LED(2)
blue_led  = LED(3)

def led_control(x):
    if (x&1)==0: red_led.off()
    elif (x&1)==1: red_led.on()
    if (x&2)==0: green_led.off()
    elif (x&2)==2: green_led.on()
    if (x&4)==0: blue_led.off()
    elif (x&4)==4: blue_led.on()

while(True):
    for i in range(16):
        led_control(i)
        time.sleep_ms(500)
@ -0,0 +1,19 @@
import time

@micropython.asm_thumb      # Inline Thumb assembly emitter.
def asm():
    movw(r0, 42)            # The function returns whatever is left in r0.

@micropython.viper          # Viper emitter: native machine-word types.
def viper(a, b):
    return a + b

@micropython.native         # Native emitter: compiles bytecode to machine code.
def native(a, b):
    return a + b


print(asm())
print(viper(1, 2))
print(native(1, 2))
@ -0,0 +1,13 @@
# Pin Control Example
#
# This example shows how to use the I/O pins in GPIO mode.

from pyb import Pin

# Connect a switch to GPIO1 that will pull it low when the switch is closed.
# GPIO2 will then be driven high.
pin0 = Pin('GPIO1', Pin.IN, Pin.PULL_UP)
pin1 = Pin('GPIO2', Pin.OUT_PP, Pin.PULL_NONE)

while(True):
    pin1.value(not pin0.value())
@ -0,0 +1,30 @@
# PWM Control Example
#
# This example shows how to use PWM.

import time
from pyb import Pin, Timer

class PWM():
    def __init__(self, pin, tim, ch):
        self.pin = pin
        self.tim = tim
        self.ch = ch

pwms = {
    'PWM1' : PWM('PE12', 1, 1),
    'PWM2' : PWM('PE11', 1, 2),
    # 'PWM3' : PWM('PA9', 1, 2),
    'PWM3' : PWM('PA10', 1, 3),
    'PWM4' : PWM('PE14', 1, 4),
    'PWM5' : PWM('PB8', 4, 3),
    'PWM6' : PWM('PB9', 4, 4),
}

# Generate a 1KHz square wave with a 50% duty cycle on the following PWM pins.
for k, pwm in pwms.items():
    tim = Timer(pwm.tim, freq=1000) # Frequency in Hz
    ch = tim.channel(pwm.ch, Timer.PWM, pin=Pin(pwm.pin), pulse_width_percent=50)

while (True):
    time.sleep_ms(1000)
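As a variation on the idle loop above, a minimal sketch of changing the duty cycle at runtime on the last channel created (pulse_width_percent() is the pyb Timer channel setter used in PWM mode):

while (True):
    for duty in range(0, 101, 10):   # Sweep the duty cycle from 0% to 100% in 10% steps.
        ch.pulse_width_percent(duty)
        time.sleep_ms(100)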
@ -0,0 +1,12 @@
# RTC Example
#
# This example shows how to use the RTC.
import time
from pyb import RTC

rtc = RTC()
rtc.datetime((2013, 7, 9, 2, 0, 0, 0, 0))

while (True):
    print(rtc.datetime())
    time.sleep_ms(1000)
@ -0,0 +1,73 @@
# SPI Control
#
# This example shows how to use the SPI bus to control the
# 1.8" TFT LCD display (JD-T18003-T01) with ST7735R driver.

import sensor, image, time
from pyb import Pin, SPI

cs  = Pin("GPIO1", Pin.OUT_OD)
rst = Pin("GPIO2", Pin.OUT_PP)
rs  = Pin("GPIO3", Pin.OUT_PP)

# NOTE: The SPI clock frequency will not always be the requested frequency. The hardware only supports
# frequencies that are the bus frequency divided by a prescaler (which can be 2, 4, 8, 16, 32, 64, 128 or 256).
spi = SPI(4, SPI.MASTER, baudrate=int(1000000000/66), polarity=0, phase=0)

def write_command_byte(c):
    cs.low()
    rs.low()
    spi.send(c)
    cs.high()

def write_data_byte(c):
    cs.low()
    rs.high()
    spi.send(c)
    cs.high()

def write_command(c, *data):
    write_command_byte(c)
    if data:
        for d in data: write_data_byte(d)

def write_image(img):
    cs.low()
    rs.high()
    spi.send(img)
    cs.high()

# Reset the LCD.
rst.low()
time.sleep_ms(100)
rst.high()
time.sleep_ms(100)

write_command(0x11) # Sleep Exit
time.sleep_ms(120)

# Memory Data Access Control
# Write 0xC8 for BGR mode.
write_command(0x36, 0xC0)

# Interface Pixel Format
write_command(0x3A, 0x05)

# Display On
write_command(0x29)

sensor.reset() # Initialize the camera sensor.
sensor.set_pixformat(sensor.RGB565)
sensor.set_framesize(sensor.QQVGA2)
sensor.skip_frames(time = 2000) # Let new settings take effect.
clock = time.clock() # Tracks FPS.

while(True):
    clock.tick()            # Track elapsed milliseconds between snapshots().
    img = sensor.snapshot() # Take a picture and return the image.

    write_command(0x2C) # Write image command...
    write_image(img)

    print(clock.fps()) # Note: Your OpenMV Cam runs about half as fast while
                       # connected to your computer. The FPS should increase once disconnected.
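The prescaler note above can be illustrated numerically. A small sketch, assuming a hypothetical 54 MHz peripheral bus clock feeding SPI4 (the real bus clock depends on the chip's clock tree, so treat the numbers as examples only):

bus_clk = 54_000_000  # Assumed APB clock for SPI4; check the clock tree for the real value.
for prescaler in (2, 4, 8, 16, 32, 64, 128, 256):
    print("prescaler %3d -> %.3f MHz" % (prescaler, bus_clk / prescaler / 1000000))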
@ -0,0 +1,19 @@
# Timer Control Example
#
# This example shows how to use a timer for callbacks.

import time
from pyb import Pin, Timer, LED

blue_led = LED(3)

# The callback receives the timer object that triggered it.
# Note: functions that allocate memory are not allowed in callbacks.
def tick(timer):
    blue_led.toggle()

tim = Timer(2, freq=1) # create a timer object using timer 2 - trigger at 1Hz
tim.callback(tick)     # set the callback to our tick function

while (True):
    time.sleep_ms(1000)
@ -0,0 +1,13 @@
# UART Control
#
# This example shows how to use the serial port on your OpenMV Cam.

import time
from pyb import UART

# Init UART object.
uart = UART(4, 19200)

while(True):
    uart.write("Hello World!\r")
    time.sleep_ms(1000)
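A minimal read-back sketch on the same UART, offered as an alternative to the transmit-only loop above (the 10 ms poll interval is an arbitrary choice):

while(True):
    if uart.any():         # Any bytes waiting in the receive buffer?
        data = uart.read() # Read everything available...
        uart.write(data)   # ...and echo it back.
    time.sleep_ms(10)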
@ -0,0 +1,31 @@
# Making OpenMV Camera act as a Mouse using HID.
#
# First we need to create a boot.py file to change the default USB mode (VCP+MSC).
# Note: It is recommended to save this file to the uSD card, not the flash storage.
# This will make it easier to restore the default OpenMV (VCP+MSC) USB mode later
# by just deleting boot.py from the uSD using the PC.
#
# Add the following script to boot.py:
#
##import pyb #(UNCOMMENT THIS LINE!)
##pyb.usb_mode('VCP+HID') # serial device + mouse (UNCOMMENT THIS LINE!)
##pyb.usb_mode('VCP+MSC') # serial device + storage device (default)
##pyb.usb_mode('VCP+HID', hid=pyb.hid_keyboard) # serial device + keyboard
#
# Copy boot.py to the root of the uSD card and restart the camera; it should now
# act as a serial device and a mouse.
#
# Connect to the camera using the IDE and run this script; you should see the mouse move.
#
# Note: To restore the default VCP+MSC USB mode, either use the PC to remove boot.py
# from the uSD card, or use the following Python line: import os; os.remove('boot.py')

import pyb, time

hid = pyb.USB_HID()

while(True):
    # x, y and scroll
    # move 10 pixels to the right
    hid.send((0, 10, 0, 0))
    time.sleep_ms(500)
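For convenience, the boot.py described above written out uncommented (the contents are taken directly from the commented lines in the example; save it to the uSD card):

# boot.py -- switch the USB mode to serial device + mouse.
import pyb
pyb.usb_mode('VCP+HID')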
@ -0,0 +1,37 @@
# USB VCP example.
# This example shows how to use the USB VCP class to send an image to the PC on demand.
#
# WARNING:
# This script should NOT be run from the IDE or command line; it should be saved as main.py.
# Note the following commented script shows how to receive the image from the host side.
#
# #!/usr/bin/env python2.7
# import sys, serial, struct
# port = '/dev/ttyACM0'
# sp = serial.Serial(port, baudrate=115200, bytesize=serial.EIGHTBITS, parity=serial.PARITY_NONE,
#                    xonxoff=False, rtscts=False, stopbits=serial.STOPBITS_ONE, timeout=None, dsrdtr=True)
# sp.setDTR(True) # dsrdtr is ignored on Windows.
# sp.write("snap")
# sp.flush()
# size = struct.unpack('<L', sp.read(4))[0]
# img = sp.read(size)
# sp.close()
#
# with open("img.jpg", "w") as f:
#     f.write(img)

import sensor, image, time, ustruct
from pyb import USB_VCP

usb = USB_VCP()
sensor.reset()                      # Reset and initialize the sensor.
sensor.set_pixformat(sensor.RGB565) # Set pixel format to RGB565 (or GRAYSCALE)
sensor.set_framesize(sensor.QVGA)   # Set frame size to QVGA (320x240)
sensor.skip_frames(time = 2000)     # Wait for settings to take effect.

while(True):
    cmd = usb.recv(4, timeout=5000)
    if (cmd == b'snap'):
        img = sensor.snapshot().compress()
        usb.send(ustruct.pack("<L", img.size()))
        usb.send(img)
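The commented host-side snippet above targets Python 2.7. A Python 3 variant, kept commented in the same style (pyserial assumed; commands become bytes and the output file is opened in binary mode):

# import serial, struct
# sp = serial.Serial('/dev/ttyACM0', baudrate=115200, timeout=None, dsrdtr=True)
# sp.setDTR(True)                            # dsrdtr is ignored on Windows.
# sp.write(b"snap")                          # Commands are bytes in Python 3.
# sp.flush()
# size = struct.unpack('<L', sp.read(4))[0]  # 4-byte little-endian image size.
# img = sp.read(size)
# sp.close()
# with open("img.jpg", "wb") as f:           # Binary mode for JPEG data.
#     f.write(img)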
@ -0,0 +1,22 @@
# VSYNC GPIO output example.
#
# This example shows how to toggle a pin on VSYNC interrupt.

import sensor, image, time
from pyb import Pin

sensor.reset()                      # Reset and initialize the sensor.
sensor.set_pixformat(sensor.RGB565) # Set pixel format to RGB565 (or GRAYSCALE)
sensor.set_framesize(sensor.QVGA)   # Set frame size to QVGA (320x240)

# This pin will be toggled on/off on VSYNC rising and falling edges.
led_pin = Pin('LEDB', Pin.OUT_PP, Pin.PULL_NONE)
sensor.set_vsync_callback(lambda state, led=led_pin: led_pin.value(state))

clock = time.clock() # Create a clock object to track the FPS.

while(True):
    clock.tick()            # Update the FPS clock.
    img = sensor.snapshot() # Take a picture and return the image.
    print(clock.fps())      # Note: OpenMV Cam runs about half as fast when connected
                            # to the IDE. The FPS should increase once disconnected.
@ -0,0 +1,31 @@
# Arrow Drawing
#
# This example shows off drawing arrows on the OpenMV Cam.

import sensor, image, time, pyb

sensor.reset()
sensor.set_pixformat(sensor.RGB565) # or GRAYSCALE...
sensor.set_framesize(sensor.QVGA)   # or QQVGA...
sensor.skip_frames(time = 2000)
clock = time.clock()

while(True):
    clock.tick()

    img = sensor.snapshot()

    for i in range(10):
        x0 = (pyb.rng() % (2*img.width())) - (img.width()//2)
        y0 = (pyb.rng() % (2*img.height())) - (img.height()//2)
        x1 = (pyb.rng() % (2*img.width())) - (img.width()//2)
        y1 = (pyb.rng() % (2*img.height())) - (img.height()//2)
        r = (pyb.rng() % 127) + 128
        g = (pyb.rng() % 127) + 128
        b = (pyb.rng() % 127) + 128

        # If the first argument is a scalar then this method expects
        # to see x0, y0, x1, and y1. Otherwise, it expects a (x0,y0,x1,y1) tuple.
        img.draw_arrow(x0, y0, x1, y1, color = (r, g, b), size = 30, thickness = 2)

    print(clock.fps())
@ -0,0 +1,31 @@
# Circle Drawing
#
# This example shows off drawing circles on the OpenMV Cam.

import sensor, image, time, pyb

sensor.reset()
sensor.set_pixformat(sensor.RGB565) # or GRAYSCALE...
sensor.set_framesize(sensor.QVGA)   # or QQVGA...
sensor.skip_frames(time = 2000)
clock = time.clock()

while(True):
    clock.tick()

    img = sensor.snapshot()

    for i in range(10):
        x = (pyb.rng() % (2*img.width())) - (img.width()//2)
        y = (pyb.rng() % (2*img.height())) - (img.height()//2)
        radius = pyb.rng() % (max(img.height(), img.width())//2)

        r = (pyb.rng() % 127) + 128
        g = (pyb.rng() % 127) + 128
        b = (pyb.rng() % 127) + 128

        # If the first argument is a scalar then this method expects
        # to see x, y, and radius. Otherwise, it expects a (x,y,radius) tuple.
        img.draw_circle(x, y, radius, color = (r, g, b), thickness = 2, fill = False)

    print(clock.fps())
scripts/examples/Arduino/Nicla-Vision/03-Drawing/copy2fb.py (15 lines, new file)
@ -0,0 +1,15 @@
# Copy image to framebuffer.
#
# This example shows how to load and copy an image to framebuffer for testing.

import sensor, image, time

sensor.reset()
sensor.set_framesize(sensor.QQVGA)
sensor.set_pixformat(sensor.GRAYSCALE)

# Load image
img = image.Image("/example.bmp", copy_to_fb=True)

# Add a small delay to allow the IDE to read the loaded image.
time.sleep_ms(500)
@ -0,0 +1,29 @@
# Cross Drawing
#
# This example shows off drawing crosses on the OpenMV Cam.

import sensor, image, time, pyb

sensor.reset()
sensor.set_pixformat(sensor.RGB565) # or GRAYSCALE...
sensor.set_framesize(sensor.QVGA)   # or QQVGA...
sensor.skip_frames(time = 2000)
clock = time.clock()

while(True):
    clock.tick()

    img = sensor.snapshot()

    for i in range(10):
        x = (pyb.rng() % (2*img.width())) - (img.width()//2)
        y = (pyb.rng() % (2*img.height())) - (img.height()//2)
        r = (pyb.rng() % 127) + 128
        g = (pyb.rng() % 127) + 128
        b = (pyb.rng() % 127) + 128

        # If the first argument is a scalar then this method expects
        # to see x and y. Otherwise, it expects a (x,y) tuple.
        img.draw_cross(x, y, color = (r, g, b), size = 10, thickness = 2)

    print(clock.fps())
@ -0,0 +1,35 @@
# Ellipse Drawing
#
# This example shows off drawing ellipses on the OpenMV Cam.

import sensor, image, time, pyb

sensor.reset()
sensor.set_pixformat(sensor.RGB565) # or GRAYSCALE...
sensor.set_framesize(sensor.QVGA)   # or QQVGA...
sensor.skip_frames(time = 2000)
clock = time.clock()

while(True):
    clock.tick()

    img = sensor.snapshot()

    for i in range(10):
        x = (pyb.rng() % (2*img.width())) - (img.width()//2)
        y = (pyb.rng() % (2*img.height())) - (img.height()//2)
        radius_x = pyb.rng() % (max(img.height(), img.width())//2)
        radius_y = pyb.rng() % (max(img.height(), img.width())//2)
        rot = pyb.rng()

        r = (pyb.rng() % 127) + 128
        g = (pyb.rng() % 127) + 128
        b = (pyb.rng() % 127) + 128

        # If the first argument is a scalar then this method expects
        # to see x, y, radius x, and radius y.
        # Otherwise, it expects a (x,y,radius_x,radius_y) tuple.
        img.draw_ellipse(x, y, radius_x, radius_y, rot,
                         color = (r, g, b), thickness = 2, fill = False)

    print(clock.fps())
@ -0,0 +1,35 @@
# Flood Fill
#
# This example shows off flood filling areas in the image.

import sensor, image, time

sensor.reset()
sensor.set_pixformat(sensor.RGB565) # or GRAYSCALE...
sensor.set_framesize(sensor.QVGA)   # or QQVGA...
sensor.skip_frames(time = 2000)
clock = time.clock()

while(True):
    clock.tick()

    # seed_threshold controls the maximum allowed difference between
    # the initial pixel and any filled pixels. It's important to
    # set this such that flood fill doesn't fill the whole image.

    # floating_threshold controls the maximum allowed difference
    # between any two pixels. This can easily fill the whole image
    # with even a very low threshold.

    # flood_fill will fill pixels that satisfy both thresholds.

    # You can invert what gets filled with "invert" and clear
    # everything but the filled area with "clear_background".

    x = sensor.width() // 2
    y = sensor.height() // 2
    img = sensor.snapshot().flood_fill(x, y, \
          seed_threshold=0.05, floating_thresholds=0.05, \
          color=(255, 0, 0), invert=False, clear_background=False)

    print(clock.fps())
@ -0,0 +1,25 @@
# Draw Image Example
#
# This example shows off how to draw images in the frame buffer.

import sensor, image, time, pyb

sensor.reset()
sensor.set_pixformat(sensor.RGB565) # or GRAYSCALE...
sensor.set_framesize(sensor.QVGA)   # or QQVGA...
sensor.skip_frames(time = 2000)
clock = time.clock()

while(True):
    clock.tick()

    img = sensor.snapshot()
    small_img = img.mean_pooled(4, 4) # Makes a copy.

    x = (img.width()//2)-(small_img.width()//2)
    y = (img.height()//2)-(small_img.height()//2)
    # Draws an image in the frame buffer. Pass an optional
    # mask image to control what pixels are drawn.
    img.draw_image(small_img, x, y, x_scale=1, y_scale=1)

    print(clock.fps())
@ -0,0 +1,93 @@
# Draw Image Testing script with bounce
#
# Exercise draw image with many different values for testing

import sensor, image, time, pyb

sensor.reset()
sensor.set_pixformat(sensor.RGB565) # or GRAYSCALE...
sensor.set_framesize(sensor.QVGA)   # or QQVGA...
sensor.skip_frames(time = 2000)
clock = time.clock()

BOUNCE = True
RESCALE = True

SMALL_IMAGE_SCALE = 3

CYCLE_FORMATS = True
CYCLE_MASK = True

# Used when CYCLE_FORMATS or CYCLE_MASK is true
value_mixer = 0

# Location of small image
x = 100
y = 50

# Bounce direction
xd = 1
yd = 1

# Small image scaling
rescale = 1.0
rd = 0.1
max_rescale = 5
min_rescale = rd * 2

# Boundary to bounce within
xmin = -sensor.width() / SMALL_IMAGE_SCALE - 8
ymin = -sensor.height() / SMALL_IMAGE_SCALE - 8
xmax = sensor.width() + 8
ymax = sensor.height() + 8

while(True):
    clock.tick()

    status = ""
    value_mixer = value_mixer + 1

    img = sensor.snapshot()
    # Makes a scaled copy of the sensor image.
    small_img = img.mean_pooled(SMALL_IMAGE_SCALE, SMALL_IMAGE_SCALE)

    status = 'rgb565 '
    if CYCLE_FORMATS:
        image_format = (value_mixer >> 8) & 3
        # To test combining different formats
        if (image_format==1): small_img = small_img.to_bitmap(copy=True); status = 'bitmap '
        if (image_format==2): small_img = small_img.to_grayscale(copy=True); status = 'grayscale '
        if (image_format==3): small_img = small_img.to_rgb565(copy=True); status = 'rgb565 '

    # Update small image location
    if BOUNCE:
        x = x + xd
        if (x<xmin or x>xmax):
            xd = -xd

        y = y + yd
        if (y<ymin or y>ymax):
            yd = -yd

    # Update small image scale
    if RESCALE:
        rescale = rescale + rd
        if (rescale<min_rescale or rescale>max_rescale):
            rd = -rd

    # Compute the scaled size of the small image.
    scaled_width = int(small_img.width() * abs(rescale))
    scaled_height = int(small_img.height() * abs(rescale))

    apply_mask = CYCLE_MASK and ((value_mixer >> 9) & 1)
    if apply_mask:
        img.draw_image(small_img, int(x), int(y), mask=small_img.to_bitmap(copy=True), x_scale=rescale, y_scale=rescale, alpha=240, hint=image.IMAGE_HINT_BILINEAR | image.IMAGE_HINT_CENTER)
        status += 'alpha:240 '
        status += '+mask '
    else:
        img.draw_image(small_img, int(x), int(y), x_scale=rescale, y_scale=rescale, alpha=128, hint=image.IMAGE_HINT_BILINEAR | image.IMAGE_HINT_CENTER)
        status += 'alpha:128 '

    img.draw_string(8, 0, status, mono_space = False)

    print(clock.fps())
@ -0,0 +1,71 @@
# Image Drawing Alpha Blending Test
#
# This script tests the performance and quality of the draw_image()
# method which can perform nearest neighbor, bilinear, bicubic, and
# area scaling along with color channel extraction, alpha blending,
# color palette application, and alpha palette application.

import sensor, image, time

sensor.reset()
sensor.set_pixformat(sensor.RGB565)
sensor.set_framesize(sensor.QVGA)

hint = image.BICUBIC # image.BILINEAR image.BICUBIC

small_img = image.Image(4, 4, sensor.RGB565)
small_img.set_pixel(0, 0, (0, 0, 127))
small_img.set_pixel(1, 0, (47, 255, 199))
small_img.set_pixel(2, 0, (0, 188, 255))
small_img.set_pixel(3, 0, (0, 0, 127))
small_img.set_pixel(0, 1, (0, 176, 255))
small_img.set_pixel(1, 1, (222, 0, 0))
small_img.set_pixel(2, 1, (50, 255, 195))
small_img.set_pixel(3, 1, (86, 255, 160))
small_img.set_pixel(0, 2, (255, 211, 0))
small_img.set_pixel(1, 2, (83, 255, 163))
small_img.set_pixel(2, 2, (255, 211, 0))
small_img.set_pixel(3, 2, (0, 80, 255))
small_img.set_pixel(0, 3, (255, 118, 0))
small_img.set_pixel(1, 3, (127, 0, 0))
small_img.set_pixel(2, 3, (0, 144, 255))
small_img.set_pixel(3, 3, (50, 255, 195))
#small_img.to_grayscale()
#small_img.to_bitmap()

big_img = image.Image(128, 128, sensor.RGB565)
big_img.draw_image(small_img, 0, 0, x_scale=32, y_scale=32, hint=hint)
#big_img.to_grayscale()
#big_img.to_bitmap()

alpha_div = 1
alpha_value = 0
alpha_step = 2

x_bounce = sensor.width()//2
x_bounce_toggle = 1

y_bounce = sensor.height()//2
y_bounce_toggle = 1

clock = time.clock()
while(True):
    clock.tick()

    img = sensor.snapshot()
    #img.to_grayscale()
    #img.to_bitmap()
    img.draw_image(big_img, x_bounce, y_bounce,
                   rgb_channel=-1, alpha=alpha_value//alpha_div,
                   hint=hint|image.CENTER)

    x_bounce += x_bounce_toggle
    if abs(x_bounce-(img.width()//2)) >= (img.width()//2): x_bounce_toggle = -x_bounce_toggle

    y_bounce += y_bounce_toggle
    if abs(y_bounce-(img.height()//2)) >= (img.height()//2): y_bounce_toggle = -y_bounce_toggle

    alpha_value += alpha_step
    if not alpha_value or alpha_value//alpha_div == 256: alpha_step = -alpha_step

    print(clock.fps())
@ -0,0 +1,81 @@
# Image Drawing Color Table with Alpha Blending Test
#
# This script tests the performance and quality of the draw_image()
# method which can perform nearest neighbor, bilinear, bicubic, and
# area scaling along with color channel extraction, alpha blending,
# color palette application, and alpha palette application.

import sensor, image, time

sensor.reset()
sensor.set_pixformat(sensor.RGB565)
sensor.set_framesize(sensor.QVGA)

hint = image.BICUBIC # image.BILINEAR image.BICUBIC

# RGB channel extraction is normally done after scaling, which
# may produce false colors. Set this flag to do it before scaling.
#
hint |= 0 # image.EXTRACT_RGB_CHANNEL_FIRST

# Color table application is normally done after scaling, which
# may produce false colors. Set this flag to do it before scaling.
#
hint |= 0 # image.APPLY_COLOR_PALETTE_FIRST

small_img = image.Image(4, 4, sensor.RGB565)
small_img.set_pixel(0, 0, (0, 0, 127))
small_img.set_pixel(1, 0, (47, 255, 199))
small_img.set_pixel(2, 0, (0, 188, 255))
small_img.set_pixel(3, 0, (0, 0, 127))
small_img.set_pixel(0, 1, (0, 176, 255))
small_img.set_pixel(1, 1, (222, 0, 0))
small_img.set_pixel(2, 1, (50, 255, 195))
small_img.set_pixel(3, 1, (86, 255, 160))
small_img.set_pixel(0, 2, (255, 211, 0))
small_img.set_pixel(1, 2, (83, 255, 163))
small_img.set_pixel(2, 2, (255, 211, 0))
small_img.set_pixel(3, 2, (0, 80, 255))
small_img.set_pixel(0, 3, (255, 118, 0))
small_img.set_pixel(1, 3, (127, 0, 0))
small_img.set_pixel(2, 3, (0, 144, 255))
small_img.set_pixel(3, 3, (50, 255, 195))
#small_img.to_grayscale()
#small_img.to_bitmap()

big_img = image.Image(128, 128, sensor.RGB565)
big_img.draw_image(small_img, 0, 0, x_scale=32, y_scale=32, hint=hint)
#big_img.to_grayscale()
#big_img.to_bitmap()

alpha_div = 1
alpha_value = 0
alpha_step = 2

x_bounce = sensor.width()//2
x_bounce_toggle = 1

y_bounce = sensor.height()//2
y_bounce_toggle = 1

clock = time.clock()
while(True):
    clock.tick()

    img = sensor.snapshot()
    #img.to_grayscale()
    #img.to_bitmap()
    img.draw_image(big_img, x_bounce, y_bounce,
                   rgb_channel=-1, alpha=alpha_value//alpha_div,
                   color_palette=sensor.PALETTE_IRONBOW, hint=hint|image.CENTER)

    x_bounce += x_bounce_toggle
    if abs(x_bounce-(img.width()//2)) >= (img.width()//2): x_bounce_toggle = -x_bounce_toggle

    y_bounce += y_bounce_toggle
    if abs(y_bounce-(img.height()//2)) >= (img.height()//2): y_bounce_toggle = -y_bounce_toggle

    alpha_value += alpha_step
    if not alpha_value or alpha_value//alpha_div == 256: alpha_step = -alpha_step

    print(clock.fps())
@ -0,0 +1,75 @@
# Image Drawing Alpha Table Test
#
# This script tests the performance and quality of the draw_image()
# method which can perform nearest neighbor, bilinear, bicubic, and
# area scaling along with color channel extraction, alpha blending,
# color palette application, and alpha palette application.

import sensor, image, time

sensor.reset()
sensor.set_pixformat(sensor.RGB565)
sensor.set_framesize(sensor.QVGA)

hint = image.BICUBIC # image.BILINEAR image.BICUBIC

small_img = image.Image(4, 4, sensor.RGB565)
small_img.set_pixel(0, 0, (0, 0, 127))
small_img.set_pixel(1, 0, (47, 255, 199))
small_img.set_pixel(2, 0, (0, 188, 255))
small_img.set_pixel(3, 0, (0, 0, 127))
small_img.set_pixel(0, 1, (0, 176, 255))
small_img.set_pixel(1, 1, (222, 0, 0))
small_img.set_pixel(2, 1, (50, 255, 195))
small_img.set_pixel(3, 1, (86, 255, 160))
small_img.set_pixel(0, 2, (255, 211, 0))
small_img.set_pixel(1, 2, (83, 255, 163))
small_img.set_pixel(2, 2, (255, 211, 0))
small_img.set_pixel(3, 2, (0, 80, 255))
small_img.set_pixel(0, 3, (255, 118, 0))
small_img.set_pixel(1, 3, (127, 0, 0))
small_img.set_pixel(2, 3, (0, 144, 255))
small_img.set_pixel(3, 3, (50, 255, 195))
#small_img.to_grayscale()
#small_img.to_bitmap()

big_img = image.Image(128, 128, sensor.RGB565)
big_img.draw_image(small_img, 0, 0, x_scale=32, y_scale=32, hint=hint)
#big_img.to_grayscale()
#big_img.to_bitmap()

alpha_lut = image.Image(256, 1, sensor.GRAYSCALE)
for i in range(256):
    alpha_lut.set_pixel(i, 0, 255 if i > 127 else 0)

alpha_div = 1
alpha_value = 0
alpha_step = 2

x_bounce = sensor.width()//2
x_bounce_toggle = 1

y_bounce = sensor.height()//2
y_bounce_toggle = 1

clock = time.clock()
while(True):
    clock.tick()

    img = sensor.snapshot()
    #img.to_grayscale()
    #img.to_bitmap()
    img.draw_image(big_img, x_bounce, y_bounce,
                   rgb_channel=-1, alpha=alpha_value//alpha_div,
                   alpha_palette=alpha_lut, hint=hint|image.CENTER)

    x_bounce += x_bounce_toggle
    if abs(x_bounce-(img.width()//2)) >= (img.width()//2): x_bounce_toggle = -x_bounce_toggle

    y_bounce += y_bounce_toggle
    if abs(y_bounce-(img.height()//2)) >= (img.height()//2): y_bounce_toggle = -y_bounce_toggle

    alpha_value += alpha_step
    if not alpha_value or alpha_value//alpha_div == 256: alpha_step = -alpha_step

    print(clock.fps())
@ -0,0 +1,85 @@
# Image Drawing Color Table with Alpha Table Test
#
# This script tests the performance and quality of the draw_image()
# method which can perform nearest neighbor, bilinear, bicubic, and
# area scaling along with color channel extraction, alpha blending,
# color palette application, and alpha palette application.

import sensor, image, time

sensor.reset()
sensor.set_pixformat(sensor.RGB565)
sensor.set_framesize(sensor.QVGA)

hint = image.BICUBIC # image.BILINEAR image.BICUBIC

# RGB channel extraction is normally done after scaling, which
# may produce false colors. Set this flag to do it before scaling.
#
hint |= 0 # image.EXTRACT_RGB_CHANNEL_FIRST

# Color table application is normally done after scaling, which
# may produce false colors. Set this flag to do it before scaling.
#
hint |= 0 # image.APPLY_COLOR_PALETTE_FIRST

small_img = image.Image(4, 4, sensor.RGB565)
small_img.set_pixel(0, 0, (0, 0, 127))
small_img.set_pixel(1, 0, (47, 255, 199))
small_img.set_pixel(2, 0, (0, 188, 255))
small_img.set_pixel(3, 0, (0, 0, 127))
small_img.set_pixel(0, 1, (0, 176, 255))
small_img.set_pixel(1, 1, (222, 0, 0))
small_img.set_pixel(2, 1, (50, 255, 195))
small_img.set_pixel(3, 1, (86, 255, 160))
small_img.set_pixel(0, 2, (255, 211, 0))
small_img.set_pixel(1, 2, (83, 255, 163))
small_img.set_pixel(2, 2, (255, 211, 0))
small_img.set_pixel(3, 2, (0, 80, 255))
small_img.set_pixel(0, 3, (255, 118, 0))
small_img.set_pixel(1, 3, (127, 0, 0))
small_img.set_pixel(2, 3, (0, 144, 255))
small_img.set_pixel(3, 3, (50, 255, 195))
#small_img.to_grayscale()
#small_img.to_bitmap()

big_img = image.Image(128, 128, sensor.RGB565)
big_img.draw_image(small_img, 0, 0, x_scale=32, y_scale=32, hint=hint)
#big_img.to_grayscale()
#big_img.to_bitmap()

alpha_lut = image.Image(256, 1, sensor.GRAYSCALE)
for i in range(256):
    alpha_lut.set_pixel(i, 0, 255 if i > 127 else 0)

alpha_div = 1
alpha_value = 0
alpha_step = 2

x_bounce = sensor.width()//2
x_bounce_toggle = 1

y_bounce = sensor.height()//2
y_bounce_toggle = 1

clock = time.clock()
while(True):
    clock.tick()

    img = sensor.snapshot()
    #img.to_grayscale()
    #img.to_bitmap()
    img.draw_image(big_img, x_bounce, y_bounce,
                   rgb_channel=-1, alpha=alpha_value//alpha_div,
                   color_palette=sensor.PALETTE_IRONBOW, alpha_palette=alpha_lut, hint=hint|image.CENTER)

    x_bounce += x_bounce_toggle
    if abs(x_bounce-(img.width()//2)) >= (img.width()//2): x_bounce_toggle = -x_bounce_toggle

    y_bounce += y_bounce_toggle
    if abs(y_bounce-(img.height()//2)) >= (img.height()//2): y_bounce_toggle = -y_bounce_toggle

    alpha_value += alpha_step
    if not alpha_value or alpha_value//alpha_div == 256: alpha_step = -alpha_step

    print(clock.fps())
@ -0,0 +1,69 @@
# Image Scaling Down Drawing Test
#
# This script tests the performance and quality of the draw_image()
# method which can perform nearest neighbor, bilinear, bicubic, and
# area scaling along with color channel extraction, alpha blending,
# color palette application, and alpha palette application.

# DISABLE THE FRAME BUFFER TO SEE THE REAL FPS

import sensor, image, time

up_hint = 0 # image.BILINEAR image.BICUBIC
down_hint = image.AREA # image.BILINEAR image.BICUBIC image.AREA

bounce_div = 128

medium_img = image.Image(32, 32, sensor.RGB565, copy_to_fb=True)
#medium_img.to_grayscale()
#medium_img.to_bitmap()

small_img = image.Image(4, 4, sensor.RGB565)
small_img.set_pixel(0, 0, (0, 0, 127))
small_img.set_pixel(1, 0, (47, 255, 199))
small_img.set_pixel(2, 0, (0, 188, 255))
small_img.set_pixel(3, 0, (0, 0, 127))
small_img.set_pixel(0, 1, (0, 176, 255))
small_img.set_pixel(1, 1, (222, 0, 0))
small_img.set_pixel(2, 1, (50, 255, 195))
small_img.set_pixel(3, 1, (86, 255, 160))
small_img.set_pixel(0, 2, (255, 211, 0))
small_img.set_pixel(1, 2, (83, 255, 163))
small_img.set_pixel(2, 2, (255, 211, 0))
small_img.set_pixel(3, 2, (0, 80, 255))
small_img.set_pixel(0, 3, (255, 118, 0))
small_img.set_pixel(1, 3, (127, 0, 0))
small_img.set_pixel(2, 3, (0, 144, 255))
small_img.set_pixel(3, 3, (50, 255, 195))
#small_img.to_grayscale()
#small_img.to_bitmap()

big_img = image.Image(128, 128, sensor.RGB565)
big_img.draw_image(small_img, 0, 0, x_scale=32, y_scale=32, hint=up_hint)
#big_img.to_grayscale()
#big_img.to_bitmap()

x_bounce = 0
x_bounce_toggle = 0

y_bounce = 0
y_bounce_toggle = 0

clock = time.clock()
while(True):
    clock.tick()

    medium_img.clear()
    medium_img.draw_image(big_img,
                          x_bounce // bounce_div, y_bounce // bounce_div,
                          x_scale=0.25, y_scale=0.25,
                          hint=down_hint)
    sensor.flush()

    x_bounce += x_bounce_toggle
    if abs(x_bounce // bounce_div) >= (medium_img.width()*1.1): x_bounce_toggle = -x_bounce_toggle

    y_bounce += y_bounce_toggle
    if abs(y_bounce // bounce_div) >= (medium_img.height()*1.1): y_bounce_toggle = -y_bounce_toggle

    print(clock.fps())
@ -0,0 +1,63 @@
# Image Scaling Up Drawing Test
#
# This script tests the performance and quality of the draw_image()
# method which can perform nearest neighbor, bilinear, bicubic, and
# area scaling along with color channel extraction, alpha blending,
# color palette application, and alpha palette application.

# DISABLE THE FRAME BUFFER TO SEE THE REAL FPS

import sensor, image, time

hint = 0 # image.BILINEAR image.BICUBIC

bounce_div = 32

big_img = image.Image(128, 128, sensor.RGB565, copy_to_fb=True)
#big_img.to_grayscale()
#big_img.to_bitmap()

small_img = image.Image(4, 4, sensor.RGB565)
small_img.set_pixel(0, 0, (0, 0, 127))
small_img.set_pixel(1, 0, (47, 255, 199))
small_img.set_pixel(2, 0, (0, 188, 255))
small_img.set_pixel(3, 0, (0, 0, 127))
small_img.set_pixel(0, 1, (0, 176, 255))
small_img.set_pixel(1, 1, (222, 0, 0))
small_img.set_pixel(2, 1, (50, 255, 195))
small_img.set_pixel(3, 1, (86, 255, 160))
small_img.set_pixel(0, 2, (255, 211, 0))
small_img.set_pixel(1, 2, (83, 255, 163))
small_img.set_pixel(2, 2, (255, 211, 0))
small_img.set_pixel(3, 2, (0, 80, 255))
small_img.set_pixel(0, 3, (255, 118, 0))
small_img.set_pixel(1, 3, (127, 0, 0))
small_img.set_pixel(2, 3, (0, 144, 255))
small_img.set_pixel(3, 3, (50, 255, 195))
#small_img.to_grayscale()
#small_img.to_bitmap()

x_bounce = 0
x_bounce_toggle = 0

y_bounce = 0
y_bounce_toggle = 0

clock = time.clock()
while(True):
    clock.tick()

    big_img.clear()
    big_img.draw_image(small_img,
                       x_bounce // bounce_div, y_bounce // bounce_div,
                       x_scale=32, y_scale=32,
                       hint=hint)
    sensor.flush()

    x_bounce += x_bounce_toggle
    if abs(x_bounce // bounce_div) >= (big_img.width()*1.1): x_bounce_toggle = -x_bounce_toggle

    y_bounce += y_bounce_toggle
    if abs(y_bounce // bounce_div) >= (big_img.height()*1.1): y_bounce_toggle = -y_bounce_toggle

    print(clock.fps())
@ -0,0 +1,43 @@
# Draw Image Example with custom color palette
#
# This example shows off how to draw images in the frame buffer with a custom generated color palette.

import sensor, image, time, pyb

sensor.reset()
sensor.set_pixformat(sensor.GRAYSCALE) # grayscale input; the palette maps gray levels to colors.
sensor.set_framesize(sensor.QQVGA)     # or QVGA...
sensor.skip_frames(time = 2000)
clock = time.clock()

# The color palette is actually an image; this allows you to use image ops to create palettes.
# The image must have 256 entries, i.e. 256x1, 64x4, or 16x16, and must be in RGB565 format.

# Initialise palette source colors into an image
palette_source_colors = [(255, 0, 0), (0, 255, 0), (0, 0, 255), (255, 0, 255)]
palette_source_color_image = image.Image(len(palette_source_colors), 1, sensor.RGB565)
for i, color in enumerate(palette_source_colors):
    palette_source_color_image[i] = color

# Scale the source colors to the palette width and smooth them.
palette = image.Image(256, 1, sensor.RGB565)
palette.draw_image(palette_source_color_image, 0, 0, x_scale=palette.width() / palette_source_color_image.width())
palette.mean(int(palette.width() / palette_source_color_image.width()/2))

while(True):
    clock.tick()

    img = sensor.snapshot()
    # Get a copy of the grayscale image before converting it to color.
    img_copy = img.copy()

    img.to_rgb565()

    palette_boundary_inset = int(sensor.width() / 40)
    palette_scale_x = (sensor.width() - palette_boundary_inset * 2) / palette.width()

    img.draw_image(img_copy, 0, 0, color_palette=palette)
    img.draw_image(palette, palette_boundary_inset, palette_boundary_inset, x_scale=palette_scale_x, y_scale=8)
    img.draw_rectangle(palette_boundary_inset, palette_boundary_inset, int(palette.width()*palette_scale_x), 8, color=(255,255,255), thickness=1)

    print(clock.fps())
@ -0,0 +1,31 @@
# Keypoints Drawing
#
# This example shows off drawing keypoints on the OpenMV Cam. Usually you call draw_keypoints()
# on a keypoints object but you can also call it on a list of 3-value tuples...

import sensor, image, time, pyb

sensor.reset()
sensor.set_pixformat(sensor.RGB565) # or GRAYSCALE...
sensor.set_framesize(sensor.QVGA)   # or QQVGA...
sensor.skip_frames(time = 2000)
clock = time.clock()

while(True):
    clock.tick()

    img = sensor.snapshot()

    for i in range(20):
        x = (pyb.rng() % (2*img.width())) - (img.width()//2)
        y = (pyb.rng() % (2*img.height())) - (img.height()//2)
        rot = pyb.rng() % 360

        r = (pyb.rng() % 127) + 128
        g = (pyb.rng() % 127) + 128
        b = (pyb.rng() % 127) + 128

        # This method draws a keypoints object or a list of (x, y, rot) tuples...
        img.draw_keypoints([(x, y, rot)], color = (r, g, b), size = 20, thickness = 2, fill = False)

    print(clock.fps())
@ -0,0 +1,31 @@
# Line Drawing
#
# This example shows off drawing lines on the OpenMV Cam.

import sensor, image, time, pyb

sensor.reset()
sensor.set_pixformat(sensor.RGB565) # or GRAYSCALE...
sensor.set_framesize(sensor.QVGA)   # or QQVGA...
sensor.skip_frames(time = 2000)
clock = time.clock()

while(True):
    clock.tick()

    img = sensor.snapshot()

    for i in range(10):
        x0 = (pyb.rng() % (2*img.width())) - (img.width()//2)
        y0 = (pyb.rng() % (2*img.height())) - (img.height()//2)
        x1 = (pyb.rng() % (2*img.width())) - (img.width()//2)
        y1 = (pyb.rng() % (2*img.height())) - (img.height()//2)
        r = (pyb.rng() % 127) + 128
        g = (pyb.rng() % 127) + 128
        b = (pyb.rng() % 127) + 128

        # If the first argument is a scalar then this method expects
        # to see x0, y0, x1, and y1. Otherwise, it expects a (x0,y0,x1,y1) tuple.
        img.draw_line(x0, y0, x1, y1, color = (r, g, b), thickness = 2)

    print(clock.fps())
@ -0,0 +1,31 @@
# Rectangle Drawing
#
# This example shows off drawing rectangles on the OpenMV Cam.

import sensor, image, time, pyb

sensor.reset()
sensor.set_pixformat(sensor.RGB565) # or GRAYSCALE...
sensor.set_framesize(sensor.QVGA)   # or QQVGA...
sensor.skip_frames(time = 2000)
clock = time.clock()

while(True):
    clock.tick()

    img = sensor.snapshot()

    for i in range(10):
        x = (pyb.rng() % (2*img.width())) - (img.width()//2)
        y = (pyb.rng() % (2*img.height())) - (img.height()//2)
        w = (pyb.rng() % (img.width()//2))
        h = (pyb.rng() % (img.height()//2))
        r = (pyb.rng() % 127) + 128
        g = (pyb.rng() % 127) + 128
        b = (pyb.rng() % 127) + 128

        # If the first argument is a scalar then this method expects
        # to see x, y, w, and h. Otherwise, it expects a (x,y,w,h) tuple.
        img.draw_rectangle(x, y, w, h, color = (r, g, b), thickness = 2, fill = False)

    print(clock.fps())
@ -0,0 +1,33 @@
# Text Drawing
#
# This example shows off drawing text on the OpenMV Cam.

import sensor, image, time, pyb

sensor.reset()
sensor.set_pixformat(sensor.RGB565) # or GRAYSCALE...
sensor.set_framesize(sensor.QVGA)   # or QQVGA...
sensor.skip_frames(time = 2000)
clock = time.clock()

while(True):
    clock.tick()

    img = sensor.snapshot()

    for i in range(10):
        x = (pyb.rng() % (2*img.width())) - (img.width()//2)
        y = (pyb.rng() % (2*img.height())) - (img.height()//2)
        r = (pyb.rng() % 127) + 128
        g = (pyb.rng() % 127) + 128
        b = (pyb.rng() % 127) + 128

        # If the first argument is a scalar then this method expects
        # to see x, y, and text. Otherwise, it expects a (x,y,text) tuple.

        # Character and string rotation can be done at 0, 90, 180, 270, etc. degrees.
        img.draw_string(x, y, "Hello World!", color = (r, g, b), scale = 2, mono_space = False,
                        char_rotation = 0, char_hmirror = False, char_vflip = False,
                        string_rotation = 0, string_hmirror = False, string_vflip = False)

    print(clock.fps())
@ -0,0 +1,29 @@
# Adaptive Histogram Equalization
#
# This example shows off how to use adaptive histogram equalization to improve
# the contrast in the image. Adaptive histogram equalization splits the image
# into regions and then equalizes the histogram in those regions to improve
# the image contrast versus a global histogram equalization. Additionally,
# you may specify a clip limit to prevent the contrast from going wild.

import sensor, image, time

sensor.reset()
sensor.set_pixformat(sensor.RGB565)
sensor.set_framesize(sensor.QQVGA)
sensor.skip_frames(time = 2000)
clock = time.clock()

while(True):
    clock.tick()

    # A clip_limit of < 0 gives you normal adaptive histogram equalization
    # which may result in huge amounts of contrast noise...

    # A clip_limit of 1 does nothing. For best results go slightly higher
    # than 1 like below. The higher you go the closer you get back to
    # standard adaptive histogram equalization with huge contrast swings.

    img = sensor.snapshot().histeq(adaptive=True, clip_limit=3)

    print(clock.fps())
@ -0,0 +1,21 @@
# Blur Filter Example
#
# This example shows off using the Gaussian filter to blur images.

import sensor, image, time

sensor.reset()                         # Initialize the camera sensor.
sensor.set_pixformat(sensor.GRAYSCALE) # or sensor.RGB565
sensor.set_framesize(sensor.QQVGA)     # or sensor.QVGA (or others)
sensor.skip_frames(time = 2000)        # Let new settings take effect.
clock = time.clock()                   # Tracks FPS.

while(True):
    clock.tick()            # Track elapsed milliseconds between snapshots().
    img = sensor.snapshot() # Take a picture and return the image.

    # Run the kernel on every pixel of the image.
    img.gaussian(1)

    print(clock.fps()) # Note: Your OpenMV Cam runs about half as fast while
                       # connected to your computer. The FPS should increase once disconnected.
@ -0,0 +1,29 @@
# Cartoon Filter
#
# This example shows off a simple cartoon filter on images. The cartoon
# filter works by joining similar pixel areas of an image and replacing
# the pixels in those areas with the area mean.

import sensor, image, time

sensor.reset()
sensor.set_pixformat(sensor.RGB565) # or GRAYSCALE...
sensor.set_framesize(sensor.QVGA)   # or QQVGA...
sensor.skip_frames(time = 2000)
clock = time.clock()

while(True):
    clock.tick()

    # seed_threshold controls the maximum area growth of a colored
    # region. Making this larger will merge more pixels.

    # floating_threshold controls the maximum pixel-to-pixel difference
    # when growing a region. Setting this very high will quickly combine
    # all pixels in the image. You should keep this small.

    # cartoon() will grow regions while both thresholds are satisfied...

    img = sensor.snapshot().cartoon(seed_threshold=0.05, floating_thresholds=0.05)

    print(clock.fps())
@ -0,0 +1,33 @@
# Color Bilateral Filter Example
#
# This example shows off using the bilateral filter on color images.

import sensor, image, time

sensor.reset()                      # Initialize the camera sensor.
sensor.set_pixformat(sensor.RGB565) # or sensor.GRAYSCALE
sensor.set_framesize(sensor.QQVGA)  # or sensor.QVGA (or others)
sensor.skip_frames(time = 2000)     # Let new settings take effect.
clock = time.clock()                # Tracks FPS.

while(True):
    clock.tick()            # Track elapsed milliseconds between snapshots().
    img = sensor.snapshot() # Take a picture and return the image.

    # color_sigma controls how close color wise pixels have to be to each other to be
    # blurred together. A smaller value means they have to be closer.
    # A larger value is less strict.

    # space_sigma controls how close space wise pixels have to be to each other to be
    # blurred together. A smaller value means they have to be closer.
    # A larger value is less strict.

    # Run the kernel on every pixel of the image.
    img.bilateral(3, color_sigma=0.1, space_sigma=1)

    # Note that the bilateral filter can introduce image defects if you set
    # color_sigma/space_sigma too aggressively. Increase the sigma values until
    # the defects go away if you see them.

    print(clock.fps()) # Note: Your OpenMV Cam runs about half as fast while
                       # connected to your computer. The FPS should increase once disconnected.
@ -0,0 +1,61 @@
|
||||
# Color Binary Filter Example
|
||||
#
|
||||
# This script shows off the binary image filter. You may pass binary() any
# number of thresholds to segment the image by.
|
||||
|
||||
import sensor, image, time
|
||||
|
||||
sensor.reset()
|
||||
sensor.set_framesize(sensor.QVGA)
|
||||
sensor.set_pixformat(sensor.RGB565)
|
||||
sensor.skip_frames(time = 2000)
|
||||
clock = time.clock()
|
||||
|
||||
# Use the Tools -> Machine Vision -> Threshold Editor to pick better thresholds.
|
||||
red_threshold = (0,100, 0,127, 0,127) # L A B
|
||||
green_threshold = (0,100, -128,0, 0,127) # L A B
|
||||
blue_threshold = (0,100, -128,127, -128,0) # L A B
|
||||
|
||||
while(True):
|
||||
|
||||
# Test red threshold
|
||||
for i in range(100):
|
||||
clock.tick()
|
||||
img = sensor.snapshot()
|
||||
img.binary([red_threshold])
|
||||
print(clock.fps())
|
||||
|
||||
# Test green threshold
|
||||
for i in range(100):
|
||||
clock.tick()
|
||||
img = sensor.snapshot()
|
||||
img.binary([green_threshold])
|
||||
print(clock.fps())
|
||||
|
||||
# Test blue threshold
|
||||
for i in range(100):
|
||||
clock.tick()
|
||||
img = sensor.snapshot()
|
||||
img.binary([blue_threshold])
|
||||
print(clock.fps())
|
||||
|
||||
# Test not red threshold
|
||||
for i in range(100):
|
||||
clock.tick()
|
||||
img = sensor.snapshot()
|
||||
img.binary([red_threshold], invert = 1)
|
||||
print(clock.fps())
|
||||
|
||||
# Test not green threshold
|
||||
for i in range(100):
|
||||
clock.tick()
|
||||
img = sensor.snapshot()
|
||||
img.binary([green_threshold], invert = 1)
|
||||
print(clock.fps())
|
||||
|
||||
# Test not blue threshold
|
||||
for i in range(100):
|
||||
clock.tick()
|
||||
img = sensor.snapshot()
|
||||
img.binary([blue_threshold], invert = 1)
|
||||
print(clock.fps())
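# Since binary() accepts a list of thresholds, several color ranges can also be
# segmented in a single call. A minimal sketch using the thresholds defined
# above (the LAB ranges are still the rough starting values from this example,
# and img_multi is just an illustrative name):
img_multi = sensor.snapshot()
img_multi.binary([red_threshold, green_threshold, blue_threshold]) # white wherever any threshold matches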
|
||||
@ -0,0 +1,25 @@
|
||||
# Color Light Removal
|
||||
#
|
||||
# This example shows off how to remove bright lights from the image.
|
||||
# You can do this using the binary() method with the "zero=" argument.
|
||||
#
|
||||
# Removing bright lights from the image allows you to now use
|
||||
# histeq() on the image without outliers from oversaturated
|
||||
# parts of the image breaking the algorithm...
|
||||
|
||||
import sensor, image, time
|
||||
|
||||
sensor.reset() # Initialize the camera sensor.
|
||||
sensor.set_pixformat(sensor.RGB565) # or sensor.GRAYSCALE
|
||||
sensor.set_framesize(sensor.QQVGA) # or sensor.QVGA (or others)
|
||||
sensor.skip_frames(time = 2000) # Let new settings take effect.
|
||||
clock = time.clock() # Tracks FPS.
|
||||
|
||||
thresholds = (90, 100, -128, 127, -128, 127)
|
||||
|
||||
while(True):
|
||||
clock.tick() # Track elapsed milliseconds between snapshots().
|
||||
img = sensor.snapshot().binary([thresholds], invert=False, zero=True)
|
||||
|
||||
print(clock.fps()) # Note: Your OpenMV Cam runs about half as fast while
|
||||
# connected to your computer. The FPS should increase once disconnected.
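# A hedged follow-on sketch: because image methods return the image object, the
# light removal above can be chained directly with histeq() (shown in the
# Histogram Equalization example) once the oversaturated pixels are zeroed.
img_eq = sensor.snapshot().binary([thresholds], invert=False, zero=True).histeq()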
|
||||
@ -0,0 +1,21 @@
|
||||
# Edge Filter Example
|
||||
#
|
||||
# This example shows off using the laplacian filter to detect edges.
|
||||
|
||||
import sensor, image, time
|
||||
|
||||
sensor.reset() # Initialize the camera sensor.
|
||||
sensor.set_pixformat(sensor.GRAYSCALE) # or sensor.RGB565
|
||||
sensor.set_framesize(sensor.QQVGA) # or sensor.QVGA (or others)
|
||||
sensor.skip_frames(time = 2000) # Let new settings take effect.
|
||||
clock = time.clock() # Tracks FPS.
|
||||
|
||||
while(True):
|
||||
clock.tick() # Track elapsed milliseconds between snapshots().
|
||||
img = sensor.snapshot() # Take a picture and return the image.
|
||||
|
||||
# Run the kernel on every pixel of the image.
|
||||
img.laplacian(1)
|
||||
|
||||
print(clock.fps()) # Note: Your OpenMV Cam runs about half as fast while
|
||||
# connected to your computer. The FPS should increase once disconnected.
|
||||
@ -0,0 +1,35 @@
|
||||
# Erode and Dilate Example
|
||||
#
|
||||
# This example shows off the erode and dilate functions which you can run on
|
||||
# a binary image to remove noise. This example was originally a test, but it's
|
||||
# useful for showing off how these functions work.
|
||||
|
||||
import pyb, sensor, image
|
||||
|
||||
sensor.reset()
|
||||
sensor.set_framesize(sensor.QVGA)
|
||||
|
||||
grayscale_thres = (170, 255)
|
||||
rgb565_thres = (70, 100, -128, 127, -128, 127)
|
||||
|
||||
while(True):
|
||||
|
||||
sensor.set_pixformat(sensor.GRAYSCALE)
|
||||
for i in range(20):
|
||||
img = sensor.snapshot()
|
||||
img.binary([grayscale_thres])
|
||||
img.erode(2)
|
||||
for i in range(20):
|
||||
img = sensor.snapshot()
|
||||
img.binary([grayscale_thres])
|
||||
img.dilate(2)
|
||||
|
||||
sensor.set_pixformat(sensor.RGB565)
|
||||
for i in range(20):
|
||||
img = sensor.snapshot()
|
||||
img.binary([rgb565_thres])
|
||||
img.erode(2)
|
||||
for i in range(20):
|
||||
img = sensor.snapshot()
|
||||
img.binary([rgb565_thres])
|
||||
img.dilate(2)
|
||||
@ -0,0 +1,21 @@
|
||||
# Gamma Correction
|
||||
#
|
||||
# This example shows off gamma correction to make the image brighter. The gamma
# correction method can also adjust contrast and brightness.
|
||||
|
||||
import sensor, image, time
|
||||
|
||||
sensor.reset()
|
||||
sensor.set_pixformat(sensor.RGB565)
|
||||
sensor.set_framesize(sensor.QVGA)
|
||||
sensor.skip_frames(time = 2000)
|
||||
clock = time.clock()
|
||||
|
||||
while(True):
|
||||
clock.tick()
|
||||
|
||||
# Gamma, contrast, and brightness correction are applied to each color channel. The
|
||||
# values are scaled to the range per color channel per image type...
|
||||
img = sensor.snapshot().gamma_corr(gamma = 0.5, contrast = 1.0, brightness = 0.0)
|
||||
|
||||
print(clock.fps())
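# A hedged variant using the same gamma_corr() keywords as above: gamma values
# above 1.0 darken the image instead of brightening it, while contrast = 1.0 and
# brightness = 0.0 leave those settings unchanged. Values are illustrative only.
img_dark = sensor.snapshot().gamma_corr(gamma = 2.0, contrast = 1.0, brightness = 0.0)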
|
||||
@ -0,0 +1,33 @@
|
||||
# Grayscale Bilateral Filter Example
|
||||
#
|
||||
# This example shows off using the bilateral filter on grayscale images.
|
||||
|
||||
import sensor, image, time
|
||||
|
||||
sensor.reset() # Initialize the camera sensor.
|
||||
sensor.set_pixformat(sensor.GRAYSCALE) # or sensor.RGB565
|
||||
sensor.set_framesize(sensor.QQVGA) # or sensor.QVGA (or others)
|
||||
sensor.skip_frames(time = 2000) # Let new settings take effect.
|
||||
clock = time.clock() # Tracks FPS.
|
||||
|
||||
while(True):
|
||||
clock.tick() # Track elapsed milliseconds between snapshots().
|
||||
img = sensor.snapshot() # Take a picture and return the image.
|
||||
|
||||
# color_sigma controls how close color-wise pixels have to be to each other to be
# blurred together. A smaller value means they have to be closer.
# A larger value is less strict.

# space_sigma controls how close space-wise pixels have to be to each other to be
# blurred together. A smaller value means they have to be closer.
# A larger value is less strict.
|
||||
|
||||
# Run the kernel on every pixel of the image.
|
||||
img.bilateral(3, color_sigma=0.1, space_sigma=1)
|
||||
|
||||
# Note that the bilateral filter can introduce image defects if you set
# color_sigma/space_sigma too aggressively. Increase the sigma values until
# the defects go away if you see them.
|
||||
|
||||
print(clock.fps()) # Note: Your OpenMV Cam runs about half as fast while
|
||||
# connected to your computer. The FPS should increase once disconnected.
|
||||
@ -0,0 +1,45 @@
|
||||
# Grayscale Binary Filter Example
|
||||
#
|
||||
# This script shows off the binary image filter. You may pass binary() any
# number of thresholds to segment the image by.
|
||||
|
||||
import sensor, image, time
|
||||
|
||||
sensor.reset()
|
||||
sensor.set_framesize(sensor.QVGA)
|
||||
sensor.set_pixformat(sensor.GRAYSCALE)
|
||||
sensor.skip_frames(time = 2000)
|
||||
clock = time.clock()
|
||||
|
||||
low_threshold = (0, 50)
|
||||
high_threshold = (205, 255)
|
||||
|
||||
while(True):
|
||||
|
||||
# Test low threshold
|
||||
for i in range(100):
|
||||
clock.tick()
|
||||
img = sensor.snapshot()
|
||||
img.binary([low_threshold])
|
||||
print(clock.fps())
|
||||
|
||||
# Test high threshold
|
||||
for i in range(100):
|
||||
clock.tick()
|
||||
img = sensor.snapshot()
|
||||
img.binary([high_threshold])
|
||||
print(clock.fps())
|
||||
|
||||
# Test not low threshold
|
||||
for i in range(100):
|
||||
clock.tick()
|
||||
img = sensor.snapshot()
|
||||
img.binary([low_threshold], invert = 1)
|
||||
print(clock.fps())
|
||||
|
||||
# Test not high threshold
|
||||
for i in range(100):
|
||||
clock.tick()
|
||||
img = sensor.snapshot()
|
||||
img.binary([high_threshold], invert = 1)
|
||||
print(clock.fps())
|
||||
@ -0,0 +1,25 @@
|
||||
# Grayscale Light Removal
|
||||
#
|
||||
# This example shows off how to remove bright lights from the image.
|
||||
# You can do this using the binary() method with the "zero=" argument.
|
||||
#
|
||||
# Removing bright lights from the image allows you to now use
|
||||
# histeq() on the image without outliers from oversaturated
|
||||
# parts of the image breaking the algorithm...
|
||||
|
||||
import sensor, image, time
|
||||
|
||||
sensor.reset() # Initialize the camera sensor.
|
||||
sensor.set_pixformat(sensor.GRAYSCALE) # or sensor.RGB565
|
||||
sensor.set_framesize(sensor.QQVGA) # or sensor.QVGA (or others)
|
||||
sensor.skip_frames(time = 2000) # Let new settings take effect.
|
||||
clock = time.clock() # Tracks FPS.
|
||||
|
||||
thresholds = (220, 255)
|
||||
|
||||
while(True):
|
||||
clock.tick() # Track elapsed milliseconds between snapshots().
|
||||
img = sensor.snapshot().binary([thresholds], invert=False, zero=True)
|
||||
|
||||
print(clock.fps()) # Note: Your OpenMV Cam runs about half as fast while
|
||||
# connected to your computer. The FPS should increase once disconnected.
|
||||
@ -0,0 +1,19 @@
|
||||
# Histogram Equalization
|
||||
#
|
||||
# This example shows off how to use histogram equalization to improve
|
||||
# the contrast in the image.
|
||||
|
||||
import sensor, image, time
|
||||
|
||||
sensor.reset()
|
||||
sensor.set_pixformat(sensor.RGB565)
|
||||
sensor.set_framesize(sensor.QQVGA)
|
||||
sensor.skip_frames(time = 2000)
|
||||
clock = time.clock()
|
||||
|
||||
while(True):
|
||||
clock.tick()
|
||||
|
||||
img = sensor.snapshot().histeq()
|
||||
|
||||
print(clock.fps())
|
||||
@ -0,0 +1,27 @@
|
||||
# Kernel Filtering Example
|
||||
#
|
||||
# This example shows off how to use a generic kernel filter.
|
||||
|
||||
import sensor, image, time
|
||||
|
||||
sensor.reset() # Initialize the camera sensor.
|
||||
sensor.set_pixformat(sensor.GRAYSCALE) # or sensor.RGB565
|
||||
sensor.set_framesize(sensor.QVGA) # or sensor.QQVGA (or others)
|
||||
sensor.skip_frames(time = 2000) # Let new settings take effect.
|
||||
clock = time.clock() # Tracks FPS.
|
||||
|
||||
kernel_size = 1 # 3x3==1, 5x5==2, 7x7==3, etc.
|
||||
|
||||
kernel = [-2, -1, 0, \
|
||||
-1, 1, 1, \
|
||||
0, 1, 2]
|
||||
|
||||
while(True):
|
||||
clock.tick() # Track elapsed milliseconds between snapshots().
|
||||
img = sensor.snapshot() # Take a picture and return the image.
|
||||
|
||||
# Run the kernel on every pixel of the image.
|
||||
img.morph(kernel_size, kernel)
|
||||
|
||||
print(clock.fps()) # Note: Your OpenMV Cam runs about half as fast while
|
||||
# connected to your computer. The FPS should increase once disconnected.
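# A hedged sketch of another generic kernel run through the same morph() call:
# a classic 3x3 sharpening kernel. The kernel values are a textbook example,
# not from the original script, and sharpen_kernel/img_sharp are illustrative names.
sharpen_kernel = [-1, -1, -1, \
                  -1,  9, -1, \
                  -1, -1, -1]
img_sharp = sensor.snapshot()
img_sharp.morph(kernel_size, sharpen_kernel)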
|
||||
@ -0,0 +1,21 @@
|
||||
# Lens Correction
|
||||
#
|
||||
# This example shows off how to use the lens correction method to fix lens
|
||||
# distortion in an image. You need to do this for qrcode / barcode / data matrix
|
||||
# detection. Increase the strength below until lines are straight in the view.
|
||||
# Zoom in (higher) or out (lower) until you see enough of the image.
|
||||
|
||||
import sensor, image, time
|
||||
|
||||
sensor.reset()
|
||||
sensor.set_pixformat(sensor.RGB565)
|
||||
sensor.set_framesize(sensor.QVGA)
|
||||
sensor.skip_frames(time = 2000)
|
||||
clock = time.clock()
|
||||
|
||||
while(True):
|
||||
clock.tick()
|
||||
|
||||
img = sensor.snapshot().lens_corr(strength = 1.8, zoom = 1.0)
|
||||
|
||||
print(clock.fps())
|
||||
@ -0,0 +1,21 @@
|
||||
# Linear Polar Mapping Example
|
||||
#
|
||||
# This example shows off re-projecting the image using a linear polar
|
||||
# transformation. Linear polar images are useful in that rotations
|
||||
# become translations in the X direction and linear changes
|
||||
# in scale become linear translations in the Y direction.
|
||||
|
||||
import sensor, image, time
|
||||
|
||||
sensor.reset() # Initialize the camera sensor.
|
||||
sensor.set_pixformat(sensor.RGB565) # or sensor.GRAYSCALE
|
||||
sensor.set_framesize(sensor.QQVGA) # or sensor.QVGA (or others)
|
||||
sensor.skip_frames(time = 2000) # Let new settings take effect.
|
||||
clock = time.clock() # Tracks FPS.
|
||||
|
||||
while(True):
|
||||
clock.tick() # Track elapsed milliseconds between snapshots().
|
||||
img = sensor.snapshot().linpolar(reverse=False)
|
||||
|
||||
print(clock.fps()) # Note: Your OpenMV Cam runs about half as fast while
|
||||
# connected to your computer. The FPS should increase once disconnected.
|
||||
@ -0,0 +1,21 @@
|
||||
# Log Polar Mapping Example
|
||||
#
|
||||
# This example shows off re-projecting the image using a log polar
|
||||
# transformation. Log polar images are useful in that rotations
|
||||
# become translations in the X direction and exponential changes
|
||||
# in scale (x2, x4, etc.) become linear translations in the Y direction.
|
||||
|
||||
import sensor, image, time
|
||||
|
||||
sensor.reset() # Initialize the camera sensor.
|
||||
sensor.set_pixformat(sensor.RGB565) # or sensor.GRAYSCALE
|
||||
sensor.set_framesize(sensor.QQVGA) # or sensor.QVGA (or others)
|
||||
sensor.skip_frames(time = 2000) # Let new settings take effect.
|
||||
clock = time.clock() # Tracks FPS.
|
||||
|
||||
while(True):
|
||||
clock.tick() # Track elapsed milliseconds between snapshots().
|
||||
img = sensor.snapshot().logpolar(reverse=False)
|
||||
|
||||
print(clock.fps()) # Note: Your OpenMV Cam runs about half as fast while
|
||||
# connected to your computer. The FPS should increase once disconnected.
|
||||
@ -0,0 +1,25 @@
|
||||
# Mean Adaptive Threshold Filter Example
|
||||
#
|
||||
# This example shows off mean filtering with adaptive thresholding.
|
||||
# When mean(threshold=True) is used, the mean() method adaptively thresholds the image
# by comparing the mean of the pixels around a pixel, minus an offset, with that pixel.
|
||||
|
||||
import sensor, image, time
|
||||
|
||||
sensor.reset() # Initialize the camera sensor.
|
||||
sensor.set_pixformat(sensor.RGB565) # or sensor.GRAYSCALE
|
||||
sensor.set_framesize(sensor.QQVGA) # or sensor.QVGA (or others)
|
||||
sensor.skip_frames(time = 2000) # Let new settings take effect.
|
||||
clock = time.clock() # Tracks FPS.
|
||||
|
||||
while(True):
|
||||
clock.tick() # Track elapsed milliseconds between snapshots().
|
||||
img = sensor.snapshot() # Take a picture and return the image.
|
||||
|
||||
# The first argument is the kernel size. N corresponds to a ((N*2)+1)^2
|
||||
# kernel size. E.g. 1 == 3x3 kernel, 2 == 5x5 kernel, etc. Note: You
|
||||
# shouldn't ever need to use a value bigger than 2.
|
||||
img.mean(1, threshold=True, offset=5, invert=True)
|
||||
|
||||
print(clock.fps()) # Note: Your OpenMV Cam runs about half as fast while
|
||||
# connected to your computer. The FPS should increase once disconnected.
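# A conceptual sketch of what the adaptive threshold above does per pixel (pure
# Python for illustration only; the real mean() is implemented in the firmware
# and runs over the whole image). The function name is made up for this sketch.
def adaptive_binarize(pixel, neighborhood_mean, offset=5):
    # A pixel survives when it is brighter than the local mean minus the offset;
    # invert=True in the call above flips which side ends up white.
    return 255 if pixel > (neighborhood_mean - offset) else 0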
|
||||
@ -0,0 +1,25 @@
|
||||
# Mean Filter Example
|
||||
#
|
||||
# This example shows off mean filtering. Mean filtering is your standard average
|
||||
# filter in an NxN neighborhood. Mean filtering removes noise in the image by
# blurring everything. But, it's the fastest kernel filter operation.
|
||||
|
||||
import sensor, image, time
|
||||
|
||||
sensor.reset() # Initialize the camera sensor.
|
||||
sensor.set_pixformat(sensor.RGB565) # or sensor.GRAYSCALE
|
||||
sensor.set_framesize(sensor.QQVGA) # or sensor.QVGA (or others)
|
||||
sensor.skip_frames(time = 2000) # Let new settings take effect.
|
||||
clock = time.clock() # Tracks FPS.
|
||||
|
||||
while(True):
|
||||
clock.tick() # Track elapsed milliseconds between snapshots().
|
||||
img = sensor.snapshot() # Take a picture and return the image.
|
||||
|
||||
# The only argument is the kernel size. N corresponds to a ((N*2)+1)^2
|
||||
# kernel size. E.g. 1 == 3x3 kernel, 2 == 5x5 kernel, etc. Note: You
|
||||
# shouldn't ever need to use a value bigger than 2.
|
||||
img.mean(1)
|
||||
|
||||
print(clock.fps()) # Note: Your OpenMV Cam runs about half as fast while
|
||||
# connected to your computer. The FPS should increase once disconnected.
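# A quick check of the kernel-size convention described above: mean(n) uses a
# ((n*2)+1) x ((n*2)+1) neighborhood, so n=1 is a 3x3 kernel and n=2 is 5x5.
for n in (1, 2, 3):
    print("mean(%d) -> %dx%d kernel" % (n, (n * 2) + 1, (n * 2) + 1))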
|
||||
@ -0,0 +1,27 @@
|
||||
# Median Adaptive Threshold Filter Example
|
||||
#
|
||||
# This example shows off median filtering with adaptive thresholding.
|
||||
# When median(threshold=True) is used, the median() method adaptively thresholds the image
|
||||
# by comparing the median of the pixels around a pixel, minus an offset, with that pixel.
|
||||
|
||||
import sensor, image, time
|
||||
|
||||
sensor.reset() # Initialize the camera sensor.
|
||||
sensor.set_pixformat(sensor.RGB565) # or sensor.GRAYSCALE
|
||||
sensor.set_framesize(sensor.QQVGA) # or sensor.QVGA (or others)
|
||||
sensor.skip_frames(time = 2000) # Let new settings take effect.
|
||||
clock = time.clock() # Tracks FPS.
|
||||
|
||||
while(True):
|
||||
clock.tick() # Track elapsed milliseconds between snapshots().
|
||||
img = sensor.snapshot() # Take a picture and return the image.
|
||||
|
||||
# The first argument to the median filter is the kernel size, it can be
|
||||
# either 0, 1, or 2 for a 1x1, 3x3, or 5x5 kernel respectively. The second
|
||||
# argument "percentile" is the percentile number to choose from the NxN
|
||||
# neighborhood. 0.5 is the median, 0.25 is the lower quartile, and 0.75
|
||||
# would be the upper quartile.
|
||||
img.median(1, percentile=0.5, threshold=True, offset=5, invert=True)
|
||||
|
||||
print(clock.fps()) # Note: Your OpenMV Cam runs about half as fast while
|
||||
# connected to your computer. The FPS should increase once disconnected.
|
||||
@ -0,0 +1,27 @@
|
||||
# Median Filter Example
|
||||
#
|
||||
# This example shows off median filtering. Median filtering replaces every pixel
|
||||
# with the median value of its NxN neighborhood. Median filtering is good for
|
||||
# removing noise in the image while preserving edges.
|
||||
|
||||
import sensor, image, time
|
||||
|
||||
sensor.reset() # Initialize the camera sensor.
|
||||
sensor.set_pixformat(sensor.RGB565) # or sensor.GRAYSCALE
|
||||
sensor.set_framesize(sensor.QQVGA) # or sensor.QVGA (or others)
|
||||
sensor.skip_frames(time = 2000) # Let new settings take effect.
|
||||
clock = time.clock() # Tracks FPS.
|
||||
|
||||
while(True):
|
||||
clock.tick() # Track elapsed milliseconds between snapshots().
|
||||
img = sensor.snapshot() # Take a picture and return the image.
|
||||
|
||||
# The first argument to the median filter is the kernel size, it can be
|
||||
# either 0, 1, or 2 for a 1x1, 3x3, or 5x5 kernel respectively. The second
|
||||
# argument "percentile" is the percentile number to choose from the NxN
|
||||
# neighborhood. 0.5 is the median, 0.25 is the lower quartile, and 0.75
|
||||
# would be the upper quartile.
|
||||
img.median(1, percentile=0.5)
|
||||
|
||||
print(clock.fps()) # Note: Your OpenMV Cam runs about half as fast while
|
||||
# connected to your computer. The FPS should increase once disconnected.
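# A hedged sketch of the percentile argument described above, assuming the
# sensor setup from this example: 0.25/0.75 pick the lower/upper quartile of the
# neighborhood, and by the same logic 0.0/1.0 would pick its min/max. The
# img_lq/img_uq names are only for illustration.
img_lq = sensor.snapshot().median(1, percentile=0.25) # biases the result darker
img_uq = sensor.snapshot().median(1, percentile=0.75) # biases the result lighter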
|
||||
@ -0,0 +1,28 @@
|
||||
# Midpoint Adaptive Threshold Filter Example
|
||||
#
|
||||
# This example shows off midpoint filtering with adaptive thresholding.
|
||||
# When midpoint(threshold=True) is used, the midpoint() method adaptively thresholds the image
|
||||
# by comparing the midpoint of the pixels around a pixel, minus an offset, with that pixel.
|
||||
|
||||
import sensor, image, time
|
||||
|
||||
sensor.reset() # Initialize the camera sensor.
|
||||
sensor.set_pixformat(sensor.RGB565) # or sensor.GRAYSCALE
|
||||
sensor.set_framesize(sensor.QQVGA) # or sensor.QVGA (or others)
|
||||
sensor.skip_frames(time = 2000) # Let new settings take effect.
|
||||
clock = time.clock() # Tracks FPS.
|
||||
|
||||
while(True):
|
||||
clock.tick() # Track elapsed milliseconds between snapshots().
|
||||
img = sensor.snapshot() # Take a picture and return the image.
|
||||
|
||||
# The first argument is the kernel size. N corresponds to a ((N*2)+1)^2
|
||||
# kernel size. E.g. 1 == 3x3 kernel, 2 == 5x5 kernel, etc. Note: You
|
||||
# shouldn't ever need to use a value bigger than 2. The "bias" argument
|
||||
# lets you select between min and max blending. 0.5 == midpoint filter,
|
||||
# 0.0 == min filter, and 1.0 == max filter. Note that the min filter
|
||||
# makes images darker while the max filter makes images lighter.
|
||||
img.midpoint(1, bias=0.5, threshold=True, offset=5, invert=True)
|
||||
|
||||
print(clock.fps()) # Note: Your OpenMV Cam runs about half as fast while
|
||||
# connected to your computer. The FPS should increase once disconnected.
|
||||
@ -0,0 +1,27 @@
|
||||
# Midpoint Filter Example
|
||||
#
|
||||
# This example shows off midpoint filtering. Midpoint filtering replaces each
|
||||
# pixel by the average of the min and max pixel values for a NxN neighborhood.
|
||||
|
||||
import sensor, image, time
|
||||
|
||||
sensor.reset() # Initialize the camera sensor.
|
||||
sensor.set_pixformat(sensor.RGB565) # or sensor.GRAYSCALE
|
||||
sensor.set_framesize(sensor.QQVGA) # or sensor.QVGA (or others)
|
||||
sensor.skip_frames(time = 2000) # Let new settings take effect.
|
||||
clock = time.clock() # Tracks FPS.
|
||||
|
||||
while(True):
|
||||
clock.tick() # Track elapsed milliseconds between snapshots().
|
||||
img = sensor.snapshot() # Take a picture and return the image.
|
||||
|
||||
# The first argument is the kernel size. N corresponds to a ((N*2)+1)^2
|
||||
# kernel size. E.g. 1 == 3x3 kernel, 2 == 5x5 kernel, etc. Note: You
|
||||
# shouldn't ever need to use a value bigger than 2. The "bias" argument
|
||||
# lets you select between min and max blending. 0.5 == midpoint filter,
|
||||
# 0.0 == min filter, and 1.0 == max filter. Note that the min filter
|
||||
# makes images darker while the max filter makes images lighter.
|
||||
img.midpoint(1, bias=0.5)
|
||||
|
||||
print(clock.fps()) # Note: Your OpenMV Cam runs about half as fast while
|
||||
# connected to your computer. The FPS should increase once disconnected.
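# A hedged sketch of the bias argument described above, assuming the sensor
# setup from this example: bias=0.0 behaves as a min filter (darker image) and
# bias=1.0 as a max filter (lighter image). Variable names are illustrative.
img_min = sensor.snapshot().midpoint(1, bias=0.0) # min filter
img_max = sensor.snapshot().midpoint(1, bias=1.0) # max filter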
|
||||
@ -0,0 +1,25 @@
|
||||
# Mode Adaptive Threshold Filter Example
|
||||
#
|
||||
# This example shows off mode filtering with adaptive thresholding.
|
||||
# When mode(threshold=True) is used, the mode() method adaptively thresholds the image
|
||||
# by comparing the mode of the pixels around a pixel, minus an offset, with that pixel.
|
||||
# Avoid using the mode filter on RGB565 images. It will cause artifacts on image edges...
|
||||
|
||||
import sensor, image, time
|
||||
|
||||
sensor.reset() # Initialize the camera sensor.
|
||||
sensor.set_pixformat(sensor.GRAYSCALE) # or sensor.RGB565
|
||||
sensor.set_framesize(sensor.QQVGA) # or sensor.QVGA (or others)
|
||||
sensor.skip_frames(time = 2000) # Let new settings take effect.
|
||||
clock = time.clock() # Tracks FPS.
|
||||
|
||||
while(True):
|
||||
clock.tick() # Track elapsed milliseconds between snapshots().
|
||||
img = sensor.snapshot() # Take a picture and return the image.
|
||||
|
||||
# The first argument to the mode filter is the kernel size. It can be
# either 0, 1, or 2 for a 1x1, 3x3, or 5x5 kernel respectively.
|
||||
img.mode(1, threshold=True, offset=5, invert=True)
|
||||
|
||||
print(clock.fps()) # Note: Your OpenMV Cam runs about half as fast while
|
||||
# connected to your computer. The FPS should increase once disconnected.
|
||||
@ -0,0 +1,25 @@
|
||||
# Mode Filter Example
|
||||
#
|
||||
# This example shows off mode filtering. Mode filtering is a highly non-linear
|
||||
# operation which replaces each pixel with the mode of the NxN neighborhood
|
||||
# of pixels around it. Avoid using the mode filter on RGB565 images. It will
|
||||
# cause artifacts on image edges...
|
||||
|
||||
import sensor, image, time
|
||||
|
||||
sensor.reset() # Initialize the camera sensor.
|
||||
sensor.set_pixformat(sensor.GRAYSCALE) # or sensor.RGB565
|
||||
sensor.set_framesize(sensor.QQVGA) # or sensor.QVGA (or others)
|
||||
sensor.skip_frames(time = 2000) # Let new settings take effect.
|
||||
clock = time.clock() # Tracks FPS.
|
||||
|
||||
while(True):
|
||||
clock.tick() # Track elapsed milliseconds between snapshots().
|
||||
img = sensor.snapshot() # Take a picture and return the image.
|
||||
|
||||
# The only argument to the mode filter is the kernel size. It can be
# either 0, 1, or 2 for a 1x1, 3x3, or 5x5 kernel respectively.
|
||||
img.mode(1)
|
||||
|
||||
print(clock.fps()) # Note: Your OpenMV Cam runs about half as fast while
|
||||
# connected to your computer. The FPS should increase once disconnected.
|
||||
@ -0,0 +1,19 @@
|
||||
# Negative Example
|
||||
#
|
||||
# This example shows off negating the image. This is not a particularly
|
||||
# useful method but it can come in handy once in a while.
|
||||
|
||||
import sensor, image, time
|
||||
|
||||
sensor.reset() # Initialize the camera sensor.
|
||||
sensor.set_pixformat(sensor.RGB565) # or sensor.GRAYSCALE
|
||||
sensor.set_framesize(sensor.QVGA) # or sensor.QQVGA (or others)
|
||||
sensor.skip_frames(time = 2000) # Let new settings take effect.
|
||||
clock = time.clock() # Tracks FPS.
|
||||
|
||||
while(True):
|
||||
clock.tick() # Track elapsed milliseconds between snapshots().
|
||||
img = sensor.snapshot().negate()
|
||||
|
||||
print(clock.fps()) # Note: Your OpenMV Cam runs about half as fast while
|
||||
# connected to your computer. The FPS should increase once disconnected.
|
||||
@ -0,0 +1,71 @@
|
||||
# Rotation Correction
|
||||
#
|
||||
# This example shows off how to use the rotation_corr() to both correct for
|
||||
# perspective distortion and then to rotate the new corrected image in 3D
|
||||
# space afterwards to handle movement.
|
||||
|
||||
import sensor, image, time
|
||||
|
||||
sensor.reset()
|
||||
sensor.set_pixformat(sensor.RGB565)
|
||||
sensor.set_framesize(sensor.QVGA)
|
||||
sensor.skip_frames(time = 2000)
|
||||
clock = time.clock()
|
||||
|
||||
# The image will be warped so that the TARGET_POINTS below map to these new corner positions:
|
||||
#
|
||||
# (0, 0)
|
||||
# (w-1, 0)
|
||||
# (w-1, h-1)
|
||||
# (0, h-1)
|
||||
#
|
||||
# Try setting the points below to the corners of a quadrilateral
|
||||
# (in clockwise order) in the field-of-view. You can get points
|
||||
# on the image by clicking and dragging on the frame buffer and
|
||||
# recording the values shown in the histogram widget.
|
||||
|
||||
w = sensor.width()
|
||||
h = sensor.height()
|
||||
|
||||
TARGET_POINTS = [(0, 0), # (x, y) CHANGE ME!
|
||||
(w-1, 0), # (x, y) CHANGE ME!
|
||||
(w-1, h-1), # (x, y) CHANGE ME!
|
||||
(0, h-1)] # (x, y) CHANGE ME!
|
||||
|
||||
# Degrees per frame to rotate by...
|
||||
X_ROTATION_DEGREE_RATE = 5
|
||||
Y_ROTATION_DEGREE_RATE = 0.5
|
||||
Z_ROTATION_DEGREE_RATE = 0
|
||||
X_OFFSET = 0
|
||||
Y_OFFSET = 0
|
||||
|
||||
ZOOM_AMOUNT = 1 # Lower zooms out - Higher zooms in.
|
||||
FOV_WINDOW = 25 # Between 0 and 180. Represents the field-of-view of the scene
                # window when rotating the image in 3D space. Values closer to
                # zero result in lines becoming straighter as the window
                # moves away from the image being rotated in 3D space. A large
                # value moves the window closer to the image in 3D space, which
                # results in more perspective distortion and sometimes
                # the image in 3D intersecting the scene window.
|
||||
|
||||
x_rotation_counter = 0
|
||||
y_rotation_counter = 0
|
||||
z_rotation_counter = 0
|
||||
|
||||
while(True):
|
||||
clock.tick()
|
||||
|
||||
img = sensor.snapshot().rotation_corr(x_rotation = x_rotation_counter, \
|
||||
y_rotation = y_rotation_counter, \
|
||||
z_rotation = z_rotation_counter, \
|
||||
x_translation = X_OFFSET, \
|
||||
y_translation = Y_OFFSET, \
|
||||
zoom = ZOOM_AMOUNT, \
|
||||
fov = FOV_WINDOW, \
|
||||
corners = TARGET_POINTS)
|
||||
|
||||
x_rotation_counter += X_ROTATION_DEGREE_RATE
|
||||
y_rotation_counter += Y_ROTATION_DEGREE_RATE
|
||||
z_rotation_counter += Z_ROTATION_DEGREE_RATE
|
||||
|
||||
print(clock.fps())
|
||||
@ -0,0 +1,39 @@
|
||||
# Perspective Correction
|
||||
#
|
||||
# This example shows off how to use the rotation_corr() to fix perspective
|
||||
# issues related to how your OpenMV Cam is mounted.
|
||||
|
||||
import sensor, image, time
|
||||
|
||||
sensor.reset()
|
||||
sensor.set_pixformat(sensor.RGB565)
|
||||
sensor.set_framesize(sensor.QVGA)
|
||||
sensor.skip_frames(time = 2000)
|
||||
clock = time.clock()
|
||||
|
||||
# The image will be warped so that the TARGET_POINTS below map to these new corner positions:
|
||||
#
|
||||
# (0, 0)
|
||||
# (w-1, 0)
|
||||
# (w-1, h-1)
|
||||
# (0, h-1)
|
||||
#
|
||||
# Try setting the points below to the corners of a quadrilateral
|
||||
# (in clockwise order) in the field-of-view. You can get points
|
||||
# on the image by clicking and dragging on the frame buffer and
|
||||
# recording the values shown in the histogram widget.
|
||||
|
||||
w = sensor.width()
|
||||
h = sensor.height()
|
||||
|
||||
TARGET_POINTS = [(0, 0), # (x, y) CHANGE ME!
|
||||
(w-1, 0), # (x, y) CHANGE ME!
|
||||
(w-1, h-1), # (x, y) CHANGE ME!
|
||||
(0, h-1)] # (x, y) CHANGE ME!
|
||||
|
||||
while(True):
|
||||
clock.tick()
|
||||
|
||||
img = sensor.snapshot().rotation_corr(corners = TARGET_POINTS)
|
||||
|
||||
print(clock.fps())
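# A hedged sketch of filling in TARGET_POINTS: the coordinates below are made-up
# placeholders for a slightly tilted object in the view - replace them with the
# corners you actually measured from the frame buffer. EXAMPLE_POINTS and
# img_fixed are illustrative names only.
EXAMPLE_POINTS = [(30, 20),          # top-left corner of the object in the image
                  (w - 41, 10),      # top-right
                  (w - 21, h - 31),  # bottom-right
                  (10, h - 11)]      # bottom-left (clockwise order)
img_fixed = sensor.snapshot().rotation_corr(corners = EXAMPLE_POINTS)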
|
||||
@ -0,0 +1,49 @@
|
||||
# Rotation Correction
|
||||
#
|
||||
# This example shows off how to use the rotation_corr() to play with the scene
|
||||
# window your OpenMV Cam sees.
|
||||
|
||||
import sensor, image, time
|
||||
|
||||
# Degrees per frame to rotate by...
|
||||
X_ROTATION_DEGREE_RATE = 5
|
||||
Y_ROTATION_DEGREE_RATE = 0.5
|
||||
Z_ROTATION_DEGREE_RATE = 0
|
||||
X_OFFSET = 0
|
||||
Y_OFFSET = 0
|
||||
|
||||
ZOOM_AMOUNT = 1 # Lower zooms out - Higher zooms in.
|
||||
FOV_WINDOW = 60 # Between 0 and 180. Represents the field-of-view of the scene
                # window when rotating the image in 3D space. Values closer to
                # zero result in lines becoming straighter as the window
                # moves away from the image being rotated in 3D space. A large
                # value moves the window closer to the image in 3D space, which
                # results in more perspective distortion and sometimes
                # the image in 3D intersecting the scene window.
|
||||
|
||||
sensor.reset()
|
||||
sensor.set_pixformat(sensor.RGB565)
|
||||
sensor.set_framesize(sensor.QVGA)
|
||||
sensor.skip_frames(time = 2000)
|
||||
clock = time.clock()
|
||||
|
||||
x_rotation_counter = 0
|
||||
y_rotation_counter = 0
|
||||
z_rotation_counter = 0
|
||||
|
||||
while(True):
|
||||
clock.tick()
|
||||
|
||||
img = sensor.snapshot().rotation_corr(x_rotation = x_rotation_counter, \
|
||||
y_rotation = y_rotation_counter, \
|
||||
z_rotation = z_rotation_counter, \
|
||||
x_translation = X_OFFSET, \
|
||||
y_translation = Y_OFFSET, \
|
||||
zoom = ZOOM_AMOUNT, \
|
||||
fov = FOV_WINDOW)
|
||||
|
||||
x_rotation_counter += X_ROTATION_DEGREE_RATE
|
||||
y_rotation_counter += Y_ROTATION_DEGREE_RATE
|
||||
z_rotation_counter += Z_ROTATION_DEGREE_RATE
|
||||
|
||||
print(clock.fps())
|
||||
@ -0,0 +1,21 @@
|
||||
# Sharpen Filter Example
|
||||
#
|
||||
# This example shows off using the laplacian filter to sharpen images.
|
||||
|
||||
import sensor, image, time
|
||||
|
||||
sensor.reset() # Initialize the camera sensor.
|
||||
sensor.set_pixformat(sensor.GRAYSCALE) # or sensor.RGB565
|
||||
sensor.set_framesize(sensor.QQVGA) # or sensor.QVGA (or others)
|
||||
sensor.skip_frames(time = 2000) # Let new settings take effect.
|
||||
clock = time.clock() # Tracks FPS.
|
||||
|
||||
while(True):
|
||||
clock.tick() # Track elapsed milliseconds between snapshots().
|
||||
img = sensor.snapshot() # Take a picture and return the image.
|
||||
|
||||
# Run the kernel on every pixel of the image.
|
||||
img.laplacian(1, sharpen=True)
|
||||
|
||||
print(clock.fps()) # Note: Your OpenMV Cam runs about half as fast while
|
||||
# connected to your computer. The FPS should increase once disconnected.
|
||||
@ -0,0 +1,19 @@
|
||||
# Ulab is a numpy-like module for micropython, meant to simplify and speed up common
|
||||
# mathematical operations on arrays. This basic example shows mean/std on an image.
|
||||
#
|
||||
# NOTE: ndarrays cause the heap to be fragmented easily. If you run out of memory,
|
||||
# there's not much that can be done about it, lowering the resolution might help.
|
||||
|
||||
import sensor, image, time
|
||||
from ulab import numpy as np
|
||||
|
||||
sensor.reset() # Reset and initialize the sensor.
|
||||
sensor.set_pixformat(sensor.GRAYSCALE) # Set pixel format to GRAYSCALE (or RGB565)
sensor.set_framesize(sensor.QQVGA) # Set frame size to QQVGA (160x120)
|
||||
clock = time.clock() # Create a clock object to track the FPS.
|
||||
|
||||
while (True):
|
||||
img = sensor.snapshot() # Take a picture and return the image.
|
||||
a = np.array(img, dtype=np.uint8)
|
||||
print("mean: %d std:%d"%(np.mean(a), np.std(a)))
|
||||
|
||||
@ -0,0 +1,21 @@
|
||||
# Unsharp Filter Example
|
||||
#
|
||||
# This example shows off using the gaussian filter to unsharp mask filter images.
|
||||
|
||||
import sensor, image, time
|
||||
|
||||
sensor.reset() # Initialize the camera sensor.
|
||||
sensor.set_pixformat(sensor.GRAYSCALE) # or sensor.RGB565
|
||||
sensor.set_framesize(sensor.QQVGA) # or sensor.QVGA (or others)
|
||||
sensor.skip_frames(time = 2000) # Let new settings take effect.
|
||||
clock = time.clock() # Tracks FPS.
|
||||
|
||||
while(True):
|
||||
clock.tick() # Track elapsed milliseconds between snapshots().
|
||||
img = sensor.snapshot() # Take a picture and return the image.
|
||||
|
||||
# Run the kernel on every pixel of the image.
|
||||
img.gaussian(1, unsharp=True)
|
||||
|
||||
print(clock.fps()) # Note: Your OpenMV Cam runs about half as fast while
|
||||
# connected to your computer. The FPS should increase once disconnected.
|
||||
@ -0,0 +1,33 @@
|
||||
# Vertical Flip - Horizontal Mirror - Transpose
|
||||
#
|
||||
# This example shows off how to vertically flip, horizontally mirror, or
|
||||
# transpose an image. Note that:
|
||||
#
|
||||
# vflip=False, hmirror=False, transpose=False -> 0 degree rotation
|
||||
# vflip=True, hmirror=False, transpose=True -> 90 degree rotation
|
||||
# vflip=True, hmirror=True, transpose=False -> 180 degree rotation
|
||||
# vflip=False, hmirror=True, transpose=True -> 270 degree rotation
|
||||
|
||||
import sensor, image, time, pyb
|
||||
|
||||
sensor.reset()
|
||||
sensor.set_pixformat(sensor.RGB565)
|
||||
sensor.set_framesize(sensor.QVGA)
|
||||
sensor.skip_frames(time = 2000)
|
||||
clock = time.clock()
|
||||
|
||||
mills = pyb.millis()
|
||||
counter = 0
|
||||
|
||||
while(True):
|
||||
clock.tick()
|
||||
|
||||
img = sensor.snapshot().replace(vflip=(counter//2)%2,
|
||||
hmirror=(counter//4)%2,
|
||||
transpose=(counter//8)%2)
|
||||
|
||||
if (pyb.millis() > (mills + 1000)):
|
||||
mills = pyb.millis()
|
||||
counter += 1
|
||||
|
||||
print(clock.fps())
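# A hedged sketch of a fixed rotation instead of the cycling demo above: per the
# table at the top of this example, vflip=True with transpose=True gives a
# constant 90 degree rotation. The img_rot90 name is only for illustration.
img_rot90 = sensor.snapshot().replace(vflip=True, hmirror=False, transpose=True)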
|
||||
@ -0,0 +1,33 @@
|
||||
# Emboss Snapshot Example
|
||||
#
|
||||
# Note: You will need an SD card to run this example.
|
||||
#
|
||||
# You can use your OpenMV Cam to save modified image files.
|
||||
|
||||
import sensor, image, pyb
|
||||
|
||||
RED_LED_PIN = 1
|
||||
BLUE_LED_PIN = 3
|
||||
|
||||
sensor.reset() # Initialize the camera sensor.
|
||||
sensor.set_pixformat(sensor.RGB565) # or sensor.GRAYSCALE
|
||||
sensor.set_framesize(sensor.QVGA) # or sensor.QQVGA (or others)
|
||||
sensor.skip_frames(time = 2000) # Let new settings take effect.
|
||||
|
||||
pyb.LED(RED_LED_PIN).on()
|
||||
sensor.skip_frames(time = 2000) # Give the user time to get ready.
|
||||
|
||||
pyb.LED(RED_LED_PIN).off()
|
||||
pyb.LED(BLUE_LED_PIN).on()
|
||||
|
||||
print("You're on camera!")
|
||||
img = sensor.snapshot()
|
||||
|
||||
img.morph(1, [+2, +1, +0,\
|
||||
+1, +1, -1,\
|
||||
+0, -1, -2]) # Emboss the image.
|
||||
|
||||
img.save("example.jpg") # or "example.bmp" (or others)
|
||||
|
||||
pyb.LED(BLUE_LED_PIN).off()
|
||||
print("Done! Reset the camera to see the saved image.")
|
||||
@ -0,0 +1,27 @@
|
||||
# Snapshot Example
|
||||
#
|
||||
# Note: You will need an SD card to run this example.
|
||||
#
|
||||
# You can use your OpenMV Cam to save image files.
|
||||
|
||||
import sensor, image, pyb
|
||||
|
||||
RED_LED_PIN = 1
|
||||
BLUE_LED_PIN = 3
|
||||
|
||||
sensor.reset() # Initialize the camera sensor.
|
||||
sensor.set_pixformat(sensor.RGB565) # or sensor.GRAYSCALE
|
||||
sensor.set_framesize(sensor.QVGA) # or sensor.QQVGA (or others)
|
||||
sensor.skip_frames(time = 2000) # Let new settings take effect.
|
||||
|
||||
pyb.LED(RED_LED_PIN).on()
|
||||
sensor.skip_frames(time = 2000) # Give the user time to get ready.
|
||||
|
||||
pyb.LED(RED_LED_PIN).off()
|
||||
pyb.LED(BLUE_LED_PIN).on()
|
||||
|
||||
print("You're on camera!")
|
||||
sensor.snapshot().save("example.jpg") # or "example.bmp" (or others)
|
||||
|
||||
pyb.LED(BLUE_LED_PIN).off()
|
||||
print("Done! Reset the camera to see the saved image.")
|
||||
@ -0,0 +1,51 @@
|
||||
# Snapshot on Face Detection Example
|
||||
#
|
||||
# Note: You will need an SD card to run this example.
|
||||
#
|
||||
# This example demonstrates using face tracking on your OpenMV Cam to take a
|
||||
# picture.
|
||||
|
||||
import sensor, image, pyb
|
||||
|
||||
RED_LED_PIN = 1
|
||||
BLUE_LED_PIN = 3
|
||||
|
||||
sensor.reset() # Initialize the camera sensor.
|
||||
sensor.set_pixformat(sensor.GRAYSCALE)
|
||||
sensor.set_framesize(sensor.HQVGA) # or sensor.QQVGA (or others)
|
||||
sensor.skip_frames(time = 2000) # Let new settings take effect.
|
||||
|
||||
# Load up a face detection HaarCascade. This is an object that your OpenMV Cam
# can use to detect faces using the find_features() method below. Your OpenMV
# Cam has the frontalface HaarCascade built-in. By default, all the stages of the
# HaarCascade are loaded. However, you can adjust the number of stages to speed
# up processing at the expense of accuracy. The frontalface HaarCascade has 25
# stages.
|
||||
face_cascade = image.HaarCascade("frontalface", stages=25)
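# A hedged sketch of trading accuracy for speed as described above: loading the
# same cascade with fewer of its 25 stages (17 here is an arbitrary example value)
# makes find_features() faster but more prone to false positives.
fast_cascade = image.HaarCascade("frontalface", stages=17) # unused below; illustration only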
|
||||
|
||||
while(True):
|
||||
|
||||
pyb.LED(RED_LED_PIN).on()
|
||||
print("About to start detecting faces...")
|
||||
sensor.skip_frames(time = 2000) # Give the user time to get ready.
|
||||
|
||||
pyb.LED(RED_LED_PIN).off()
|
||||
print("Now detecting faces!")
|
||||
pyb.LED(BLUE_LED_PIN).on()
|
||||
|
||||
diff = 10 # We'll say we detected a face after 10 frames.
|
||||
while(diff):
|
||||
img = sensor.snapshot()
|
||||
# Threshold can be between 0.0 and 1.0. A higher threshold results in a
|
||||
# higher detection rate with more false positives. The scale value
|
||||
# controls the matching scale allowing you to detect smaller faces.
|
||||
faces = img.find_features(face_cascade, threshold=0.5, scale_factor=1.5)
|
||||
|
||||
if faces:
|
||||
diff -= 1
|
||||
for r in faces:
|
||||
img.draw_rectangle(r)
|
||||
|
||||
pyb.LED(BLUE_LED_PIN).off()
|
||||
print("Face detected! Saving image...")
|
||||
sensor.snapshot().save("snapshot-%d.jpg" % pyb.rng()) # Save Pic.
|
||||
@ -0,0 +1,44 @@
|
||||
# Snapshot on Movement Example
|
||||
#
|
||||
# Note: You will need an SD card to run this example.
|
||||
#
|
||||
# This example demonstrates using frame differencing with your OpenMV Cam to do
|
||||
# motion detection. After motion is detected your OpenMV Cam will take a picture.
|
||||
|
||||
import sensor, image, pyb, os
|
||||
|
||||
RED_LED_PIN = 1
|
||||
BLUE_LED_PIN = 3
|
||||
|
||||
sensor.reset() # Initialize the camera sensor.
|
||||
sensor.set_pixformat(sensor.RGB565) # or sensor.GRAYSCALE
|
||||
sensor.set_framesize(sensor.QVGA) # or sensor.QQVGA (or others)
|
||||
sensor.skip_frames(time = 2000) # Let new settings take effect.
|
||||
|
||||
if not "temp" in os.listdir(): os.mkdir("temp") # Make a temp directory
|
||||
|
||||
while(True):
|
||||
|
||||
pyb.LED(RED_LED_PIN).on()
|
||||
print("About to save background image...")
|
||||
sensor.skip_frames(time = 2000) # Give the user time to get ready.
|
||||
|
||||
pyb.LED(RED_LED_PIN).off()
|
||||
sensor.snapshot().save("temp/bg.bmp")
|
||||
print("Saved background image - Now detecting motion!")
|
||||
pyb.LED(BLUE_LED_PIN).on()
|
||||
|
||||
diff = 10 # We'll say we detected motion after 10 frames of motion.
|
||||
while(diff):
|
||||
img = sensor.snapshot()
|
||||
img.difference("temp/bg.bmp")
|
||||
stats = img.statistics()
|
||||
# Stats 5 is the max of the lighting color channel. The below code
|
||||
# triggers when the lighting max for the whole image goes above 20.
|
||||
# The lighting difference maximum should be zero normally.
|
||||
if (stats[5] > 20):
|
||||
diff -= 1
|
||||
|
||||
pyb.LED(BLUE_LED_PIN).off()
|
||||
print("Movement detected! Saving image...")
|
||||
sensor.snapshot().save("temp/snapshot-%d.jpg" % pyb.rng()) # Save Pic.
|
||||
@ -0,0 +1,67 @@
|
||||
# Time Lapse Photos (Credit nedhorning)
|
||||
#
|
||||
# This example shows off how to take time lapse photos using your OpenMV
|
||||
# Cam and using the RTC module along with a timer interrupt to achieve
|
||||
# very low power operation.
|
||||
#
|
||||
# Note that if the USB is still plugged in when the camera is taking
|
||||
# pictures it will run the bootloader each time. Please power the camera
|
||||
# from something other than USB to not have the bootloader run.
|
||||
|
||||
import pyb, machine, sensor, image, os
|
||||
|
||||
# Create and init RTC object. This will allow us to set the current time for
|
||||
# the RTC and let us set an interrupt to wake up later on.
|
||||
rtc = pyb.RTC()
|
||||
newFile = False
|
||||
|
||||
try:
|
||||
os.stat('time.txt')
|
||||
except OSError: # If the log file doesn't exist then set the RTC and set newFile to True
|
||||
# datetime format: year, month, day, weekday (Monday=1, Sunday=7),
|
||||
# hours (24 hour clock), minutes, seconds, subseconds (counts down from 255 to 0)
|
||||
rtc.datetime((2018, 3, 9, 5, 13, 0, 0, 0))
|
||||
newFile = True
|
||||
|
||||
# Extract the date and time from the RTC object.
|
||||
dateTime = rtc.datetime()
|
||||
year = str(dateTime[0])
|
||||
month = '%02d' % dateTime[1]
|
||||
day = '%02d' % dateTime[2]
|
||||
hour = '%02d' % dateTime[4]
|
||||
minute = '%02d' % dateTime[5]
|
||||
second = '%02d' % dateTime[6]
|
||||
subSecond = str(dateTime[7])
|
||||
|
||||
newName='I'+year+month+day+hour+minute+second # Image file name based on RTC
|
||||
|
||||
# Enable RTC interrupts every 10 seconds, camera will RESET after wakeup from deepsleep Mode.
|
||||
rtc.wakeup(10000)
|
||||
|
||||
BLUE_LED_PIN = 3
|
||||
|
||||
sensor.reset() # Initialize the camera sensor.
|
||||
sensor.set_pixformat(sensor.GRAYSCALE)
|
||||
sensor.set_framesize(sensor.VGA)
|
||||
sensor.skip_frames(time = 1000) # Let new settings take effect.
|
||||
|
||||
# Let folks know we are about to take a picture.
|
||||
pyb.LED(BLUE_LED_PIN).on()
|
||||
|
||||
if(newFile): # If log file does not exist then create it.
|
||||
with open('time.txt', 'a') as timeFile: # Write text file to keep track of date, time and image number.
|
||||
timeFile.write('Date and time format: year, month, day, hours, minutes, seconds, subseconds' + '\n')
|
||||
timeFile.write(newName + ',' + year + ',' + month + ',' + day + ',' + hour + ',' + minute + ',' + second + ',' + subSecond + '\n')
|
||||
else:
|
||||
with open('time.txt', 'a') as timeFile: # Append to date, time and image number to text file.
|
||||
timeFile.write(newName + ',' + year + ',' + month + ',' + day + ',' + hour + ',' + minute + ',' + second + ',' + subSecond + '\n')
|
||||
|
||||
if not "images" in os.listdir(): os.mkdir("images") # Make a temp directory
|
||||
|
||||
# Take photo and save to SD card
|
||||
img = sensor.snapshot()
|
||||
img.save('images/' + newName, quality=90)
|
||||
pyb.LED(BLUE_LED_PIN).off()
|
||||
|
||||
# Enter Deepsleep Mode (i.e. the OpenMV Cam effectively turns itself off except for the RTC).
|
||||
machine.deepsleep()
|
||||
@ -0,0 +1,37 @@
|
||||
# GIF Video Recording Example
|
||||
#
|
||||
# Note: You will need an SD card to run this example.
|
||||
#
|
||||
# You can use your OpenMV Cam to record gif files. You can either feed the
|
||||
# recorder object RGB565 frames or Grayscale frames. Use photo editing software
|
||||
# like GIMP to compress and optimize the Gif before uploading it to the web.
|
||||
|
||||
import sensor, image, time, gif, pyb
|
||||
|
||||
RED_LED_PIN = 1
|
||||
BLUE_LED_PIN = 3
|
||||
|
||||
sensor.reset() # Initialize the camera sensor.
|
||||
sensor.set_pixformat(sensor.RGB565) # or sensor.GRAYSCALE
|
||||
sensor.set_framesize(sensor.QQVGA) # or sensor.QVGA (or others)
|
||||
sensor.skip_frames(time = 2000) # Let new settings take effect.
|
||||
clock = time.clock() # Tracks FPS.
|
||||
|
||||
pyb.LED(RED_LED_PIN).on()
|
||||
sensor.skip_frames(time = 2000) # Give the user time to get ready.
|
||||
|
||||
pyb.LED(RED_LED_PIN).off()
|
||||
pyb.LED(BLUE_LED_PIN).on()
|
||||
|
||||
g = gif.Gif("example.gif", loop=True)
|
||||
|
||||
print("You're on camera!")
|
||||
for i in range(100):
|
||||
clock.tick()
|
||||
# clock.avg() returns the milliseconds between frames - gif delay is in
|
||||
g.add_frame(sensor.snapshot(), delay=int(clock.avg()/10)) # centiseconds.
|
||||
print(clock.fps())
|
||||
|
||||
g.close()
|
||||
pyb.LED(BLUE_LED_PIN).off()
|
||||
print("Done! Reset the camera to see the saved recording.")
|
||||
@ -0,0 +1,65 @@
|
||||
# GIF Video Recording on Face Detection Example
|
||||
#
|
||||
# Note: You will need an SD card to run this example.
|
||||
#
|
||||
# You can use your OpenMV Cam to record gif files. You can either feed the
|
||||
# recorder object RGB565 frames or Grayscale frames. Use photo editing software
|
||||
# like GIMP to compress and optimize the Gif before uploading it to the web.
|
||||
#
|
||||
# This example demonstrates using face tracking on your OpenMV Cam to take a
|
||||
# gif.
|
||||
|
||||
import sensor, image, time, gif, pyb
|
||||
|
||||
RED_LED_PIN = 1
|
||||
BLUE_LED_PIN = 3
|
||||
|
||||
sensor.reset() # Initialize the camera sensor.
|
||||
sensor.set_pixformat(sensor.GRAYSCALE) # or sensor.RGB565
|
||||
sensor.set_framesize(sensor.QQVGA) # or sensor.HQVGA (or others)
|
||||
sensor.skip_frames(time = 2000) # Let new settings take effect.
|
||||
|
||||
# Load up a face detection HaarCascade. This is an object that your OpenMV Cam
# can use to detect faces using the find_features() method below. Your OpenMV
# Cam has the frontalface HaarCascade built-in. By default, all the stages of the
# HaarCascade are loaded. However, you can adjust the number of stages to speed
# up processing at the expense of accuracy. The frontalface HaarCascade has 25
# stages.
|
||||
face_cascade = image.HaarCascade("frontalface", stages=25)
|
||||
|
||||
while(True):
|
||||
|
||||
pyb.LED(RED_LED_PIN).on()
|
||||
print("About to start detecting faces...")
|
||||
sensor.skip_frames(time = 2000) # Give the user time to get ready.
|
||||
|
||||
pyb.LED(RED_LED_PIN).off()
|
||||
print("Now detecting faces!")
|
||||
pyb.LED(BLUE_LED_PIN).on()
|
||||
|
||||
diff = 10 # We'll say we detected a face after 10 frames.
|
||||
while(diff):
|
||||
img = sensor.snapshot()
|
||||
# Threshold can be between 0.0 and 1.0. A higher threshold results in a
|
||||
# higher detection rate with more false positives. The scale value
|
||||
# controls the matching scale allowing you to detect smaller faces.
|
||||
faces = img.find_features(face_cascade, threshold=0.5, scale_factor=1.5)
|
||||
|
||||
if faces:
|
||||
diff -= 1
|
||||
for r in faces:
|
||||
img.draw_rectangle(r)
|
||||
|
||||
g = gif.Gif("example-%d.gif" % pyb.rng(), loop=True)
|
||||
|
||||
clock = time.clock() # Tracks FPS.
|
||||
print("You're on camera!")
|
||||
for i in range(100):
|
||||
clock.tick()
|
||||
# clock.avg() returns the milliseconds between frames - gif delay is in
|
||||
g.add_frame(sensor.snapshot(), delay=int(clock.avg()/10)) # centiseconds.
|
||||
print(clock.fps())
|
||||
|
||||
g.close()
|
||||
pyb.LED(BLUE_LED_PIN).off()
|
||||
print("Restarting...")
|
||||
@ -0,0 +1,58 @@
|
||||
# GIF Video Recording on Movement Example
|
||||
#
|
||||
# Note: You will need an SD card to run this example.
|
||||
#
|
||||
# You can use your OpenMV Cam to record gif files. You can either feed the
|
||||
# recorder object RGB565 frames or Grayscale frames. Use photo editing software
|
||||
# like GIMP to compress and optimize the Gif before uploading it to the web.
|
||||
#
|
||||
# This example demonstrates using frame differencing with your OpenMV Cam to do
|
||||
# motion detection. After motion is detected your OpenMV Cam will record a video.
|
||||
|
||||
import sensor, image, time, gif, pyb, os
|
||||
|
||||
RED_LED_PIN = 1
|
||||
BLUE_LED_PIN = 3
|
||||
|
||||
sensor.reset() # Initialize the camera sensor.
|
||||
sensor.set_pixformat(sensor.RGB565) # or sensor.GRAYSCALE
|
||||
sensor.set_framesize(sensor.QQVGA) # or sensor.QVGA (or others)
|
||||
sensor.skip_frames(time = 2000) # Let new settings take effect.
|
||||
|
||||
if not "temp" in os.listdir(): os.mkdir("temp") # Make a temp directory
|
||||
|
||||
while(True):
|
||||
|
||||
pyb.LED(RED_LED_PIN).on()
|
||||
print("About to save background image...")
|
||||
sensor.skip_frames(time = 2000) # Give the user time to get ready.
|
||||
|
||||
pyb.LED(RED_LED_PIN).off()
|
||||
sensor.snapshot().save("temp/bg.bmp")
|
||||
print("Saved background image - Now detecting motion!")
|
||||
pyb.LED(BLUE_LED_PIN).on()
|
||||
|
||||
diff = 10 # We'll say we detected motion after 10 frames of motion.
|
||||
while(diff):
|
||||
img = sensor.snapshot()
|
||||
img.difference("temp/bg.bmp")
|
||||
stats = img.statistics()
|
||||
# Stats 5 is the max of the lighting color channel. The below code
|
||||
# triggers when the lighting max for the whole image goes above 20.
|
||||
# The lighting difference maximum should be zero normally.
|
||||
if (stats[5] > 20):
|
||||
diff -= 1
|
||||
|
||||
g = gif.Gif("example-%d.gif" % pyb.rng(), loop=True)
|
||||
|
||||
clock = time.clock() # Tracks FPS.
|
||||
print("You're on camera!")
|
||||
for i in range(100):
|
||||
clock.tick()
|
||||
# clock.avg() returns the milliseconds between frames - gif delay is in
|
||||
g.add_frame(sensor.snapshot(), delay=int(clock.avg()/10)) # centiseconds.
|
||||
print(clock.fps())
|
||||
|
||||
g.close()
|
||||
pyb.LED(BLUE_LED_PIN).off()
|
||||
print("Restarting...")
|
||||
@ -0,0 +1,33 @@
|
||||
# Image Memory Stream I/O Example
|
||||
#
|
||||
# This example shows how to use the ImageIO stream to record frames in memory and play them back.
|
||||
# Note: While this should work on any board, the board should have an SDRAM to be of any use.
|
||||
import sensor, image, time
|
||||
|
||||
# Number of frames to pre-allocate and record
|
||||
N_FRAMES = 500
|
||||
|
||||
sensor.reset()
|
||||
sensor.set_pixformat(sensor.RGB565)
|
||||
sensor.set_framesize(sensor.QVGA)
|
||||
|
||||
# This frame size must match the image size passed to ImageIO
|
||||
sensor.set_windowing((120, 120))
|
||||
sensor.skip_frames(time = 2000)
|
||||
|
||||
clock = time.clock()
|
||||
|
||||
# Write to memory stream
|
||||
stream = image.ImageIO((120, 120, sensor.RGB565), N_FRAMES)
|
||||
|
||||
for i in range(0, N_FRAMES):
|
||||
clock.tick()
|
||||
stream.write(sensor.snapshot())
|
||||
print(clock.fps())
|
||||
|
||||
while (True):
|
||||
# Rewind stream and play back
|
||||
stream.seek(0)
|
||||
for i in range(0, N_FRAMES):
|
||||
img = stream.read(copy_to_fb=True, pause=True)
|
||||
# Do machine vision algorithms on the image here.
|
||||
@ -0,0 +1,32 @@
|
||||
# Image Reader Example
|
||||
#
|
||||
# USE THIS EXAMPLE WITH A uSD CARD!
|
||||
#
|
||||
# This example shows how to use the Image Reader object to replay snapshots of what your
|
||||
# OpenMV Cam saw saved by the Image Writer object for testing machine vision algorithms.
|
||||
|
||||
# Altered to allow full-speed reading from the SD card for extracting sequences to the network, etc.
# Set the pause parameter below to False to read at full speed.
|
||||
|
||||
import sensor, image, time
|
||||
|
||||
snapshot_source = False # Set to true once finished to pull data from sensor.
|
||||
|
||||
sensor.reset()
|
||||
sensor.set_pixformat(sensor.RGB565)
|
||||
sensor.set_framesize(sensor.QQVGA)
|
||||
sensor.skip_frames(time = 2000)
|
||||
clock = time.clock()
|
||||
|
||||
stream = None
|
||||
if snapshot_source == False:
|
||||
stream = image.ImageIO("/stream.bin", "r")
|
||||
|
||||
while(True):
|
||||
clock.tick()
|
||||
if snapshot_source:
|
||||
img = sensor.snapshot()
|
||||
else:
|
||||
img = stream.read(copy_to_fb=True, loop=True, pause=True)
|
||||
# Do machine vision algorithms on the image here.
|
||||
print(clock.fps())
|
||||
@ -0,0 +1,36 @@
|
||||
# Image Writer Example
|
||||
#
|
||||
# USE THIS EXAMPLE WITH A uSD CARD! Reset the camera after recording to see the file.
|
||||
#
|
||||
# This example shows how to use the Image Writer object to record snapshots of what your
|
||||
# OpenMV Cam sees for later analysis using the Image Reader object. Images written to disk
|
||||
# by the Image Writer object are stored in a simple file format readable by your OpenMV Cam.
|
||||
|
||||
import sensor, image, pyb, time
|
||||
|
||||
record_time = 10000 # 10 seconds in milliseconds
|
||||
|
||||
sensor.reset()
|
||||
sensor.set_pixformat(sensor.RGB565)
|
||||
sensor.set_framesize(sensor.QQVGA)
|
||||
sensor.skip_frames(time = 2000)
|
||||
clock = time.clock()
|
||||
|
||||
stream = image.ImageIO("/stream.bin", "w")
|
||||
|
||||
# Red LED on means we are capturing frames.
|
||||
pyb.LED(1).on()
|
||||
|
||||
start = pyb.millis()
|
||||
while pyb.elapsed_millis(start) < record_time:
|
||||
clock.tick()
|
||||
img = sensor.snapshot()
|
||||
# Modify the image if you feel like here...
|
||||
stream.write(img)
|
||||
print(clock.fps())
|
||||
|
||||
stream.close()
|
||||
|
||||
# Blue LED on means we are done.
|
||||
pyb.LED(1).off()
|
||||
pyb.LED(3).on()
|
||||
@ -0,0 +1,37 @@
|
||||
# MJPEG Video Recording Example
|
||||
#
|
||||
# Note: You will need an SD card to run this demo.
|
||||
#
|
||||
# You can use your OpenMV Cam to record mjpeg files. You can either feed the
|
||||
# recorder object JPEG frames or RGB565/Grayscale frames. Once you've finished
|
||||
# recording a Mjpeg file you can use VLC to play it. If you are on Ubuntu then
|
||||
# the built-in video player will work too.
|
||||
|
||||
import sensor, image, time, mjpeg, pyb
|
||||
|
||||
RED_LED_PIN = 1
|
||||
BLUE_LED_PIN = 3
|
||||
|
||||
sensor.reset() # Initialize the camera sensor.
|
||||
sensor.set_pixformat(sensor.RGB565) # or sensor.GRAYSCALE
|
||||
sensor.set_framesize(sensor.QVGA) # or sensor.QQVGA (or others)
|
||||
sensor.skip_frames(time = 2000) # Let new settings take effect.
|
||||
clock = time.clock() # Tracks FPS.
|
||||
|
||||
pyb.LED(RED_LED_PIN).on()
|
||||
sensor.skip_frames(time = 2000) # Give the user time to get ready.
|
||||
|
||||
pyb.LED(RED_LED_PIN).off()
|
||||
pyb.LED(BLUE_LED_PIN).on()
|
||||
|
||||
m = mjpeg.Mjpeg("example.mjpeg")
|
||||
|
||||
print("You're on camera!")
|
||||
for i in range(200):
|
||||
clock.tick()
|
||||
m.add_frame(sensor.snapshot())
|
||||
print(clock.fps())
|
||||
|
||||
m.close(clock.fps())
|
||||
pyb.LED(BLUE_LED_PIN).off()
|
||||
print("Done! Reset the camera to see the saved recording.")
|
||||
@ -0,0 +1,65 @@
|
||||
# MJPEG Video Recording on Face Detection Example
|
||||
#
|
||||
# Note: You will need an SD card to run this example.
|
||||
#
|
||||
# You can use your OpenMV Cam to record mjpeg files. You can either feed the
|
||||
# recorder object JPEG frames or RGB565/Grayscale frames. Once you've finished
|
||||
# recording a Mjpeg file you can use VLC to play it. If you are on Ubuntu then
|
||||
# the built-in video player will work too.
|
||||
#
|
||||
# This example demonstrates using face tracking on your OpenMV Cam to take a
|
||||
# mjpeg.
|
||||
|
||||
import sensor, image, time, mjpeg, pyb
|
||||
|
||||
RED_LED_PIN = 1
|
||||
BLUE_LED_PIN = 3
|
||||
|
||||
sensor.reset() # Initialize the camera sensor.
|
||||
sensor.set_pixformat(sensor.GRAYSCALE) # or sensor.RGB565
|
||||
sensor.set_framesize(sensor.QQVGA) # or sensor.HQVGA (or others)
|
||||
sensor.skip_frames(time = 2000) # Let new settings take effect.
|
||||
|
||||
# Load up a face detection HaarCascade. This is an object that your OpenMV Cam
# can use to detect faces using the find_features() method below. Your OpenMV
# Cam has the frontalface HaarCascade built-in. By default, all the stages of the
# HaarCascade are loaded. However, you can adjust the number of stages to speed
# up processing at the expense of accuracy. The frontalface HaarCascade has 25
# stages.
|
||||
face_cascade = image.HaarCascade("frontalface", stages=25)
|
||||
|
||||
while(True):
|
||||
|
||||
pyb.LED(RED_LED_PIN).on()
|
||||
print("About to start detecting faces...")
|
||||
sensor.skip_frames(time = 2000) # Give the user time to get ready.
|
||||
|
||||
pyb.LED(RED_LED_PIN).off()
|
||||
print("Now detecting faces!")
|
||||
pyb.LED(BLUE_LED_PIN).on()
|
||||
|
||||
diff = 10 # We'll say we detected a face after 10 frames.
|
||||
while(diff):
|
||||
img = sensor.snapshot()
|
||||
# Threshold can be between 0.0 and 1.0. A higher threshold results in a
|
||||
# higher detection rate with more false positives. The scale value
|
||||
# controls the matching scale allowing you to detect smaller faces.
|
||||
faces = img.find_features(face_cascade, threshold=0.5, scale_factor=1.5)
|
||||
|
||||
if faces:
|
||||
diff -= 1
|
||||
for r in faces:
|
||||
img.draw_rectangle(r)
|
||||
|
||||
m = mjpeg.Mjpeg("example-%d.mjpeg" % pyb.rng())
|
||||
|
||||
clock = time.clock() # Tracks FPS.
|
||||
print("You're on camera!")
|
||||
for i in range(200):
|
||||
clock.tick()
|
||||
m.add_frame(sensor.snapshot())
|
||||
print(clock.fps())
|
||||
|
||||
m.close(clock.fps())
|
||||
pyb.LED(BLUE_LED_PIN).off()
|
||||
print("Restarting...")
|
||||
@ -0,0 +1,58 @@
# MJPEG Video Recording on Movement Example
#
# Note: You will need an SD card to run this example.
#
# You can use your OpenMV Cam to record mjpeg files. You can either feed the
# recorder object JPEG frames or RGB565/Grayscale frames. Once you've finished
# recording an MJPEG file you can use VLC to play it. If you are on Ubuntu then
# the built-in video player will work too.
#
# This example demonstrates using frame differencing with your OpenMV Cam to do
# motion detection. After motion is detected your OpenMV Cam will take video.

import sensor, image, time, mjpeg, pyb, os

RED_LED_PIN = 1
BLUE_LED_PIN = 3

sensor.reset() # Initialize the camera sensor.
sensor.set_pixformat(sensor.RGB565) # or sensor.GRAYSCALE
sensor.set_framesize(sensor.QVGA) # or sensor.QQVGA (or others)
sensor.skip_frames(time = 2000) # Let new settings take effect.

if "temp" not in os.listdir(): os.mkdir("temp") # Make a temp directory

while(True):

    pyb.LED(RED_LED_PIN).on()
    print("About to save background image...")
    sensor.skip_frames(time = 2000) # Give the user time to get ready.

    pyb.LED(RED_LED_PIN).off()
    sensor.snapshot().save("temp/bg.bmp")
    print("Saved background image - Now detecting motion!")
    pyb.LED(BLUE_LED_PIN).on()

    diff = 10 # We'll say we detected motion after 10 frames of motion.
    while(diff):
        img = sensor.snapshot()
        img.difference("temp/bg.bmp")
        stats = img.statistics()
        # Stats 5 is the max of the lighting color channel. The code below
        # triggers when the lighting max for the whole image goes above 20.
        # The lighting difference maximum should be zero normally.
        if (stats[5] > 20):
            diff -= 1

    m = mjpeg.Mjpeg("example-%d.mjpeg" % pyb.rng())

    clock = time.clock() # Tracks FPS.
    print("You're on camera!")
    for i in range(200):
        clock.tick()
        m.add_frame(sensor.snapshot())
        print(clock.fps())

    m.close(clock.fps())
    pyb.LED(BLUE_LED_PIN).off()
    print("Restarting...")
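A side note on the stats[5] indexing above: for an RGB565 difference image, index 5 is the maximum of the first (lightness) channel. On firmware that exposes named accessors on the statistics object, an equivalent and more readable check would likely be the following; the l_max() call is an assumption about your firmware, not something used by the original example:

        stats = img.statistics()
        # l_max() is assumed to mirror stats[5]: the max of the lightness channel.
        if stats.l_max() > 20:
            diff -= 1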
@ -0,0 +1,46 @@
# Face Detection Example
#
# This example shows off the built-in face detection feature of the OpenMV Cam.
#
# Face detection works by using the Haar Cascade feature detector on an image. A
# Haar Cascade is a series of simple area contrast checks. For the built-in
# frontalface detector there are 25 stages of checks with each stage having
# hundreds of checks apiece. Haar Cascades run fast because later stages are
# only evaluated if previous stages pass. Additionally, your OpenMV Cam uses
# a data structure called the integral image to quickly execute each area
# contrast check in constant time (the reason for feature detection being
# grayscale only is because of the space requirement for the integral image).

import sensor, time, image

# Reset sensor
sensor.reset()
sensor.set_framesize(sensor.QVGA)
sensor.set_pixformat(sensor.GRAYSCALE)

# Load Haar Cascade
# By default this will use all stages; using fewer stages is faster but less accurate.
face_cascade = image.HaarCascade("frontalface", stages=25)
print(face_cascade)

# FPS clock
clock = time.clock()

while (True):
    clock.tick()

    # Capture snapshot
    img = sensor.snapshot()

    # Find objects.
    # Note: Lower scale factor scales-down the image more and detects smaller objects.
    # Higher threshold results in a higher detection rate, with more false positives.
    objects = img.find_features(face_cascade, threshold=0.75, scale_factor=1.25)

    # Draw objects
    for r in objects:
        img.draw_rectangle(r)

    # Print FPS.
    # Note: Actual FPS is higher; streaming the FB makes it slower.
    print(clock.fps())
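A quick sketch of the speed/accuracy trade-offs mentioned in the comments above: loading fewer Haar stages and restricting find_features() to a region of interest both reduce work per frame. The stage count and ROI below are illustrative values, not tuned recommendations from the original example:

# Fewer stages trades detection accuracy for speed.
fast_cascade = image.HaarCascade("frontalface", stages=17)

img = sensor.snapshot()
roi = (80, 60, 160, 120) # Center crop of a QVGA (320x240) frame.
objects = img.find_features(fast_cascade, threshold=0.75, scale_factor=1.25, roi=roi)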
@ -0,0 +1,27 @@
# Face recognition with LBP descriptors.
# See Timo Ahonen's "Face Recognition with Local Binary Patterns".
#
# Before running the example:
# 1) Download the AT&T faces database http://www.cl.cam.ac.uk/Research/DTG/attarchive/pub/data/att_faces.zip
# 2) Extract and copy the orl_faces directory to the SD card root.
#
# NOTE: This is just a PoC implementation of the paper mentioned above; it does not work well in real life conditions.

import sensor, time, image

SUB = "s2"
NUM_SUBJECTS = 5
NUM_SUBJECTS_IMGS = 10

img = image.Image("orl_faces/%s/1.pgm"%(SUB)).mask_ellipse()
d0 = img.find_lbp((0, 0, img.width(), img.height()))
img = None

print("")
for s in range(1, NUM_SUBJECTS+1):
    dist = 0
    for i in range(2, NUM_SUBJECTS_IMGS+1):
        img = image.Image("orl_faces/s%d/%d.pgm"%(s, i)).mask_ellipse()
        d1 = img.find_lbp((0, 0, img.width(), img.height()))
        dist += image.match_descriptor(d0, d1)
    print("Average dist for subject %d: %d"%(s, dist/NUM_SUBJECTS_IMGS))
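To turn the per-subject distances above into an actual recognition result, the subject with the lowest average distance to the reference descriptor d0 can be taken as the match. A small sketch extending the loop above (the variable names are illustrative):

best_subject = None
best_dist = None
for s in range(1, NUM_SUBJECTS+1):
    dist = 0
    for i in range(2, NUM_SUBJECTS_IMGS+1):
        img = image.Image("orl_faces/s%d/%d.pgm"%(s, i)).mask_ellipse()
        d1 = img.find_lbp((0, 0, img.width(), img.height()))
        dist += image.match_descriptor(d0, d1)
    avg = dist / NUM_SUBJECTS_IMGS
    # A lower LBP distance means a closer match to the reference descriptor d0.
    if (best_dist == None) or (avg < best_dist):
        best_subject = s
        best_dist = avg
print("Best match: subject %d (avg dist %d)"%(best_subject, best_dist))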
@ -0,0 +1,63 @@
# Face Tracking Example
#
# This example shows off using the keypoints feature of your OpenMV Cam to track
# a face after it has been detected by a Haar Cascade. The first part of this
# script finds a face in the image using the frontalface Haar Cascade.
# After that, the script uses the keypoints feature to automatically learn your
# face and track it. Keypoints can be used to automatically track anything.
import sensor, time, image

# Reset sensor
sensor.reset()
sensor.set_pixformat(sensor.GRAYSCALE)
sensor.set_framesize(sensor.QVGA)
sensor.skip_frames(time = 2000)

# Load Haar Cascade
# By default this will use all stages; using fewer stages is faster but less accurate.
face_cascade = image.HaarCascade("frontalface", stages=25)
print(face_cascade)

# First set of keypoints
kpts1 = None

# Find a face!
while (kpts1 == None):
    img = sensor.snapshot()
    img.draw_string(0, 0, "Looking for a face...")
    # Find faces
    objects = img.find_features(face_cascade, threshold=0.5, scale_factor=1.25)
    if objects:
        # Expand the ROI by 31 pixels in every direction
        face = (objects[0][0]-31, objects[0][1]-31, objects[0][2]+31*2, objects[0][3]+31*2)
        # Extract keypoints using the detected face size as the ROI
        kpts1 = img.find_keypoints(threshold=10, scale_factor=1.1, max_keypoints=100, roi=face)
        # Draw a rectangle around the first face
        img.draw_rectangle(objects[0])

# Draw keypoints
print(kpts1)
img.draw_keypoints(kpts1, size=24)
img = sensor.snapshot()
time.sleep_ms(2000)

# FPS clock
clock = time.clock()

while (True):
    clock.tick()
    img = sensor.snapshot()
    # Extract keypoints from the whole frame
    kpts2 = img.find_keypoints(threshold=10, scale_factor=1.1, max_keypoints=100, normalized=True)

    if (kpts2):
        # Match the first set of keypoints with the second one
        c = image.match_descriptor(kpts1, kpts2, threshold=85)
        match = c[6] # c[6] contains the number of matches.
        if (match > 5):
            img.draw_rectangle(c[2:6])
            img.draw_cross(c[0], c[1], size=10)
            print(kpts2, "matched:%d dt:%d"%(match, c[7]))

    # Draw FPS
    img.draw_string(0, 0, "FPS:%.2f"%(clock.fps()))
@ -0,0 +1,19 @@
# Edge detection with Canny:
#
# This example demonstrates the Canny edge detector.
import sensor, image, time

sensor.reset() # Initialize the camera sensor.
sensor.set_pixformat(sensor.GRAYSCALE) # or sensor.RGB565
sensor.set_framesize(sensor.QQVGA) # or sensor.QVGA (or others)
sensor.skip_frames(time = 2000) # Let new settings take effect.

clock = time.clock() # Tracks FPS.
while(True):
    clock.tick() # Track elapsed milliseconds between snapshots().
    img = sensor.snapshot() # Take a picture and return the image.
    # Use Canny edge detector
    img.find_edges(image.EDGE_CANNY, threshold=(50, 80))
    # Faster simpler edge detection
    #img.find_edges(image.EDGE_SIMPLE, threshold=(100, 255))
    print(clock.fps()) # Note: Your OpenMV Cam runs about half as fast while connected to the IDE.
@ -0,0 +1,39 @@
# Find Circles Example
#
# This example shows off how to find circles in the image using the Hough
# Transform. https://en.wikipedia.org/wiki/Circle_Hough_Transform
#
# Note that the find_circles() method will only find circles which are completely
# inside of the image. Circles which go outside of the image/roi are ignored...

import sensor, image, time

sensor.reset()
sensor.set_pixformat(sensor.RGB565) # grayscale is faster
sensor.set_framesize(sensor.QQVGA)
sensor.skip_frames(time = 2000)
clock = time.clock()

while(True):
    clock.tick()
    img = sensor.snapshot().lens_corr(1.8)

    # Circle objects have four values: x, y, r (radius), and magnitude. The
    # magnitude is the strength of the detection of the circle. Higher is
    # better...

    # `threshold` controls how many circles are found. Increase its value
    # to decrease the number of circles detected...

    # `x_margin`, `y_margin`, and `r_margin` control the merging of similar
    # circles in the x, y, and r (radius) directions.

    # r_min, r_max, and r_step control what radii of circles are tested.
    # Shrinking the number of tested circle radii yields a big performance boost.

    for c in img.find_circles(threshold = 2000, x_margin = 10, y_margin = 10, r_margin = 10,
                              r_min = 2, r_max = 100, r_step = 2):
        img.draw_circle(c.x(), c.y(), c.r(), color = (255, 0, 0))
        print(c)

    print("FPS %f" % clock.fps())
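Because circle objects carry the magnitude value described above (the strength of the detection), weak detections can be filtered out before drawing. A brief sketch for the loop body above; the 3000 cutoff is an arbitrary example value:

    for c in img.find_circles(threshold = 2000, x_margin = 10, y_margin = 10, r_margin = 10,
                              r_min = 2, r_max = 100, r_step = 2):
        # Only keep strong detections.
        if c.magnitude() > 3000:
            img.draw_circle(c.x(), c.y(), c.r(), color = (255, 0, 0))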
@ -0,0 +1,39 @@
# Find Line Segments Example
#
# This example shows off how to find line segments in the image. For each line segment
# found in the image a line object is returned which includes the line's rotation.

# find_line_segments() finds finite length lines (but it is slow).
# Use find_lines() instead if you only need infinite lines (it is faster).

enable_lens_corr = False # turn on for straighter lines...

import sensor, image, time

sensor.reset()
sensor.set_pixformat(sensor.RGB565) # grayscale is faster
sensor.set_framesize(sensor.QQVGA)
sensor.skip_frames(time = 2000)
clock = time.clock()

# All lines also have `x1()`, `y1()`, `x2()`, and `y2()` methods to get their end-points
# and a `line()` method to get all the above as one 4 value tuple for `draw_line()`.

while(True):
    clock.tick()
    img = sensor.snapshot()
    if enable_lens_corr: img.lens_corr(1.8) # for 2.8mm lens...

    # `merge_distance` controls the merging of nearby lines. At 0 (the default), no
    # merging is done. At 1, any line 1 pixel away from another is merged... and so
    # on as you increase this value. You may wish to merge lines as line segment
    # detection produces a lot of line segment results.

    # `max_theta_diff` controls the maximum amount of rotation difference between
    # any two lines about to be merged. The default setting allows for 15 degrees.

    for l in img.find_line_segments(merge_distance = 0, max_theta_diff = 5):
        img.draw_line(l.line(), color = (255, 0, 0))
        # print(l)

    print("FPS %f" % clock.fps())
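Line segment objects also expose length() (in pixels), which makes it easy to drop very short segments before drawing. A brief sketch for the loop above; the 20 pixel cutoff is an arbitrary example value:

    for l in img.find_line_segments(merge_distance = 0, max_theta_diff = 5):
        # Ignore very short segments to cut down on noise.
        if l.length() > 20:
            img.draw_line(l.line(), color = (255, 0, 0))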
@ -0,0 +1,57 @@
# Find Lines Example
#
# This example shows off how to find lines in the image. For each line
# found in the image a line object is returned which includes the line's rotation.

# Note: Line detection is done by using the Hough Transform:
# http://en.wikipedia.org/wiki/Hough_transform
# Please read about it above for more information on what `theta` and `rho` are.

# find_lines() finds infinite length lines. Use find_line_segments() to find non-infinite lines.

enable_lens_corr = False # turn on for straighter lines...

import sensor, image, time

sensor.reset()
sensor.set_pixformat(sensor.RGB565) # grayscale is faster
sensor.set_framesize(sensor.QQVGA)
sensor.skip_frames(time = 2000)
clock = time.clock()

# All line objects have a `theta()` method to get their rotation angle in degrees.
# You can filter lines based on their rotation angle.

min_degree = 0
max_degree = 179

# All lines also have `x1()`, `y1()`, `x2()`, and `y2()` methods to get their end-points
# and a `line()` method to get all the above as one 4 value tuple for `draw_line()`.

while(True):
    clock.tick()
    img = sensor.snapshot()
    if enable_lens_corr: img.lens_corr(1.8) # for 2.8mm lens...

    # `threshold` controls how many lines in the image are found. Only lines with
    # edge difference magnitude sums greater than `threshold` are detected...

    # More about `threshold` - each pixel in the image contributes a magnitude value
    # to a line. The sum of all contributions is the magnitude for that line. Then
    # when lines are merged their magnitudes are added together. Note that `threshold`
    # filters out lines with low magnitudes before merging. To see the magnitude of
    # un-merged lines set `theta_margin` and `rho_margin` to 0...

    # `theta_margin` and `rho_margin` control merging similar lines. If two lines'
    # theta and rho value differences are less than the margins then they are merged.

    for l in img.find_lines(threshold = 1000, theta_margin = 25, rho_margin = 25):
        if (min_degree <= l.theta()) and (l.theta() <= max_degree):
            img.draw_line(l.line(), color = (255, 0, 0))
            # print(l)

    print("FPS %f" % clock.fps())

# About negative rho values:
#
# A [theta+0:-rho] tuple is the same as [theta+180:+rho].
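The closing note about negative rho values can be made concrete with a tiny, standalone helper (illustrative only, not part of the original example): a line reported as (theta, -rho) describes the same line as (theta + 180, +rho).

def normalize_polar(theta, rho):
    # Convert a negative-rho (theta, rho) pair into its positive-rho equivalent.
    if rho < 0:
        return ((theta + 180) % 360, -rho)
    return (theta, rho)

print(normalize_polar(45, -100)) # -> (225, 100), the same line as (45, -100)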
@ -0,0 +1,31 @@
# Find Rects Example
#
# This example shows off how to find rectangles in the image using the quad threshold
# detection code from our April Tags code. The quad threshold detection algorithm
# detects rectangles in an extremely robust way and is much better than Hough
# Transform based methods. For example, it can still detect rectangles even when lens
# distortion causes those rectangles to look bent. Rounded rectangles are no problem!
# (But, because of this, the code will also detect small radius circles too.)

import sensor, image, time

sensor.reset()
sensor.set_pixformat(sensor.RGB565) # grayscale is faster (160x120 max on OpenMV-M7)
sensor.set_framesize(sensor.QQVGA)
sensor.skip_frames(time = 2000)
clock = time.clock()

while(True):
    clock.tick()
    img = sensor.snapshot()

    # `threshold` below should be set to a high enough value to filter out noise
    # rectangles detected in the image which have low edge magnitudes. Rectangles
    # have larger edge magnitudes the larger and more contrasty they are...

    for r in img.find_rects(threshold = 10000):
        img.draw_rectangle(r.rect(), color = (255, 0, 0))
        for p in r.corners(): img.draw_circle(p[0], p[1], 5, color = (0, 255, 0))
        print(r)

    print("FPS %f" % clock.fps())
@ -0,0 +1,25 @@
# Histogram of Oriented Gradients (HoG) Example
#
# This example demonstrates HoG visualization.
#
# Note: Due to JPEG artifacts, the HoG visualization looks blurry. To see the
# image without JPEG artifacts, uncomment the lines that save the image to uSD.

import sensor, image, time

sensor.reset()
sensor.set_framesize(sensor.QVGA)
sensor.set_pixformat(sensor.GRAYSCALE)
sensor.skip_frames(time = 2000)

clock = time.clock() # Tracks FPS.
while (True):
    clock.tick()
    img = sensor.snapshot()
    img.find_hog()

    # Uncomment to save raw FB to file and exit the loop
    #img.save("/hog.pgm")
    #break

    print(clock.fps())
@ -0,0 +1,51 @@
# Object tracking with keypoints example.
# Show the camera an object and then run the script. A set of keypoints will be extracted
# once and then tracked in the following frames. If you want a new set of keypoints re-run
# the script. NOTE: see the docs for arguments to tune find_keypoints and match_descriptor.
import sensor, time, image

# Reset sensor
sensor.reset()
sensor.set_pixformat(sensor.GRAYSCALE)
sensor.set_framesize(sensor.QVGA)
sensor.skip_frames(time = 2000)

def draw_keypoints(img, kpts):
    if kpts:
        print(kpts)
        img.draw_keypoints(kpts)
        img = sensor.snapshot()
        time.sleep_ms(1000)

kpts1 = None
# NOTE: uncomment to load a keypoints descriptor from file
#kpts1 = image.load_descriptor("/desc.orb")
#img = sensor.snapshot()
#draw_keypoints(img, kpts1)

clock = time.clock()
while (True):
    clock.tick()
    img = sensor.snapshot()
    if (kpts1 == None):
        # NOTE: By default find_keypoints returns multi-scale keypoints extracted from an image pyramid.
        kpts1 = img.find_keypoints(max_keypoints=150, threshold=10, scale_factor=1.2)
        draw_keypoints(img, kpts1)
    else:
        # NOTE: When extracting keypoints to match the first descriptor, we use normalized=True to extract
        # keypoints from the first scale only, which will match one of the scales in the first descriptor.
        kpts2 = img.find_keypoints(max_keypoints=150, threshold=10, normalized=True)
        if (kpts2):
            match = image.match_descriptor(kpts1, kpts2, threshold=85)
            if (match.count() > 10):
                # If we have at least n "good matches"
                # Draw bounding rectangle and cross.
                img.draw_rectangle(match.rect())
                img.draw_cross(match.cx(), match.cy(), size=10)

            print(kpts2, "matched:%d dt:%d"%(match.count(), match.theta()))
            # NOTE: uncomment if you want to draw the keypoints
            #img.draw_keypoints(kpts2, size=KEYPOINTS_SIZE, matched=True)

    # Draw FPS
    img.draw_string(0, 0, "FPS:%.2f"%(clock.fps()))
@ -0,0 +1,30 @@
# Keypoints descriptor example.
# This example shows how to save a keypoints descriptor to file. Show the camera an object
# and then run the script. The script will extract and save a keypoints descriptor and the image.
# You can use the keypoints_editor.py util to remove unwanted keypoints.
#
# NOTE: Please reset the camera after running this script to see the new file.
import sensor, time, image

# Reset sensor
sensor.reset()
sensor.set_framesize(sensor.QVGA)
sensor.set_pixformat(sensor.GRAYSCALE)
sensor.skip_frames(time = 2000)

FILE_NAME = "desc"
img = sensor.snapshot()
# NOTE: See the docs for other arguments
# NOTE: By default find_keypoints returns multi-scale keypoints extracted from an image pyramid.
kpts = img.find_keypoints(max_keypoints=150, threshold=10, scale_factor=1.2)

if (kpts == None):
    raise(Exception("Couldn't find any keypoints!"))

image.save_descriptor(kpts, "/%s.orb"%(FILE_NAME))
img.save("/%s.pgm"%(FILE_NAME))

img.draw_keypoints(kpts)
sensor.snapshot()
time.sleep_ms(1000)
raise(Exception("Done! Please reset the camera"))
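A hedged sketch of using the saved descriptor on a later run: load it back with image.load_descriptor() (as the object-tracking example above does in its commented-out lines) and match it against keypoints from a fresh frame. The threshold and match-count values mirror the ones used elsewhere in these examples:

kpts1 = image.load_descriptor("/desc.orb")

img = sensor.snapshot()
kpts2 = img.find_keypoints(max_keypoints=150, threshold=10, normalized=True)
if kpts2:
    match = image.match_descriptor(kpts1, kpts2, threshold=85)
    if match.count() > 10:
        img.draw_rectangle(match.rect())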
@ -0,0 +1,49 @@
# Local Binary Patterns (LBP) Example
#
# This example shows off how to use the local binary pattern feature descriptor
# on your OpenMV Cam. LBP descriptors work like Freak feature descriptors.
#
# WARNING: LBP support needs to be reworked! As of right now this feature needs
# a lot of work to be made into something useful. This script will remain to show
# that the functionality exists, but, in its current state, it is inadequate.

import sensor, time, image
sensor.reset()

# Reset sensor
sensor.reset()
sensor.set_framesize(sensor.HQVGA)
sensor.set_pixformat(sensor.GRAYSCALE)

# Load Haar Cascade
# By default this will use all stages; using fewer stages is faster but less accurate.
face_cascade = image.HaarCascade("frontalface", stages=25)
print(face_cascade)

# Skip a few frames to allow the sensor to settle down
# Note: This takes more time when executed from the IDE.
for i in range(0, 30):
    img = sensor.snapshot()
    img.draw_string(0, 0, "Please wait...")

d0 = None
#d0 = image.load_descriptor("/desc.lbp")
clock = time.clock()

while (True):
    clock.tick()
    img = sensor.snapshot()

    objects = img.find_features(face_cascade, threshold=0.5, scale_factor=1.25)
    if objects:
        face = objects[0]
        d1 = img.find_lbp(face)
        if (d0 == None):
            d0 = d1
        else:
            dist = image.match_descriptor(d0, d1)
            img.draw_string(0, 10, "Match %d%%"%(dist))

        img.draw_rectangle(face)
    # Draw FPS
    img.draw_string(0, 0, "FPS:%.2f"%(clock.fps()))
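If you want to keep the reference LBP descriptor across resets, it can be persisted the same way the ORB descriptors are handled in the keypoints examples; the commented-out load_descriptor() call above already hints at the file name. A hedged sketch, mirroring the save_descriptor() usage shown earlier in this diff:

# After the first face has been captured into d0:
if (d0 != None):
    image.save_descriptor(d0, "/desc.lbp")
# ...and on a later run, restore it instead of re-learning:
# d0 = image.load_descriptor("/desc.lbp")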
@ -0,0 +1,43 @@
# Fast Linear Regression Example
#
# This example shows off how to use the get_regression() method on your OpenMV Cam
# to get the linear regression of a ROI. Using this method you can easily build
# a robot which can track lines which all point in the same general direction
# but are not actually connected. Use find_blobs() on lines that are nicely
# connected for better filtering options and control.
#
# This is called the fast linear regression because we use the least-squares
# method to fit the line. However, this method is NOT GOOD for images that
# have a lot of (or really any) outlier points, which corrupt the line fit...

THRESHOLD = (0, 100) # Grayscale threshold for dark things...
BINARY_VISIBLE = True # Do a binary() pass first so you can see what the linear regression
                      # is being run on... it might lower the FPS though.

import sensor, image, time

sensor.reset()
sensor.set_pixformat(sensor.GRAYSCALE)
sensor.set_framesize(sensor.QQVGA)
sensor.skip_frames(time = 2000)
clock = time.clock()

while(True):
    clock.tick()
    img = sensor.snapshot().binary([THRESHOLD]) if BINARY_VISIBLE else sensor.snapshot()

    # Returns a line object similar to line objects returned by find_lines() and
    # find_line_segments(). You have x1(), y1(), x2(), y2(), length(),
    # theta() (rotation in degrees), rho(), and magnitude().
    #
    # magnitude() represents how well the linear regression worked. It goes from
    # (0, INF] where 0 is returned for a circle. The more linear the
    # scene is the higher the magnitude.
    line = img.get_regression([(255, 255) if BINARY_VISIBLE else THRESHOLD])

    if (line): img.draw_line(line.line(), color = 127)
    print("FPS %f, mag = %s" % (clock.fps(), str(line.magnitude()) if (line) else "N/A"))

# About negative rho values:
#
# A [theta+0:-rho] tuple is the same as [theta+180:+rho].
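As the comments above note, least-squares fitting is corrupted by outliers. OpenMV's get_regression() also accepts a robust=True argument (a Theil-Sen, median-of-slopes fit) that is slower but tolerates outliers; the call below is a hedged sketch of that variant for the loop above, not part of the original example:

    # Same call as above, but using the robust (Theil-Sen) fit instead of least squares.
    line = img.get_regression([(255, 255) if BINARY_VISIBLE else THRESHOLD], robust = True)
    if (line): img.draw_line(line.line(), color = 127)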