 rename scripts/examples/{00-Arduino => Arduino/00-Arduino}/arduino_i2c_slave.py (100%)
 rename scripts/examples/{00-Arduino => Arduino/00-Arduino}/arduino_spi_slave.py (100%)
 rename scripts/examples/{00-Arduino => Arduino/00-Arduino}/arduino_uart.py (100%)
 rename scripts/examples/{01-Basics => Arduino/01-Basics}/helloworld.py (100%)
 rename scripts/examples/{01-Basics => Arduino/01-Basics}/main.py (100%)
 rename scripts/examples/{02-Board-Control => Arduino/02-Board-Control}/adc_read.py (100%)
 rename scripts/examples/{02-Board-Control => Arduino/02-Board-Control}/adc_read_int_channel.py (100%)
 rename scripts/examples/{02-Board-Control => Arduino/02-Board-Control}/can.py (100%)
 rename scripts/examples/{02-Board-Control => Arduino/02-Board-Control}/cpufreq_scaling.py (100%)
 rename scripts/examples/{02-Board-Control => Arduino/02-Board-Control}/dac_write.py (100%)
 rename scripts/examples/{02-Board-Control => Arduino/02-Board-Control}/dac_write_timed.py (100%)
 rename scripts/examples/{02-Board-Control => Arduino/02-Board-Control}/i2c_control.py (100%)
 rename scripts/examples/{02-Board-Control => Arduino/02-Board-Control}/led_control.py (100%)
 rename scripts/examples/{02-Board-Control => Arduino/02-Board-Control}/native_emitters.py (100%)
 rename scripts/examples/{02-Board-Control => Arduino/02-Board-Control}/pin_control.py (100%)
 rename scripts/examples/{02-Board-Control => Arduino/02-Board-Control}/pwm_control.py (100%)
 rename scripts/examples/{02-Board-Control => Arduino/02-Board-Control}/rtc.py (100%)
 rename scripts/examples/{02-Board-Control => Arduino/02-Board-Control}/servo_control.py (100%)
 rename scripts/examples/{02-Board-Control => Arduino/02-Board-Control}/spi_control.py (100%)
 rename scripts/examples/{02-Board-Control => Arduino/02-Board-Control}/timer_control.py (100%)
 rename scripts/examples/{02-Board-Control => Arduino/02-Board-Control}/timer_tests.py (100%)
 rename scripts/examples/{02-Board-Control => Arduino/02-Board-Control}/uart_control.py (100%)
 rename scripts/examples/{02-Board-Control => Arduino/02-Board-Control}/usb_hid.py (100%)
 rename scripts/examples/{02-Board-Control => Arduino/02-Board-Control}/usb_vcp.py (100%)
 rename scripts/examples/{02-Board-Control => Arduino/02-Board-Control}/vsync_gpio_output.py (100%)
 rename scripts/examples/{03-Drawing => Arduino/03-Drawing}/arrow_drawing.py (100%)
 rename scripts/examples/{03-Drawing => Arduino/03-Drawing}/circle_drawing.py (100%)
 rename scripts/examples/{03-Drawing => Arduino/03-Drawing}/copy2fb.py (100%)
 rename scripts/examples/{03-Drawing => Arduino/03-Drawing}/cross_drawing.py (100%)
 rename scripts/examples/{03-Drawing => Arduino/03-Drawing}/ellipse_drawing.py (100%)
 rename scripts/examples/{03-Drawing => Arduino/03-Drawing}/flood_fill.py (100%)
 rename scripts/examples/{03-Drawing => Arduino/03-Drawing}/image_drawing.py (100%)
 rename scripts/examples/{03-Drawing => Arduino/03-Drawing}/image_drawing_advanced.py (100%)
 rename scripts/examples/{03-Drawing => Arduino/03-Drawing}/image_drawing_alpha_blending_test.py (100%)
 rename scripts/examples/{03-Drawing => Arduino/03-Drawing}/image_drawing_alpha_blending_with_color_table_test.py (100%)
 rename scripts/examples/{03-Drawing => Arduino/03-Drawing}/image_drawing_alpha_table_test.py (100%)
 rename scripts/examples/{03-Drawing => Arduino/03-Drawing}/image_drawing_alpha_table_with_color_table_test.py (100%)
 rename scripts/examples/{03-Drawing => Arduino/03-Drawing}/image_drawing_scale_down_test.py (100%)
 rename scripts/examples/{03-Drawing => Arduino/03-Drawing}/image_drawing_scale_up_test.py (100%)
 rename scripts/examples/{03-Drawing => Arduino/03-Drawing}/image_drawing_with_custom_palette.py (100%)
 rename scripts/examples/{03-Drawing => Arduino/03-Drawing}/keypoints_drawing.py (100%)
 rename scripts/examples/{03-Drawing => Arduino/03-Drawing}/line_drawing.py (100%)
 rename scripts/examples/{03-Drawing => Arduino/03-Drawing}/rectangle_drawing.py (100%)
 rename scripts/examples/{03-Drawing => Arduino/03-Drawing}/text_drawing.py (100%)
 rename scripts/examples/{04-Image-Filters => Arduino/04-Image-Filters}/adaptive_histogram_equalization.py (100%)
 rename scripts/examples/{04-Image-Filters => Arduino/04-Image-Filters}/blur_filter.py (100%)
 rename scripts/examples/{04-Image-Filters => Arduino/04-Image-Filters}/cartoon_filter.py (100%)
 rename scripts/examples/{04-Image-Filters => Arduino/04-Image-Filters}/color_bilateral_filter.py (100%)
 rename scripts/examples/{04-Image-Filters => Arduino/04-Image-Filters}/color_binary_filter.py (100%)
 rename scripts/examples/{04-Image-Filters => Arduino/04-Image-Filters}/color_light_removal.py (100%)
 rename scripts/examples/{04-Image-Filters => Arduino/04-Image-Filters}/edge_filter.py (100%)
 rename scripts/examples/{04-Image-Filters => Arduino/04-Image-Filters}/erode_and_dilate.py (100%)
 rename scripts/examples/{04-Image-Filters => Arduino/04-Image-Filters}/gamma_correction.py (100%)
 rename scripts/examples/{04-Image-Filters => Arduino/04-Image-Filters}/grayscale_bilateral_filter.py (100%)
 rename scripts/examples/{04-Image-Filters => Arduino/04-Image-Filters}/grayscale_binary_filter.py (100%)
 rename scripts/examples/{04-Image-Filters => Arduino/04-Image-Filters}/grayscale_light_removal.py (100%)
 rename scripts/examples/{04-Image-Filters => Arduino/04-Image-Filters}/histogram_equalization.py (100%)
 rename scripts/examples/{04-Image-Filters => Arduino/04-Image-Filters}/kernel_filters.py (100%)
 rename scripts/examples/{04-Image-Filters => Arduino/04-Image-Filters}/lens_correction.py (100%)
 rename scripts/examples/{04-Image-Filters => Arduino/04-Image-Filters}/linear_polar.py (100%)
 rename scripts/examples/{04-Image-Filters => Arduino/04-Image-Filters}/log_polar.py (100%)
 rename scripts/examples/{04-Image-Filters => Arduino/04-Image-Filters}/mean_adaptive_threshold_filter.py (100%)
 rename scripts/examples/{04-Image-Filters => Arduino/04-Image-Filters}/mean_filter.py (100%)
 rename scripts/examples/{04-Image-Filters => Arduino/04-Image-Filters}/median_adaptive_threshold_filter.py (100%)
 rename scripts/examples/{04-Image-Filters => Arduino/04-Image-Filters}/median_filter.py (100%)
 rename scripts/examples/{04-Image-Filters => Arduino/04-Image-Filters}/midpoint_adaptive_threshold_filter.py (100%)
 rename scripts/examples/{04-Image-Filters => Arduino/04-Image-Filters}/midpoint_filter.py (100%)
 rename scripts/examples/{04-Image-Filters => Arduino/04-Image-Filters}/mode_adaptive_threshold_filter.py (100%)
 rename scripts/examples/{04-Image-Filters => Arduino/04-Image-Filters}/mode_filter.py (100%)
 rename scripts/examples/{04-Image-Filters => Arduino/04-Image-Filters}/negative.py (100%)
 rename scripts/examples/{04-Image-Filters => Arduino/04-Image-Filters}/perspective_and_rotation_correction.py (100%)
 rename scripts/examples/{04-Image-Filters => Arduino/04-Image-Filters}/perspective_correction.py (100%)
 rename scripts/examples/{04-Image-Filters => Arduino/04-Image-Filters}/rotation_correction.py (100%)
 rename scripts/examples/{04-Image-Filters => Arduino/04-Image-Filters}/sharpen_filter.py (100%)
 rename scripts/examples/{04-Image-Filters => Arduino/04-Image-Filters}/ulab.py (100%)
 rename scripts/examples/{04-Image-Filters => Arduino/04-Image-Filters}/unsharp_filter.py (100%)
 rename scripts/examples/{04-Image-Filters => Arduino/04-Image-Filters}/vflip_hmirror_transpose.py (100%)
 rename scripts/examples/{05-Snapshot => Arduino/05-Snapshot}/emboss_snapshot.py (100%)
 rename scripts/examples/{05-Snapshot => Arduino/05-Snapshot}/snapshot.py (100%)
 rename scripts/examples/{05-Snapshot => Arduino/05-Snapshot}/snapshot_on_face_detection.py (100%)
 rename scripts/examples/{05-Snapshot => Arduino/05-Snapshot}/snapshot_on_movement.py (100%)
 rename scripts/examples/{05-Snapshot => Arduino/05-Snapshot}/time_lapse_photos.py (100%)
 rename scripts/examples/{06-Video-Recording => Arduino/06-Video-Recording}/gif.py (100%)
 rename scripts/examples/{06-Video-Recording => Arduino/06-Video-Recording}/gif_on_face_detection.py (100%)
 rename scripts/examples/{06-Video-Recording => Arduino/06-Video-Recording}/gif_on_movement.py (100%)
 rename scripts/examples/{06-Video-Recording => Arduino/06-Video-Recording}/image_reader.py (100%)
 rename scripts/examples/{06-Video-Recording => Arduino/06-Video-Recording}/image_writer.py (100%)
 rename scripts/examples/{06-Video-Recording => Arduino/06-Video-Recording}/mjpeg.py (100%)
 rename scripts/examples/{06-Video-Recording => Arduino/06-Video-Recording}/mjpeg_on_face_detection.py (100%)
 rename scripts/examples/{06-Video-Recording => Arduino/06-Video-Recording}/mjpeg_on_movement.py (100%)
 rename scripts/examples/{07-Face-Detection => Arduino/07-Face-Detection}/face_detection.py (100%)
 rename scripts/examples/{07-Face-Detection => Arduino/07-Face-Detection}/face_recognition.py (100%)
 rename scripts/examples/{07-Face-Detection => Arduino/07-Face-Detection}/face_tracking.py (100%)
 rename scripts/examples/{08-Eye-Tracking => Arduino/08-Eye-Tracking}/face_eye_detection.py (100%)
 rename scripts/examples/{08-Eye-Tracking => Arduino/08-Eye-Tracking}/iris_detection.py (100%)
 rename scripts/examples/{09-Feature-Detection => Arduino/09-Feature-Detection}/edges.py (100%)
 rename scripts/examples/{09-Feature-Detection => Arduino/09-Feature-Detection}/find_circles.py (100%)
 rename scripts/examples/{09-Feature-Detection => Arduino/09-Feature-Detection}/find_line_segments.py (100%)
 rename scripts/examples/{09-Feature-Detection => Arduino/09-Feature-Detection}/find_lines.py (100%)
 rename scripts/examples/{09-Feature-Detection => Arduino/09-Feature-Detection}/find_rects.py (100%)
 rename scripts/examples/{09-Feature-Detection => Arduino/09-Feature-Detection}/hog.py (100%)
 rename scripts/examples/{09-Feature-Detection => Arduino/09-Feature-Detection}/keypoints.py (100%)
 rename scripts/examples/{09-Feature-Detection => Arduino/09-Feature-Detection}/keypoints_save.py (100%)
 rename scripts/examples/{09-Feature-Detection => Arduino/09-Feature-Detection}/lbp.py (100%)
 rename scripts/examples/{09-Feature-Detection => Arduino/09-Feature-Detection}/linear_regression_fast.py (100%)
 rename scripts/examples/{09-Feature-Detection => Arduino/09-Feature-Detection}/linear_regression_robust.py (100%)
 rename scripts/examples/{09-Feature-Detection => Arduino/09-Feature-Detection}/selective_search.py (100%)
 rename scripts/examples/{09-Feature-Detection => Arduino/09-Feature-Detection}/template_matching.py (100%)
 rename scripts/examples/{10-Color-Tracking => Arduino/10-Color-Tracking}/automatic_grayscale_color_tracking.py (100%)
 rename scripts/examples/{10-Color-Tracking => Arduino/10-Color-Tracking}/automatic_rgb565_color_tracking.py (100%)
 rename scripts/examples/{10-Color-Tracking => Arduino/10-Color-Tracking}/black_grayscale_line_following.py (100%)
 rename scripts/examples/{10-Color-Tracking => Arduino/10-Color-Tracking}/image_histogram_info.py (100%)
 rename scripts/examples/{10-Color-Tracking => Arduino/10-Color-Tracking}/image_statistics_info.py (100%)
 rename scripts/examples/{10-Color-Tracking => Arduino/10-Color-Tracking}/ir_beacon_grayscale_tracking.py (100%)
 rename scripts/examples/{10-Color-Tracking => Arduino/10-Color-Tracking}/ir_beacon_rgb565_tracking.py (100%)
 rename scripts/examples/{10-Color-Tracking => Arduino/10-Color-Tracking}/multi_color_blob_tracking.py (100%)
 rename scripts/examples/{10-Color-Tracking => Arduino/10-Color-Tracking}/multi_color_code_tracking.py (100%)
 rename scripts/examples/{10-Color-Tracking => Arduino/10-Color-Tracking}/single_color_code_tracking.py (100%)
 rename scripts/examples/{10-Color-Tracking => Arduino/10-Color-Tracking}/single_color_grayscale_blob_tracking.py (100%)
 rename scripts/examples/{10-Color-Tracking => Arduino/10-Color-Tracking}/single_color_rgb565_blob_tracking.py (100%)
 rename scripts/examples/{16-Codes => Arduino/16-Codes}/find_barcodes.py (100%)
 rename scripts/examples/{16-Codes => Arduino/16-Codes}/find_datamatrices.py (100%)
 rename scripts/examples/{16-Codes => Arduino/16-Codes}/find_datamatrices_w_lens_zoom.py (100%)
 rename scripts/examples/{16-Codes => Arduino/16-Codes}/qrcodes_with_lens_corr.py (100%)
 rename scripts/examples/{16-Codes => Arduino/16-Codes}/qrcodes_with_lens_zoom.py (100%)
 rename scripts/examples/{17-Pixy-Emulation => Arduino/17-Pixy-Emulation}/apriltags_pixy_i2c_emulation.py (100%)
 rename scripts/examples/{17-Pixy-Emulation => Arduino/17-Pixy-Emulation}/apriltags_pixy_spi_emulation.py (100%)
 rename scripts/examples/{17-Pixy-Emulation => Arduino/17-Pixy-Emulation}/apriltags_pixy_uart_emulation.py (100%)
 rename scripts/examples/{17-Pixy-Emulation => Arduino/17-Pixy-Emulation}/pixy_i2c_emulation.py (100%)
 rename scripts/examples/{17-Pixy-Emulation => Arduino/17-Pixy-Emulation}/pixy_spi_emulation.py (100%)
 rename scripts/examples/{17-Pixy-Emulation => Arduino/17-Pixy-Emulation}/pixy_uart_emulation.py (100%)
 rename scripts/examples/{18-MAVLink => Arduino/18-MAVLink}/mavlink_apriltags_landing_target.py (100%)
 rename scripts/examples/{18-MAVLink => Arduino/18-MAVLink}/mavlink_opticalflow.py (100%)
 rename scripts/examples/{19-Low-Power => Arduino/19-Low-Power}/deep_sleep.py (100%)
 rename scripts/examples/{19-Low-Power => Arduino/19-Low-Power}/extint_wakeup.py (100%)
 rename scripts/examples/{19-Low-Power => Arduino/19-Low-Power}/sensor_sleep.py (100%)
 rename scripts/examples/{19-Low-Power => Arduino/19-Low-Power}/stop_mode.py (100%)
 rename scripts/examples/{20-Frame-Differencing => Arduino/20-Frame-Differencing}/in_memory_advanced_frame_differencing.py (100%)
 rename scripts/examples/{20-Frame-Differencing => Arduino/20-Frame-Differencing}/in_memory_basic_frame_differencing.py (100%)
 rename scripts/examples/{20-Frame-Differencing => Arduino/20-Frame-Differencing}/in_memory_shadow_removal.py (100%)
 rename scripts/examples/{20-Frame-Differencing => Arduino/20-Frame-Differencing}/in_memory_structural_similarity.py (100%)
 rename scripts/examples/{20-Frame-Differencing => Arduino/20-Frame-Differencing}/on_disk_advanced_frame_differencing.py (100%)
 rename scripts/examples/{20-Frame-Differencing => Arduino/20-Frame-Differencing}/on_disk_basic_frame_differencing.py (100%)
 rename scripts/examples/{20-Frame-Differencing => Arduino/20-Frame-Differencing}/on_disk_shadow_removal.py (100%)
 rename scripts/examples/{20-Frame-Differencing => Arduino/20-Frame-Differencing}/on_disk_structural_similarity.py (100%)
 rename scripts/examples/{21-Sensor-Control => Arduino/21-Sensor-Control}/sensor_auto_gain_control.py (100%)
 rename scripts/examples/{21-Sensor-Control => Arduino/21-Sensor-Control}/sensor_exposure_control.py (100%)
 rename scripts/examples/{21-Sensor-Control => Arduino/21-Sensor-Control}/sensor_horizontal_mirror.py (100%)
 rename scripts/examples/{21-Sensor-Control => Arduino/21-Sensor-Control}/sensor_manual_whitebal_control.py (100%)
 rename scripts/examples/{21-Sensor-Control => Arduino/21-Sensor-Control}/sensor_vertical_flip.py (100%)
 rename scripts/examples/{21-Sensor-Control => Arduino/21-Sensor-Control}/sesnor_manual_gain_control.py (100%)
 rename scripts/examples/{22-Optical-Flow => Arduino/22-Optical-Flow}/absolute-rotation-scale.py (100%)
 rename scripts/examples/{22-Optical-Flow => Arduino/22-Optical-Flow}/absolute-translation.py (100%)
 rename scripts/examples/{22-Optical-Flow => Arduino/22-Optical-Flow}/differential-rotation-scale.py (100%)
 rename scripts/examples/{22-Optical-Flow => Arduino/22-Optical-Flow}/differential-translation.py (100%)
 rename scripts/examples/{22-Optical-Flow => Arduino/22-Optical-Flow}/image-patches-absolute-rotation-scale.py (100%)
 rename scripts/examples/{22-Optical-Flow => Arduino/22-Optical-Flow}/image-patches-absolute-translation.py (100%)
 rename scripts/examples/{22-Optical-Flow => Arduino/22-Optical-Flow}/image-patches-differential-rotation-scale.py (100%)
 rename scripts/examples/{22-Optical-Flow => Arduino/22-Optical-Flow}/image-patches-differential-translation.py (100%)
 rename scripts/examples/{24-External-Sensors => Arduino/24-External-Sensors}/I2C_Lidar_Lite_V3_example_code.py (100%)
 rename scripts/examples/{25-Machine-Learning => Arduino/25-Machine-Learning}/nn_stm32cubeai.py (100%)
 rename scripts/examples/{25-Machine-Learning => Arduino/25-Machine-Learning}/tf_face_collection.py (100%)
 rename scripts/examples/{25-Machine-Learning => Arduino/25-Machine-Learning}/tf_face_recognition.py (100%)
 rename scripts/examples/{25-Machine-Learning => Arduino/25-Machine-Learning}/tf_mobilenet_search_whole_window.py (100%)
 rename scripts/examples/{25-Machine-Learning => Arduino/25-Machine-Learning}/tf_mobilenet_serach_just_center.py (100%)
 rename scripts/examples/{25-Machine-Learning => Arduino/25-Machine-Learning}/tf_person_detection_search_just_center.py (100%)
 rename scripts/examples/{25-Machine-Learning => Arduino/25-Machine-Learning}/tf_person_detection_search_whole_window.py (100%)
 rename scripts/examples/{26-April-Tags => Arduino/26-April-Tags}/find_apriltags.py (100%)
 rename scripts/examples/{26-April-Tags => Arduino/26-April-Tags}/find_apriltags_3d_pose.py (100%)
 rename scripts/examples/{26-April-Tags => Arduino/26-April-Tags}/find_apriltags_max_res.py (100%)
 rename scripts/examples/{26-April-Tags => Arduino/26-April-Tags}/find_apriltags_w_lens_zoom.py (100%)
 rename scripts/examples/{26-April-Tags => Arduino/26-April-Tags}/find_small_apriltags.py (100%)
 rename scripts/examples/{28-Global-Shutter => Arduino/28-Global-Shutter}/high_fps.py (100%)
 rename scripts/examples/{28-Global-Shutter => Arduino/28-Global-Shutter}/triggered_mode.py (100%)
 rename scripts/examples/{32-modbus => Arduino/32-modbus}/modbus_apriltag.py (100%)
 rename scripts/examples/{32-modbus => Arduino/32-modbus}/modbus_rtu_slave.py (100%)
 rename scripts/examples/{34-Remote-Control => Arduino/34-Remote-Control}/image_transfer_jpg_as_the_remote_device_for_your_computer.py (100%)
 rename scripts/examples/{34-Remote-Control => Arduino/34-Remote-Control}/image_transfer_jpg_streaming_as_the_remote_device_for_your_computer.py (100%)
 rename scripts/examples/{34-Remote-Control => Arduino/34-Remote-Control}/image_transfer_raw_as_the_controller_device.py (100%)
 rename scripts/examples/{34-Remote-Control => Arduino/34-Remote-Control}/image_transfer_raw_as_the_remote_device.py (100%)
 rename scripts/examples/{34-Remote-Control => Arduino/34-Remote-Control}/popular_features_as_the_controller_device.py (100%)
 rename scripts/examples/{34-Remote-Control => Arduino/34-Remote-Control}/popular_features_as_the_remote_device.py (100%)
 rename scripts/examples/{35-Readout-Control => Arduino/35-Readout-Control}/100_fps_ir_led_tracking.py (100%)
 rename scripts/examples/{35-Readout-Control => Arduino/35-Readout-Control}/apriltag_tracking.py (100%)
 rename scripts/examples/{36-Web-Servers => Arduino/36-Web-Servers}/rtsp_video_server.py (100%)
 rename scripts/examples/{99-Tests => Arduino/99-Tests}/colorbar.py (100%)
 rename scripts/examples/{99-Tests => Arduino/99-Tests}/fps.py (100%)
 rename scripts/examples/{99-Tests => Arduino/99-Tests}/selftest.py (100%)
 rename scripts/examples/{99-Tests => Arduino/99-Tests}/unittests.py (100%)
diff --git a/scripts/examples/OpenMV/00-Arduino/arduino_i2c_slave.py b/scripts/examples/OpenMV/00-Arduino/arduino_i2c_slave.py
new file mode 100644
index 000000000..41aa5e09d
--- /dev/null
+++ b/scripts/examples/OpenMV/00-Arduino/arduino_i2c_slave.py
@@ -0,0 +1,91 @@
+# I2C with the Arduino as the master device and the OpenMV Cam as the slave.
+#
+# Please wire up your OpenMV Cam to your Arduino like this:
+#
+# OpenMV Cam Master I2C Data (P5) - Arduino Uno Data (A4)
+# OpenMV Cam Master I2C Clock (P4) - Arduino Uno Clock (A5)
+# OpenMV Cam Ground - Arduino Ground
+
+import pyb, ustruct
+
+text = "Hello World!\n"
+data = ustruct.pack("<%ds" % len(text), text)
+# Use "ustruct" to build data packets to send.
+# "<" puts the data in the struct in little endian order.
+# "%ds" puts a string in the data stream. E.g. "13s" for "Hello World!\n" (13 chars).
+# See https://docs.python.org/3/library/struct.html
+
+# READ ME!!!
+#
+# Please understand that when your OpenMV Cam is not the I2C master it may miss requests to
+# send data as an I2C slave, whether you call "i2c.send()" in an interrupt callback or in the
+# main loop below. When this happens the Arduino will get a NAK and will have to try reading
+# from the OpenMV Cam again. Note that neither the Arduino nor the OpenMV Cam I2C driver is
+# good at getting unstuck after encountering any I2C errors. On both the OpenMV Cam and the
+# Arduino you can recover by de-initing and then re-initing the I2C peripherals.
+
+# The hardware I2C bus for your OpenMV Cam is always I2C bus 2.
+bus = pyb.I2C(2, pyb.I2C.SLAVE, addr=0x12)
+bus.deinit() # Fully reset I2C device...
+bus = pyb.I2C(2, pyb.I2C.SLAVE, addr=0x12)
+print("Waiting for Arduino...")
+
+# Note that for sync up to work correctly the OpenMV Cam must be running this script before the
+# Arduino starts to poll the OpenMV Cam for data. Otherwise the I2C byte framing gets messed up.
+# So, keep the Arduino in reset until the OpenMV Cam prints "Waiting for Arduino...".
+
+while(True):
+    try:
+        bus.send(ustruct.pack("<h", len(data)), timeout=10000) # Send the len first (16-bits).
+        try:
+            bus.send(data, timeout=10000) # Then send the data.
+            print("Sent Data!") # Only reached on no error.
+        except OSError as err:
+            pass # Don't care about errors - so pass.
+            # Note that there are 3 possible errors. A timeout error, a general purpose error,
+            # or a busy error. The error codes are 116, 5, 16 respectively for "err.arg[0]".
+    except OSError as err:
+        pass # Don't care about errors - so pass.
+        # Note that there are 3 possible errors. A timeout error, a general purpose error,
+        # or a busy error. The error codes are 116, 5, 16 respectively for "err.arg[0]".
+
+###################################################################################################
+# Arduino Code
+###################################################################################################
+#
+# #include <Wire.h>
+# #define BAUD_RATE 19200
+# #define CHAR_BUF 128
+#
+# void setup() {
+#   Serial.begin(BAUD_RATE);
+#   Wire.begin();
+#   delay(1000); // Give the OpenMV Cam time to boot up.
+# }
+#
+# void loop() {
+#   int32_t temp = 0;
+#   char buff[CHAR_BUF] = {0};
+#
+#   Wire.requestFrom(0x12, 2);
+#   if(Wire.available() == 2) { // got length?
+#
+#     temp = Wire.read() | (Wire.read() << 8);
+#     delay(1); // Give some setup time...
+#
+#     Wire.requestFrom(0x12, temp);
+#     if(Wire.available() == temp) { // got full message?
+#
+#       temp = 0;
+#       while(Wire.available()) buff[temp++] = Wire.read();
+#
+#     } else {
+#       while(Wire.available()) Wire.read(); // Toss garbage bytes.
+#     }
+#   } else {
+#     while(Wire.available()) Wire.read(); // Toss garbage bytes.
+#   }
+#
+#   Serial.print(buff);
+#   delay(1); // Don't loop too quickly.
+# }
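The framing above is worth seeing end-to-end: a little-endian 16-bit length header followed by the string bytes, which is exactly what the Arduino sketch's two Wire.requestFrom() calls consume. The round trip can be checked on a desktop with CPython's struct module (a superset of ustruct); pack_message and unpack_message below are illustrative names, not part of the OpenMV or Arduino APIs.

import struct

def pack_message(text):
    # 16-bit little-endian length header followed by the raw string bytes,
    # matching what the OpenMV script sends over I2C.
    payload = text.encode()
    return struct.pack("<h", len(payload)) + payload

def unpack_message(packet):
    # Read the 2-byte header first, then exactly that many payload bytes,
    # mirroring the Arduino sketch's two Wire.requestFrom() calls.
    (length,) = struct.unpack_from("<h", packet, 0)
    return packet[2:2 + length].decode()

packet = pack_message("Hello World!\n")
assert unpack_message(packet) == "Hello World!\n"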
diff --git a/scripts/examples/OpenMV/00-Arduino/arduino_spi_slave.py b/scripts/examples/OpenMV/00-Arduino/arduino_spi_slave.py
new file mode 100644
index 000000000..37adf4a23
--- /dev/null
+++ b/scripts/examples/OpenMV/00-Arduino/arduino_spi_slave.py
@@ -0,0 +1,94 @@
+# SPI with the Arduino as the master device and the OpenMV Cam as the slave.
+#
+# Please wire up your OpenMV Cam to your Arduino like this:
+#
+# OpenMV Cam Master Out Slave In (P0) - Arduino Uno MOSI (11)
+# OpenMV Cam Master In Slave Out (P1) - Arduino Uno MISO (12)
+# OpenMV Cam Serial Clock (P2) - Arduino Uno SCK (13)
+# OpenMV Cam Slave Select (P3) - Arduino Uno SS (10)
+# OpenMV Cam Ground - Arduino Ground
+
+import pyb, ustruct, time
+
+text = "Hello World!\n"
+data = ustruct.pack("<bi%ds" % len(text), 85, len(text), text)
+# Use "ustruct" to build data packets to send.
+# "<" puts the data in the struct in little endian order.
+# "b" puts a signed char in the data stream. E.g. 85 - the sync char the Arduino looks for.
+# "i" puts a 4 byte integer in the data stream. E.g. len(text).
+# "%ds" puts a string in the data stream. E.g. "13s" for "Hello World!\n" (13 chars).
+# See https://docs.python.org/3/library/struct.html
+
+# The hardware SPI bus for your OpenMV Cam is always SPI bus 2.
+# polarity = 0 -> clock is idle low.
+# phase = 0 -> sample data on rising clock edge, output data on falling clock edge.
+spi = pyb.SPI(2, pyb.SPI.SLAVE, polarity=0, phase=0)
+
+# NSS callback.
+def nss_callback(line):
+    global spi, data
+    try:
+        spi.send(data, timeout=1000)
+    except OSError as err:
+        pass # Don't care about errors - so pass.
+        # Note that there are 3 possible errors. A timeout error, a general purpose error, or
+        # a busy error. The error codes are 116, 5, 16 respectively for "err.arg[0]".
+
+# Configure NSS/CS in IRQ mode to send data when requested by the master.
+pyb.ExtInt(pyb.Pin("P3"), pyb.ExtInt.IRQ_FALLING, pyb.Pin.PULL_UP, nss_callback)
+
+while(True):
+    time.sleep(1000)
+
+###################################################################################################
+# Arduino Code
+###################################################################################################
+#
+# #include <SPI.h>
+# #define SS_PIN 10
+# #define BAUD_RATE 19200
+# #define CHAR_BUF 128
+#
+# void setup() {
+#   pinMode(SS_PIN, OUTPUT);
+#   Serial.begin(BAUD_RATE);
+#   SPI.begin();
+#   SPI.setBitOrder(MSBFIRST);
+#   SPI.setClockDivider(SPI_CLOCK_DIV16);
+#   SPI.setDataMode(SPI_MODE0);
+#   delay(1000); // Give the OpenMV Cam time to boot up.
+# }
+#
+# void loop() {
+#   int32_t len = 0;
+#   char buff[CHAR_BUF] = {0};
+#   digitalWrite(SS_PIN, LOW);
+#   delay(1); // Give the OpenMV Cam some time to setup to send data.
+#
+#   if(SPI.transfer(1) == 85) { // saw sync char?
+#     SPI.transfer(&len, 4); // get length
+#     if (len) {
+#       SPI.transfer(&buff, min(len, CHAR_BUF));
+#       len -= min(len, CHAR_BUF);
+#     }
+#     while (len--) SPI.transfer(0); // eat any remaining bytes
+#   }
+#
+#   digitalWrite(SS_PIN, HIGH);
+#   Serial.print(buff);
+#   delay(1); // Don't loop too quickly.
+# }
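The SPI frame above is one sync byte (85), a 4-byte little-endian length, and then the text; the Arduino sketch trusts the length field only after seeing the sync byte. Below is a minimal CPython sketch of that decode for reference; parse_spi_frame is an illustrative helper, not an OpenMV or Arduino API.

import struct

SYNC = 85  # Sync char the master checks before trusting the length field.

def parse_spi_frame(frame):
    # Frame layout from the OpenMV slave: "<b" sync char, "<i" little-endian
    # 4-byte length, then exactly `length` bytes of text.
    sync, length = struct.unpack_from("<bi", frame, 0)
    if sync != SYNC:
        return None  # No sync char - toss the bytes, like the Arduino does.
    return frame[5:5 + length].decode()

frame = struct.pack("<bi13s", SYNC, 13, b"Hello World!\n")
print(parse_spi_frame(frame))  # Hello World!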
diff --git a/scripts/examples/OpenMV/00-Arduino/arduino_uart.py b/scripts/examples/OpenMV/00-Arduino/arduino_uart.py
new file mode 100644
index 000000000..12255cf3b
--- /dev/null
+++ b/scripts/examples/OpenMV/00-Arduino/arduino_uart.py
@@ -0,0 +1,38 @@
+# Basic UART communications between OpenMV and Arduino Uno.
+
+# 1) Wire up your OpenMV Cam to your Arduino Uno like this:
+#
+# OpenMV Cam Ground Pin ----> Arduino Ground
+# OpenMV Cam UART3_TX(P4) ----> Arduino Uno UART_RX(0)
+# OpenMV Cam UART3_RX(P5) ----> Arduino Uno UART_TX(1)
+
+# 2) Uncomment and upload the following sketch to Arduino:
+#
+# void setup() {
+#   // put your setup code here, to run once:
+#   Serial.begin(19200);
+# }
+#
+# void loop() {
+#   // put your main code here, to run repeatedly:
+#   if (Serial.available()) {
+#     // Read the most recent byte
+#     byte byteRead = Serial.read();
+#     // ECHO the value that was read
+#     Serial.write(byteRead);
+#   }
+# }
+
+# 3) Run the following script in OpenMV IDE:
+
+import time
+from pyb import UART
+
+# UART 3, and baudrate.
+uart = UART(3, 19200)
+
+while(True):
+    uart.write("Hello World!\n")
+    if (uart.any()):
+        print(uart.read())
+    time.sleep(1000)
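Because the Arduino sketch echoes every byte back, the loop above extends naturally into a round-trip check. A minimal sketch, assuming the same wiring and 19200 baud; it uses only the UART methods already shown (write, any, read) and keeps this file's millisecond time.sleep() convention.

import time
from pyb import UART

uart = UART(3, 19200)
message = "Hello World!\n"

while(True):
    uart.write(message)       # Send the test string to the Arduino.
    time.sleep(100)           # Give the echo time to come back (ms).
    if uart.any():
        echoed = uart.read()  # Read back everything echoed so far.
        print("echo ok" if echoed == message.encode() else echoed)
    time.sleep(1000)          # One round trip per second.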
+# +# Wire.requestFrom(0x12, temp); +# if(Wire.available() == temp) { // got full message? +# +# temp = 0; +# while(Wire.available()) buff[temp++] = Wire.read(); +# +# } else { +# while(Wire.available()) Wire.read(); // Toss garbage bytes. +# } +# } else { +# while(Wire.available()) Wire.read(); // Toss garbage bytes. +# } +# +# Serial.print(buff); +# delay(1); // Don't loop too quickly. +# } diff --git a/scripts/examples/OpenMV/00-Arduino/arduino_spi_slave.py b/scripts/examples/OpenMV/00-Arduino/arduino_spi_slave.py new file mode 100644 index 000000000..37adf4a23 --- /dev/null +++ b/scripts/examples/OpenMV/00-Arduino/arduino_spi_slave.py @@ -0,0 +1,94 @@ +# SPI with the Arduino as the master device and the OpenMV Cam as the slave. +# +# Please wire up your OpenMV Cam to your Arduino like this: +# +# OpenMV Cam Master Out Slave In (P0) - Arduino Uno MOSI (11) +# OpenMV Cam Master In Slave Out (P1) - Arduino Uno MISO (12) +# OpenMV Cam Serial Clock (P2) - Arduino Uno SCK (13) +# OpenMV Cam Slave Select (P3) - Arduino Uno SS (10) +# OpenMV Cam Ground - Arduino Ground + +import pyb, ustruct, time + +text = "Hello World!\n" +data = ustruct.pack("<l%ds" % len(text), len(text), text) +# Use "ustruct" to build data packets to send. [...] +# polarity = 0 -> clock is idle low. +# phase = 0 -> sample data on rising clock edge, output data on falling clock edge. +spi = pyb.SPI(2, pyb.SPI.SLAVE, polarity=0, phase=0) + +# NSS callback. +def nss_callback(line): + global spi, data + try: + spi.send(data, timeout=1000) + except OSError as err: + pass # Don't care about errors - so pass. + # Note that there are 3 possible errors. A timeout error, a general purpose error, or + # a busy error. The error codes are 116, 5, 16 respectively for "err.arg[0]". + +# Configure NSS/CS in IRQ mode to send data when requested by the master. +pyb.ExtInt(pyb.Pin("P3"), pyb.ExtInt.IRQ_FALLING, pyb.Pin.PULL_UP, nss_callback) + +while(True): + time.sleep(1000) + +################################################################################################### +# Arduino Code +################################################################################################### +# +# #include <SPI.h> +# #define SS_PIN 10 +# #define BAUD_RATE 19200 +# #define CHAR_BUF 128 +# +# void setup() { +# pinMode(SS_PIN, OUTPUT); +# Serial.begin(BAUD_RATE); +# SPI.begin(); +# SPI.setBitOrder(MSBFIRST); +# SPI.setClockDivider(SPI_CLOCK_DIV16); +# SPI.setDataMode(SPI_MODE0); +# delay(1000); // Give the OpenMV Cam time to bootup. +# } +# +# void loop() { +# int32_t len = 0; +# char buff[CHAR_BUF] = {0}; +# digitalWrite(SS_PIN, LOW); +# delay(1); // Give the OpenMV Cam some time to setup to send data. +# +# if(SPI.transfer(1) == 85) { // saw sync char? +# SPI.transfer(&len, 4); // get length +# if (len) { +# SPI.transfer(&buff, min(len, CHAR_BUF)); +# len -= min(len, CHAR_BUF); +# } +# while (len--) SPI.transfer(0); // eat any remaining bytes +# } +# +# digitalWrite(SS_PIN, HIGH); +# Serial.print(buff); +# delay(1); // Don't loop too quickly. +# } diff --git a/scripts/examples/OpenMV/00-Arduino/arduino_uart.py b/scripts/examples/OpenMV/00-Arduino/arduino_uart.py new file mode 100644 index 000000000..12255cf3b --- /dev/null +++ b/scripts/examples/OpenMV/00-Arduino/arduino_uart.py @@ -0,0 +1,38 @@ +# Basic UART communications between OpenMV and Arduino Uno.
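+# +# Note: both sides must agree on the baud rate (19200 below); the Arduino sketch simply echoes every byte it receives back to the OpenMV Cam.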
+ +# 1) Wire up your OpenMV Cam to your Arduino Uno like this: +# +# OpenMV Cam Ground Pin ----> Arduino Ground +# OpenMV Cam UART3_TX(P4) ----> Arduino Uno UART_RX(0) +# OpenMV Cam UART3_RX(P5) ----> Arduino Uno UART_TX(1) + +# 2) Uncomment and upload the following sketch to Arduino: +# +# void setup() { +# // put your setup code here, to run once: +# Serial.begin(19200); +# } +# +# void loop() { +# // put your main code here, to run repeatedly: +# if (Serial.available()) { +# // Read the most recent byte +# byte byteRead = Serial.read(); +# // ECHO the value that was read +# Serial.write(byteRead); +# } +# } + +# 3) Run the following script in OpenMV IDE: + +import time +from pyb import UART + +# UART 3, and baudrate. +uart = UART(3, 19200) + +while(True): + uart.write("Hello World!\n") + if (uart.any()): + print(uart.read()) + time.sleep(1000) diff --git a/scripts/examples/OpenMV/01-Basics/helloworld.py b/scripts/examples/OpenMV/01-Basics/helloworld.py new file mode 100644 index 000000000..a18b84b21 --- /dev/null +++ b/scripts/examples/OpenMV/01-Basics/helloworld.py @@ -0,0 +1,17 @@ +# Hello World Example +# +# Welcome to the OpenMV IDE! Click on the green run arrow button below to run the script! + +import sensor, image, time + +sensor.reset() # Reset and initialize the sensor. +sensor.set_pixformat(sensor.RGB565) # Set pixel format to RGB565 (or GRAYSCALE) +sensor.set_framesize(sensor.QVGA) # Set frame size to QVGA (320x240) +sensor.skip_frames(time = 2000) # Wait for settings to take effect. +clock = time.clock() # Create a clock object to track the FPS. + +while(True): + clock.tick() # Update the FPS clock. + img = sensor.snapshot() # Take a picture and return the image. + print(clock.fps()) # Note: OpenMV Cam runs about half as fast when connected + # to the IDE. The FPS should increase once disconnected. diff --git a/scripts/examples/OpenMV/01-Basics/main.py b/scripts/examples/OpenMV/01-Basics/main.py new file mode 100644 index 000000000..97d6d921f --- /dev/null +++ b/scripts/examples/OpenMV/01-Basics/main.py @@ -0,0 +1,33 @@ +# Main Module Example +# +# When your OpenMV Cam is disconnected from your computer it will either run the +# main.py script on the SD card (if attached) or the main.py script on +# your OpenMV Cam's internal flash drive. + +import time, pyb + +led = pyb.LED(3) # Red LED = 1, Green LED = 2, Blue LED = 3, IR LEDs = 4. +usb = pyb.USB_VCP() # This is a serial port object that allows you to +# communicate with your computer. While it is not connected the code below runs. + +while(not usb.isconnected()): + led.on() + time.sleep(150) + led.off() + time.sleep(100) + led.on() + time.sleep(150) + led.off() + time.sleep(600) + +led = pyb.LED(2) # Switch to using the green LED. + +while(usb.isconnected()): + led.on() + time.sleep(150) + led.off() + time.sleep(100) + led.on() + time.sleep(150) + led.off() + time.sleep(600) diff --git a/scripts/examples/OpenMV/02-Board-Control/adc_read.py b/scripts/examples/OpenMV/02-Board-Control/adc_read.py new file mode 100644 index 000000000..1854226e0 --- /dev/null +++ b/scripts/examples/OpenMV/02-Board-Control/adc_read.py @@ -0,0 +1,13 @@ +# ADC Read Example. +# +# This example shows how to use the ADC to read an analog pin. + +import time +from pyb import ADC + +adc = ADC("P6") # Must always be "P6". + +while(True): + # The ADC has 12 bits of resolution (4096 values).
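+ # read() returns a raw count from 0 to 4095; scaling by 3.3/4095 converts it to volts (assuming the ADC reference is the 3.3V rail).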
+ print("ADC = %fv" % ((adc.read() * 3.3) / 4095)) + time.sleep(100) diff --git a/scripts/examples/OpenMV/02-Board-Control/adc_read_int_channel.py b/scripts/examples/OpenMV/02-Board-Control/adc_read_int_channel.py new file mode 100644 index 000000000..43e0c60ba --- /dev/null +++ b/scripts/examples/OpenMV/02-Board-Control/adc_read_int_channel.py @@ -0,0 +1,8 @@ +# ADC Internal Channels Example +# +# This example shows how to read internal ADC channels. + +import time, pyb + +adc = pyb.ADCAll(12) +print("VREF = %.1fv VREF = %.1fv Temp = %d" % (adc.read_core_vref(), adc.read_core_vbat(), adc.read_core_temp())) diff --git a/scripts/examples/OpenMV/02-Board-Control/can.py b/scripts/examples/OpenMV/02-Board-Control/can.py new file mode 100644 index 000000000..5e8c6a6f9 --- /dev/null +++ b/scripts/examples/OpenMV/02-Board-Control/can.py @@ -0,0 +1,42 @@ +# CAN Shield Example +# +# This example demonstrates CAN communications between two cameras. +# NOTE: you need two CAN transceiver shields and DB9 cable to run this example. + +import time, omv +from pyb import CAN + +# NOTE: Set to False on receiving node. +TRANSMITTER = True + +can = CAN(2, CAN.NORMAL) +# Set a different baudrate (default is 125Kbps) +# NOTE: The following parameters are for the H7 only. +# +# can.init(CAN.NORMAL, prescaler=32, sjw=1, bs1=8, bs2=3) # 125Kbps +# can.init(CAN.NORMAL, prescaler=16, sjw=1, bs1=8, bs2=3) # 250Kbps +# can.init(CAN.NORMAL, prescaler=8, sjw=1, bs1=8, bs2=3) # 500Kbps +# can.init(CAN.NORMAL, prescaler=4, sjw=1, bs1=8, bs2=3) # 1000Kbps + +can.restart() + +if (TRANSMITTER): + while (True): + # Send message with id 1 + can.send('Hello', 1) + time.sleep(1000) + +else: + # Runs on the receiving node. + if (omv.board_type() == 'H7'): # FDCAN + # Set a filter to receive messages with id=1 -> 4 + # Filter index, mode (RANGE, DUAL or MASK), FIFO (0 or 1), params + can.setfilter(0, CAN.RANGE, 0, (1, 4)) + else: + # Set a filter to receive messages with id=1, 2, 3 and 4 + # Filter index, mode (LIST16, etc..), FIFO (0 or 1), params + can.setfilter(0, CAN.LIST16, 0, (1, 2, 3, 4)) + + while (True): + # Receive messages on FIFO 0 + print(can.recv(0, timeout=10000)) diff --git a/scripts/examples/OpenMV/02-Board-Control/cpufreq_scaling.py b/scripts/examples/OpenMV/02-Board-Control/cpufreq_scaling.py new file mode 100644 index 000000000..963810575 --- /dev/null +++ b/scripts/examples/OpenMV/02-Board-Control/cpufreq_scaling.py @@ -0,0 +1,24 @@ +# CPU frequency scaling example. +# +# This example shows how to use the cpufreq module to change the CPU frequency on the fly. +import sensor, image, time, cpufreq + +sensor.reset() # Reset and initialize the sensor. +sensor.set_pixformat(sensor.GRAYSCALE) # Set pixel format to RGB565 (or GRAYSCALE) +sensor.set_framesize(sensor.QVGA) # Set frame size to QVGA (320x240) +clock = time.clock() # Create a clock object to track the FPS. + +def test_image_processing(): + for i in range(0, 50): + clock.tick() # Update the FPS clock. + img = sensor.snapshot() # Take a picture and return the image. + img.find_edges(image.EDGE_CANNY, threshold=(50, 80)) + +print("\nFrequency Scaling Test...") +for f in cpufreq.get_supported_frequencies(): + print("Testing CPU Freq: %dMHz..." 
%(f)) + cpufreq.set_frequency(f) + clock.reset() + test_image_processing() + freqs = cpufreq.get_current_frequencies() + print("CPU Freq:%dMHz HCLK:%dMHz PCLK1:%dMHz PCLK2:%dMHz FPS:%.2f" %(freqs[0], freqs[1], freqs[2], freqs[3], clock.fps())) diff --git a/scripts/examples/OpenMV/02-Board-Control/dac_write.py b/scripts/examples/OpenMV/02-Board-Control/dac_write.py new file mode 100644 index 000000000..d5f942880 --- /dev/null +++ b/scripts/examples/OpenMV/02-Board-Control/dac_write.py @@ -0,0 +1,17 @@ +# DAC Control Example +# +# This example shows how to use the DAC pin output onboard your OpenMV Cam. + +import time +from pyb import DAC + +dac = DAC("P6") # Must always be "P6". + +while(True): + # The DAC has 8-12 bits of resolution (default 8-bits). + for i in range(256): + dac.write(i) + time.sleep(20) + for i in range(256): + dac.write(255-i) + time.sleep(20) diff --git a/scripts/examples/OpenMV/02-Board-Control/dac_write_timed.py b/scripts/examples/OpenMV/02-Board-Control/dac_write_timed.py new file mode 100644 index 000000000..8bcd61892 --- /dev/null +++ b/scripts/examples/OpenMV/02-Board-Control/dac_write_timed.py @@ -0,0 +1,15 @@ +# DAC Timed Write Example +# +# This example shows how to use the DAC pin output onboard your OpenMV Cam. + +import math +from pyb import DAC + +# create a buffer containing a sine-wave +buf = bytearray(100) +for i in range(len(buf)): + buf[i] = 128 + int(127 * math.sin(2 * math.pi * i / len(buf))) + +# output the sine-wave at 400Hz +dac = DAC("P6") +dac.write_timed(buf, 400 * len(buf), mode=DAC.CIRCULAR) diff --git a/scripts/examples/OpenMV/02-Board-Control/i2c_control.py b/scripts/examples/OpenMV/02-Board-Control/i2c_control.py new file mode 100644 index 000000000..a284cb2a0 --- /dev/null +++ b/scripts/examples/OpenMV/02-Board-Control/i2c_control.py @@ -0,0 +1,19 @@ +# I2C Control +# +# This example shows how to use the i2c bus on your OpenMV Cam by dumping the +# contents of a standard EEPROM. To run this example either connect the +# Thermopile Shield to your OpenMV Cam or an I2C EEPROM to your OpenMV Cam. + +from pyb import I2C + +i2c = I2C(2, I2C.MASTER) # The i2c bus must always be 2. +mem = i2c.mem_read(256, 0x50, 0) # The eeprom slave address is 0x50. + +print("\n[") +for i in range(16): + print("\t[", end='') + for j in range(16): + print("%03d" % mem[(i*16)+j], end='') + if j != 15: print(", ", end='') + print("]," if i != 15 else "]") +print("]") diff --git a/scripts/examples/OpenMV/02-Board-Control/led_control.py b/scripts/examples/OpenMV/02-Board-Control/led_control.py new file mode 100644 index 000000000..9fcc0fbfc --- /dev/null +++ b/scripts/examples/OpenMV/02-Board-Control/led_control.py @@ -0,0 +1,27 @@ +# LED Control Example +# +# This example shows how to control your OpenMV Cam's built-in LEDs. Use your +# smart phone's camera to see the IR LEDs.
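+# +# led_control() below treats its argument as a 4-bit mask - bit 0 = red, bit 1 = green, bit 2 = blue, bit 3 = IR - so counting from 0 to 15 steps through every LED combination.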
+ +import time +from pyb import LED + +red_led = LED(1) +green_led = LED(2) +blue_led = LED(3) +ir_led = LED(4) + +def led_control(x): + if (x&1)==0: red_led.off() + elif (x&1)==1: red_led.on() + if (x&2)==0: green_led.off() + elif (x&2)==2: green_led.on() + if (x&4)==0: blue_led.off() + elif (x&4)==4: blue_led.on() + if (x&8)==0: ir_led.off() + elif (x&8)==8: ir_led.on() + +while(True): + for i in range(16): + led_control(i) + time.sleep(500) diff --git a/scripts/examples/OpenMV/02-Board-Control/native_emitters.py b/scripts/examples/OpenMV/02-Board-Control/native_emitters.py new file mode 100644 index 000000000..3839297fd --- /dev/null +++ b/scripts/examples/OpenMV/02-Board-Control/native_emitters.py @@ -0,0 +1,19 @@ +import time + +@micropython.asm_thumb +def asm(): + movw(r0, 42) + +@micropython.viper +def viper(a, b): + return a + b + +@micropython.native +def native(a, b): + return a + b + + +print(asm()) +print(viper(1, 2)) +print(native(1, 2)) + diff --git a/scripts/examples/OpenMV/02-Board-Control/pin_control.py b/scripts/examples/OpenMV/02-Board-Control/pin_control.py new file mode 100644 index 000000000..dbcae5fcd --- /dev/null +++ b/scripts/examples/OpenMV/02-Board-Control/pin_control.py @@ -0,0 +1,13 @@ +# Pin Control Example +# +# This example shows how to use the I/O pins in GPIO mode on your OpenMV Cam. + +from pyb import Pin + +# Connect a switch to pin 0 that will pull it low when the switch is closed. +# Pin 1 will then light up. +pin0 = Pin('P0', Pin.IN, Pin.PULL_UP) +pin1 = Pin('P1', Pin.OUT_PP, Pin.PULL_NONE) + +while(True): + pin1.value(not pin0.value()) diff --git a/scripts/examples/OpenMV/02-Board-Control/pwm_control.py b/scripts/examples/OpenMV/02-Board-Control/pwm_control.py new file mode 100644 index 000000000..b96e57cc0 --- /dev/null +++ b/scripts/examples/OpenMV/02-Board-Control/pwm_control.py @@ -0,0 +1,15 @@ +# PWM Control Example +# +# This example shows how to do PWM with your OpenMV Cam. + +import time +from pyb import Pin, Timer + +tim = Timer(4, freq=1000) # Frequency in Hz +# Generate a 1KHz square wave on TIM4 with 50%, 75% and 50% duty cycles on channels 1, 2 and 3 respectively. +ch1 = tim.channel(1, Timer.PWM, pin=Pin("P7"), pulse_width_percent=50) +ch2 = tim.channel(2, Timer.PWM, pin=Pin("P8"), pulse_width_percent=75) +ch3 = tim.channel(3, Timer.PWM, pin=Pin("P9"), pulse_width_percent=50) + +while (True): + time.sleep(1000) diff --git a/scripts/examples/OpenMV/02-Board-Control/rtc.py b/scripts/examples/OpenMV/02-Board-Control/rtc.py new file mode 100644 index 000000000..6911df74b --- /dev/null +++ b/scripts/examples/OpenMV/02-Board-Control/rtc.py @@ -0,0 +1,12 @@ +# RTC Example +# +# This example shows how to use the RTC. +import time +from pyb import RTC + +rtc = RTC() +rtc.datetime((2013, 7, 9, 2, 0, 0, 0, 0)) + +while (True): + print(rtc.datetime()) + time.sleep(1000) diff --git a/scripts/examples/OpenMV/02-Board-Control/servo_control.py b/scripts/examples/OpenMV/02-Board-Control/servo_control.py new file mode 100644 index 000000000..4f5b40909 --- /dev/null +++ b/scripts/examples/OpenMV/02-Board-Control/servo_control.py @@ -0,0 +1,22 @@ +# Servo Control Example +# +# This example shows how to use your OpenMV Cam to control servos. 
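+# +# pulse_width() takes the servo pulse length in microseconds; the loops below sweep between 1000us and 2000us, roughly the standard range for hobby servos.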
+ +import time +from pyb import Servo + +s1 = Servo(1) # P7 +s2 = Servo(2) # P8 +s3 = Servo(3) # P9 + +while(True): + for i in range(1000): + s1.pulse_width(1000 + i) + s2.pulse_width(1999 - i) + s3.pulse_width(1000 + i) + time.sleep(10) + for i in range(1000): + s1.pulse_width(1999 - i) + s2.pulse_width(1000 + i) + s3.pulse_width(1999 - i) + time.sleep(10) diff --git a/scripts/examples/OpenMV/02-Board-Control/spi_control.py b/scripts/examples/OpenMV/02-Board-Control/spi_control.py new file mode 100644 index 000000000..8136fc7c0 --- /dev/null +++ b/scripts/examples/OpenMV/02-Board-Control/spi_control.py @@ -0,0 +1,75 @@ +# SPI Control +# +# This example shows how to use the SPI bus on your OpenMV Cam to directly +# control the LCD shield without using the built-in lcd shield driver. You will need +# the LCD shield to run this example. + +import sensor, image, time +from pyb import Pin, SPI + +cs = Pin("P3", Pin.OUT_OD) +rst = Pin("P7", Pin.OUT_PP) +rs = Pin("P8", Pin.OUT_PP) +# The hardware SPI bus for your OpenMV Cam is always SPI bus 2. + +# NOTE: The SPI clock frequency will not always be the requested frequency. The hardware only supports +# frequencies that are the bus frequency divided by a prescaler (which can be 2, 4, 8, 16, 32, 64, 128 or 256). +spi = SPI(2, SPI.MASTER, baudrate=int(1000000000/66), polarity=0, phase=0) + +def write_command_byte(c): + cs.low() + rs.low() + spi.send(c) + cs.high() + +def write_data_byte(c): + cs.low() + rs.high() + spi.send(c) + cs.high() + +def write_command(c, *data): + write_command_byte(c) + if data: + for d in data: write_data_byte(d) + +def write_image(img): + cs.low() + rs.high() + spi.send(img) + cs.high() + +# Reset the LCD. +rst.low() +time.sleep(100) +rst.high() +time.sleep(100) + +write_command(0x11) # Sleep Exit +time.sleep(120) + +# Memory Data Access Control +# Write 0xC8 for BGR mode. +write_command(0x36, 0xC0) + +# Interface Pixel Format +write_command(0x3A, 0x05) + +# Display On +write_command(0x29) + +sensor.reset() # Initialize the camera sensor. +sensor.set_pixformat(sensor.RGB565) # must be this +sensor.set_framesize(sensor.QQVGA2) # must be this +sensor.skip_frames(time = 2000) # Let new settings take effect. +clock = time.clock() # Tracks FPS. + +while(True): + clock.tick() # Track elapsed milliseconds between snapshots(). + img = sensor.snapshot() # Take a picture and return the image. + + write_command(0x2C) # Write image command... + write_image(img) + + print(clock.fps()) # Note: Your OpenMV Cam runs about half as fast while + # connected to your computer. The FPS should increase once disconnected. diff --git a/scripts/examples/OpenMV/02-Board-Control/timer_control.py b/scripts/examples/OpenMV/02-Board-Control/timer_control.py new file mode 100644 index 000000000..4bb7a024f --- /dev/null +++ b/scripts/examples/OpenMV/02-Board-Control/timer_control.py @@ -0,0 +1,19 @@ +# Timer Control Example +# +# This example shows how to use a timer for callbacks.
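+# +# The callback runs in interrupt context, so it must not allocate memory - toggling an LED, as done below, is safe.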
+ +import time +from pyb import Pin, Timer, LED + +blue_led = LED(3) + +# we will receive the timer object when being called +# Note: functions that allocate memory are not allowed in callbacks +def tick(timer): + blue_led.toggle() + +tim = Timer(2, freq=1) # create a timer object using timer 2 - trigger at 1Hz +tim.callback(tick) # set the callback to our tick function + +while (True): + time.sleep(1000) diff --git a/scripts/examples/OpenMV/02-Board-Control/timer_tests.py b/scripts/examples/OpenMV/02-Board-Control/timer_tests.py new file mode 100644 index 000000000..6e8daa714 --- /dev/null +++ b/scripts/examples/OpenMV/02-Board-Control/timer_tests.py @@ -0,0 +1,25 @@ +# Timer Test Example +# +# This example tests all the timers. + +import time +from pyb import Pin, Timer, LED + +blue_led = LED(3) + +# Note: functions that allocate memory are not allowed in callbacks +def tick(timer): + blue_led.toggle() + +print("") +for i in range(1, 18): + try: + print("Testing TIM%d... "%(i), end="") + tim = Timer(i, freq=10) # create a timer object using timer i - trigger at 10Hz + tim.callback(tick) # set the callback to our tick function + time.sleep(1000) + tim.deinit() + except ValueError as e: + print(e) + continue + print("done!") diff --git a/scripts/examples/OpenMV/02-Board-Control/uart_control.py b/scripts/examples/OpenMV/02-Board-Control/uart_control.py new file mode 100644 index 000000000..dc516a5c8 --- /dev/null +++ b/scripts/examples/OpenMV/02-Board-Control/uart_control.py @@ -0,0 +1,17 @@ +# UART Control +# +# This example shows how to use the serial port on your OpenMV Cam. Attach pin +# P4 to the serial input of a serial LCD screen to see "Hello World!" printed +# on the serial LCD display. + +import time +from pyb import UART + +# Always pass UART 3 for the UART number for your OpenMV Cam. +# The second argument is the UART baud rate. For a more advanced UART control +# example see the BLE-Shield driver. +uart = UART(3, 19200) + +while(True): + uart.write("Hello World!\r") + time.sleep(1000) diff --git a/scripts/examples/OpenMV/02-Board-Control/usb_hid.py b/scripts/examples/OpenMV/02-Board-Control/usb_hid.py new file mode 100644 index 000000000..9b839a475 --- /dev/null +++ b/scripts/examples/OpenMV/02-Board-Control/usb_hid.py @@ -0,0 +1,31 @@ +# Making OpenMV Camera act as a Mouse using HID. +# +# First we need to create a boot.py file to change the default USB mode (VCP+MSC). +# Note: It is recommended to save this file to the uSD card, not the flash storage. +# This will make it easier to restore the default OpenMV (VCP+MSC) USB mode later +# by just deleting boot.py from the uSD card using the PC. +# +# Add the following script to boot.py: +# +##import pyb #(UNCOMMENT THIS LINE!) +##pyb.usb_mode('VCP+HID') # serial device + mouse (UNCOMMENT THIS LINE!) +##pyb.usb_mode('VCP+MSC') # serial device + storage device (default) +##pyb.usb_mode('VCP+HID', hid=pyb.hid_keyboard) # serial device + keyboard +# +# Copy boot.py to the root of the uSD card and restart the camera; it should now +# act as a serial device and a mouse. +# +# Connect to the camera using the IDE and run this script; you should see the mouse move.
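+# +# Each hid.send() transmits a 4-byte mouse report - (buttons, x-delta, y-delta, scroll) - so (0, 10, 0, 0) below nudges the pointer 10 pixels right.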
+# +# Note: To restore the default VCP+MSC USB mode, either use the PC to remove boot.py +# from the uSD card, or use the following Python line: import os; os.remove('boot.py') + +import pyb, time + +hid = pyb.USB_HID() + +while(True): + # x, y and scroll + # move 10 pixels to the right + hid.send((0, 10, 0, 0)) + time.sleep(500) diff --git a/scripts/examples/OpenMV/02-Board-Control/usb_vcp.py b/scripts/examples/OpenMV/02-Board-Control/usb_vcp.py new file mode 100644 index 000000000..159cfd101 --- /dev/null +++ b/scripts/examples/OpenMV/02-Board-Control/usb_vcp.py @@ -0,0 +1,37 @@ +# USB VCP example. +# This example shows how to use the USB VCP class to send an image to the PC on demand. +# +# WARNING: +# This script should NOT be run from the IDE or command line; it should be saved as main.py. +# Note: the following commented script shows how to receive the image from the host side. +# +# #!/usr/bin/env python2.7 +# import sys, serial, struct +# port = '/dev/ttyACM0' +# sp = serial.Serial(port, baudrate=115200, bytesize=serial.EIGHTBITS, parity=serial.PARITY_NONE, +# xonxoff=False, rtscts=False, stopbits=serial.STOPBITS_ONE, timeout=None, dsrdtr=True) +# sp.setDTR(True) # dsrdtr is ignored on Windows. +# sp.write("snap") +# sp.flush() +# size = struct.unpack('<L', sp.read(4))[0] +# img = sp.read(size) +# sp.close() [...] + image_format = (value_mixer >> 8) & 3 + # To test combining different formats + if (image_format==1): small_img = small_img.to_bitmap(copy=True); status = 'bitmap ' + if (image_format==2): small_img = small_img.to_grayscale(copy=True); status = 'grayscale ' + if (image_format==3): small_img = small_img.to_rgb565(copy=True); status = 'rgb565 ' + + # update small image location + if BOUNCE: + x = x + xd + if (x<xmin or x>xmax): + xd = -xd + + y = y + yd + if (y<ymin or y>ymax): + yd = -yd + + # Update small image scale + if RESCALE: + rescale = rescale + rd + if (rescale<min_rescale or rescale>max_rescale): + rd = -rd + + # Find the center of the image + scaled_width = int(small_img.width() * abs(rescale)) + scaled_height= int(small_img.height() * abs(rescale)) + + apply_mask = CYCLE_MASK and ((value_mixer >> 9) & 1) + if apply_mask: + img.draw_image(small_img, int(x), int(y), mask=small_img.to_bitmap(copy=True), x_scale=rescale, y_scale=rescale, alpha=240, hint=image.IMAGE_HINT_BILINEAR | image.IMAGE_HINT_CENTER) + status += 'alpha:240 ' + status += '+mask ' + else: + img.draw_image(small_img, int(x), int(y), x_scale=rescale, y_scale=rescale, alpha=128, hint=image.IMAGE_HINT_BILINEAR | image.IMAGE_HINT_CENTER) + status += 'alpha:128 ' + + img.draw_string(8, 0, status, mono_space = False) + + print(clock.fps()) diff --git a/scripts/examples/OpenMV/03-Drawing/image_drawing_alpha_blending_test.py b/scripts/examples/OpenMV/03-Drawing/image_drawing_alpha_blending_test.py new file mode 100644 index 000000000..8cae0107a --- /dev/null +++ b/scripts/examples/OpenMV/03-Drawing/image_drawing_alpha_blending_test.py @@ -0,0 +1,71 @@ +# Image Drawing Alpha Blending Test +# +# This script tests the performance and quality of the draw_image() +# method which can perform nearest neighbor, bilinear, bicubic, and +# area scaling along with color channel extraction, alpha blending, +# color palette application, and alpha palette application.
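+# +# alpha runs from 0 (fully transparent) to 256 (fully opaque); the loop below sweeps it up and down in steps of 2 so you can watch the blend change.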
+ +import sensor, image, time + +sensor.reset() +sensor.set_pixformat(sensor.RGB565) +sensor.set_framesize(sensor.QVGA) + +hint = image.BICUBIC # image.BILINEAR image.BICUBIC + +small_img = image.Image(4, 4, sensor.RGB565) +small_img.set_pixel(0, 0, (0, 0, 127)) +small_img.set_pixel(1, 0, (47, 255, 199)) +small_img.set_pixel(2, 0, (0, 188, 255)) +small_img.set_pixel(3, 0, (0, 0, 127)) +small_img.set_pixel(0, 1, (0, 176, 255)) +small_img.set_pixel(1, 1, (222, 0, 0 )) +small_img.set_pixel(2, 1, (50, 255, 195)) +small_img.set_pixel(3, 1, (86, 255, 160)) +small_img.set_pixel(0, 2, (255, 211, 0 )) +small_img.set_pixel(1, 2, (83, 255, 163)) +small_img.set_pixel(2, 2, (255, 211, 0)) +small_img.set_pixel(3, 2, (0, 80, 255)) +small_img.set_pixel(0, 3, (255, 118, 0 )) +small_img.set_pixel(1, 3, (127, 0, 0 )) +small_img.set_pixel(2, 3, (0, 144, 255)) +small_img.set_pixel(3, 3, (50, 255, 195)) +#small_img.to_grayscale() +#small_img.to_bitmap() + +big_img = image.Image(128, 128, sensor.RGB565) +big_img.draw_image(small_img, 0, 0, x_scale=32, y_scale=32, hint=hint) +#big_img.to_grayscale() +#big_img.to_bitmap() + +alpha_div = 1 +alpha_value = 0 +alpha_step = 2 + +x_bounce = sensor.width()//2 +x_bounce_toggle = 1 + +y_bounce = sensor.height()//2 +y_bounce_toggle = 1 + +clock = time.clock() +while(True): + clock.tick() + + img = sensor.snapshot() + #img.to_grayscale() + #img.to_bitmap() + img.draw_image(big_img, x_bounce, y_bounce, + rgb_channel=-1, alpha=alpha_value//alpha_div, + hint=hint|image.CENTER) + + x_bounce += x_bounce_toggle + if abs(x_bounce-(img.width()//2)) >= (img.width()//2): x_bounce_toggle = -x_bounce_toggle + + y_bounce += y_bounce_toggle + if abs(y_bounce-(img.height()//2)) >= (img.height()//2): y_bounce_toggle = -y_bounce_toggle + + alpha_value += alpha_step + if not alpha_value or alpha_value//alpha_div == 256: alpha_step = -alpha_step + + print(clock.fps()) diff --git a/scripts/examples/OpenMV/03-Drawing/image_drawing_alpha_blending_with_color_table_test.py b/scripts/examples/OpenMV/03-Drawing/image_drawing_alpha_blending_with_color_table_test.py new file mode 100644 index 000000000..a3f70c0a6 --- /dev/null +++ b/scripts/examples/OpenMV/03-Drawing/image_drawing_alpha_blending_with_color_table_test.py @@ -0,0 +1,81 @@ +# Image Drawing Color Table with Alpha Blending Test +# +# This script tests the performance and quality of the draw_image() +# method which can perform nearest neighbor, bilinear, bicubic, and +# area scaling along with color channel extraction, alpha blending, +# color palette application, and alpha palette application. + +import sensor, image, time + +sensor.reset() +sensor.set_pixformat(sensor.RGB565) +sensor.set_framesize(sensor.QVGA) + +hint = image.BICUBIC # image.BILINEAR image.BICUBIC + +# RGB channel extraction is done after scaling normally, this +# may produce false colors. Set this flag to do it before. +# +hint |= 0 # image.EXTRACT_RGB_CHANNEL_FIRST + +# Color table application is done after scaling normally, this +# may produce false colors. Set this flag to do it before. 
+# +hint |= 0 # image.APPLY_COLOR_PALETTE_FIRST + +small_img = image.Image(4, 4, sensor.RGB565) +small_img.set_pixel(0, 0, (0, 0, 127)) +small_img.set_pixel(1, 0, (47, 255, 199)) +small_img.set_pixel(2, 0, (0, 188, 255)) +small_img.set_pixel(3, 0, (0, 0, 127)) +small_img.set_pixel(0, 1, (0, 176, 255)) +small_img.set_pixel(1, 1, (222, 0, 0 )) +small_img.set_pixel(2, 1, (50, 255, 195)) +small_img.set_pixel(3, 1, (86, 255, 160)) +small_img.set_pixel(0, 2, (255, 211, 0 )) +small_img.set_pixel(1, 2, (83, 255, 163)) +small_img.set_pixel(2, 2, (255, 211, 0)) +small_img.set_pixel(3, 2, (0, 80, 255)) +small_img.set_pixel(0, 3, (255, 118, 0 )) +small_img.set_pixel(1, 3, (127, 0, 0 )) +small_img.set_pixel(2, 3, (0, 144, 255)) +small_img.set_pixel(3, 3, (50, 255, 195)) +#small_img.to_grayscale() +#small_img.to_bitmap() + +big_img = image.Image(128, 128, sensor.RGB565) +big_img.draw_image(small_img, 0, 0, x_scale=32, y_scale=32, hint=hint) +#big_img.to_grayscale() +#big_img.to_bitmap() + +alpha_div = 1 +alpha_value = 0 +alpha_step = 2 + +x_bounce = sensor.width()//2 +x_bounce_toggle = 1 + +y_bounce = sensor.height()//2 +y_bounce_toggle = 1 + +clock = time.clock() +while(True): + clock.tick() + + img = sensor.snapshot() + #img.to_grayscale() + #img.to_bitmap() + img.draw_image(big_img, x_bounce, y_bounce, + rgb_channel=-1, alpha=alpha_value//alpha_div, + color_palette=sensor.PALETTE_IRONBOW, hint=hint|image.CENTER) + + x_bounce += x_bounce_toggle + if abs(x_bounce-(img.width()//2)) >= (img.width()//2): x_bounce_toggle = -x_bounce_toggle + + y_bounce += y_bounce_toggle + if abs(y_bounce-(img.height()//2)) >= (img.height()//2): y_bounce_toggle = -y_bounce_toggle + + alpha_value += alpha_step + if not alpha_value or alpha_value//alpha_div == 256: alpha_step = -alpha_step + + print(clock.fps()) diff --git a/scripts/examples/OpenMV/03-Drawing/image_drawing_alpha_table_test.py b/scripts/examples/OpenMV/03-Drawing/image_drawing_alpha_table_test.py new file mode 100644 index 000000000..7ca783fe9 --- /dev/null +++ b/scripts/examples/OpenMV/03-Drawing/image_drawing_alpha_table_test.py @@ -0,0 +1,75 @@ +# Image Drawing Alpha Table Test +# +# This script tests the performance and quality of the draw_image() +# method which can perform nearest neighbor, bilinear, bicubic, and +# area scaling along with color channel extraction, alpha blending, +# color palette application, and alpha palette application. 
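+# +# The alpha palette used here is a 256x1 grayscale LUT indexed by source pixel brightness; the one built below maps dark pixels to alpha 0 and bright pixels to alpha 255, punching hard-edged holes in the overlay.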
+ +import sensor, image, time + +sensor.reset() +sensor.set_pixformat(sensor.RGB565) +sensor.set_framesize(sensor.QVGA) + +hint = image.BICUBIC # image.BILINEAR image.BICUBIC + +small_img = image.Image(4, 4, sensor.RGB565) +small_img.set_pixel(0, 0, (0, 0, 127)) +small_img.set_pixel(1, 0, (47, 255, 199)) +small_img.set_pixel(2, 0, (0, 188, 255)) +small_img.set_pixel(3, 0, (0, 0, 127)) +small_img.set_pixel(0, 1, (0, 176, 255)) +small_img.set_pixel(1, 1, (222, 0, 0 )) +small_img.set_pixel(2, 1, (50, 255, 195)) +small_img.set_pixel(3, 1, (86, 255, 160)) +small_img.set_pixel(0, 2, (255, 211, 0 )) +small_img.set_pixel(1, 2, (83, 255, 163)) +small_img.set_pixel(2, 2, (255, 211, 0)) +small_img.set_pixel(3, 2, (0, 80, 255)) +small_img.set_pixel(0, 3, (255, 118, 0 )) +small_img.set_pixel(1, 3, (127, 0, 0 )) +small_img.set_pixel(2, 3, (0, 144, 255)) +small_img.set_pixel(3, 3, (50, 255, 195)) +#small_img.to_grayscale() +#small_img.to_bitmap() + +big_img = image.Image(128, 128, sensor.RGB565) +big_img.draw_image(small_img, 0, 0, x_scale=32, y_scale=32, hint=hint) +#big_img.to_grayscale() +#big_img.to_bitmap() + +alpha_lut = image.Image(256, 1, sensor.GRAYSCALE) +for i in range(256): + alpha_lut.set_pixel(i, 0, 255 if i > 127 else 0) + +alpha_div = 1 +alpha_value = 0 +alpha_step = 2 + +x_bounce = sensor.width()//2 +x_bounce_toggle = 1 + +y_bounce = sensor.height()//2 +y_bounce_toggle = 1 + +clock = time.clock() +while(True): + clock.tick() + + img = sensor.snapshot() + #img.to_grayscale() + #img.to_bitmap() + img.draw_image(big_img, x_bounce, y_bounce, + rgb_channel=-1, alpha=alpha_value//alpha_div, + alpha_palette=alpha_lut, hint=hint|image.CENTER) + + x_bounce += x_bounce_toggle + if abs(x_bounce-(img.width()//2)) >= (img.width()//2): x_bounce_toggle = -x_bounce_toggle + + y_bounce += y_bounce_toggle + if abs(y_bounce-(img.height()//2)) >= (img.height()//2): y_bounce_toggle = -y_bounce_toggle + + alpha_value += alpha_step + if not alpha_value or alpha_value//alpha_div == 256: alpha_step = -alpha_step + + print(clock.fps()) diff --git a/scripts/examples/OpenMV/03-Drawing/image_drawing_alpha_table_with_color_table_test.py b/scripts/examples/OpenMV/03-Drawing/image_drawing_alpha_table_with_color_table_test.py new file mode 100644 index 000000000..014a2c1ce --- /dev/null +++ b/scripts/examples/OpenMV/03-Drawing/image_drawing_alpha_table_with_color_table_test.py @@ -0,0 +1,85 @@ +# Image Drawing Color Table with Alpha Table Test +# +# This script tests the performance and quality of the draw_image() +# method which can perform nearest neighbor, bilinear, bicubic, and +# area scaling along with color channel extraction, alpha blending, +# color palette application, and alpha palette application. + +import sensor, image, time + +sensor.reset() +sensor.set_pixformat(sensor.RGB565) +sensor.set_framesize(sensor.QVGA) + +hint = image.BICUBIC # image.BILINEAR image.BICUBIC + +# RGB channel extraction is done after scaling normally, this +# may produce false colors. Set this flag to do it before. +# +hint |= 0 # image.EXTRACT_RGB_CHANNEL_FIRST + +# Color table application is done after scaling normally, this +# may produce false colors. Set this flag to do it before. 
+# +hint |= 0 # image.APPLY_COLOR_PALETTE_FIRST + +small_img = image.Image(4, 4, sensor.RGB565) +small_img.set_pixel(0, 0, (0, 0, 127)) +small_img.set_pixel(1, 0, (47, 255, 199)) +small_img.set_pixel(2, 0, (0, 188, 255)) +small_img.set_pixel(3, 0, (0, 0, 127)) +small_img.set_pixel(0, 1, (0, 176, 255)) +small_img.set_pixel(1, 1, (222, 0, 0 )) +small_img.set_pixel(2, 1, (50, 255, 195)) +small_img.set_pixel(3, 1, (86, 255, 160)) +small_img.set_pixel(0, 2, (255, 211, 0 )) +small_img.set_pixel(1, 2, (83, 255, 163)) +small_img.set_pixel(2, 2, (255, 211, 0)) +small_img.set_pixel(3, 2, (0, 80, 255)) +small_img.set_pixel(0, 3, (255, 118, 0 )) +small_img.set_pixel(1, 3, (127, 0, 0 )) +small_img.set_pixel(2, 3, (0, 144, 255)) +small_img.set_pixel(3, 3, (50, 255, 195)) +#small_img.to_grayscale() +#small_img.to_bitmap() + +big_img = image.Image(128, 128, sensor.RGB565) +big_img.draw_image(small_img, 0, 0, x_scale=32, y_scale=32, hint=hint) +#big_img.to_grayscale() +#big_img.to_bitmap() + +alpha_lut = image.Image(256, 1, sensor.GRAYSCALE) +for i in range(256): + alpha_lut.set_pixel(i, 0, 255 if i > 127 else 0) + +alpha_div = 1 +alpha_value = 0 +alpha_step = 2 + +x_bounce = sensor.width()//2 +x_bounce_toggle = 1 + +y_bounce = sensor.height()//2 +y_bounce_toggle = 1 + +clock = time.clock() +while(True): + clock.tick() + + img = sensor.snapshot() + #img.to_grayscale() + #img.to_bitmap() + img.draw_image(big_img, x_bounce, y_bounce, + rgb_channel=-1, alpha=alpha_value//alpha_div, + color_palette=sensor.PALETTE_IRONBOW, alpha_palette=alpha_lut, hint=hint|image.CENTER) + + x_bounce += x_bounce_toggle + if abs(x_bounce-(img.width()//2)) >= (img.width()//2): x_bounce_toggle = -x_bounce_toggle + + y_bounce += y_bounce_toggle + if abs(y_bounce-(img.height()//2)) >= (img.height()//2): y_bounce_toggle = -y_bounce_toggle + + alpha_value += alpha_step + if not alpha_value or alpha_value//alpha_div == 256: alpha_step = -alpha_step + + print(clock.fps()) diff --git a/scripts/examples/OpenMV/03-Drawing/image_drawing_scale_down_test.py b/scripts/examples/OpenMV/03-Drawing/image_drawing_scale_down_test.py new file mode 100644 index 000000000..7b4f9a67c --- /dev/null +++ b/scripts/examples/OpenMV/03-Drawing/image_drawing_scale_down_test.py @@ -0,0 +1,69 @@ +# Image Scaling Down Drawing Test +# +# This script tests the performance and quality of the draw_image() +# method which can perform nearest neighbor, bilinear, bicubic, and +# area scaling along with color channel extraction, alpha blending, +# color palette application, and alpha palette application. 
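+# +# image.AREA scaling averages every source pixel that falls under each destination pixel, which generally gives the cleanest results when shrinking an image.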
+ +# DISABLE THE FRAME BUFFER TO SEE THE REAL FPS + +import sensor, image, time + +up_hint = 0 # image.BILINEAR image.BICUBIC +down_hint = image.AREA # image.BILINEAR image.BICUBIC image.AREA + +bounce_div = 128 + +medium_img = image.Image(32, 32, sensor.RGB565, copy_to_fb=True) +#medium_img.to_grayscale() +#medium_img.to_bitmap() + +small_img = image.Image(4, 4, sensor.RGB565) +small_img.set_pixel(0, 0, (0, 0, 127)) +small_img.set_pixel(1, 0, (47, 255, 199)) +small_img.set_pixel(2, 0, (0, 188, 255)) +small_img.set_pixel(3, 0, (0, 0, 127)) +small_img.set_pixel(0, 1, (0, 176, 255)) +small_img.set_pixel(1, 1, (222, 0, 0 )) +small_img.set_pixel(2, 1, (50, 255, 195)) +small_img.set_pixel(3, 1, (86, 255, 160)) +small_img.set_pixel(0, 2, (255, 211, 0 )) +small_img.set_pixel(1, 2, (83, 255, 163)) +small_img.set_pixel(2, 2, (255, 211, 0)) +small_img.set_pixel(3, 2, (0, 80, 255)) +small_img.set_pixel(0, 3, (255, 118, 0 )) +small_img.set_pixel(1, 3, (127, 0, 0 )) +small_img.set_pixel(2, 3, (0, 144, 255)) +small_img.set_pixel(3, 3, (50, 255, 195)) +#small_img.to_grayscale() +#small_img.to_bitmap() + +big_img = image.Image(128, 128, sensor.RGB565) +big_img.draw_image(small_img, 0, 0, x_scale=32, y_scale=32, hint=up_hint) +#big_img.to_grayscale() +#big_img.to_bitmap() + +x_bounce = 0 +x_bounce_toggle = 0 + +y_bounce = 0 +y_bounce_toggle = 0 + +clock = time.clock() +while(True): + clock.tick() + + medium_img.clear() + medium_img.draw_image(big_img, + x_bounce // bounce_div, y_bounce // bounce_div, + x_scale=0.25, y_scale=0.25, + hint=down_hint) + sensor.flush() + + x_bounce += x_bounce_toggle + if abs(x_bounce // bounce_div) >= (medium_img.width()*1.1): x_bounce_toggle = -x_bounce_toggle + + y_bounce += y_bounce_toggle + if abs(y_bounce // bounce_div) >= (medium_img.height()*1.1): y_bounce_toggle = -y_bounce_toggle + + print(clock.fps()) diff --git a/scripts/examples/OpenMV/03-Drawing/image_drawing_scale_up_test.py b/scripts/examples/OpenMV/03-Drawing/image_drawing_scale_up_test.py new file mode 100644 index 000000000..92bcf91ee --- /dev/null +++ b/scripts/examples/OpenMV/03-Drawing/image_drawing_scale_up_test.py @@ -0,0 +1,63 @@ +# Image Scaling Up Drawing Test +# +# This script tests the performance and quality of the draw_image() +# method which can perform nearest neighbor, bilinear, bicubic, and +# area scaling along with color channel extraction, alpha blending, +# color palette application, and alpha palette application. 
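+# +# With hint=0 the scaler falls back to nearest neighbor, so the 4x4 source blown up 32x keeps hard pixel edges; swap in image.BILINEAR or image.BICUBIC to smooth them.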
+ +# DISABLE THE FRAME BUFFER TO SEE THE REAL FPS + +import sensor, image, time + +hint = 0 # image.BILINEAR image.BICUBIC + +bounce_div = 32 + +big_img = image.Image(128, 128, sensor.RGB565, copy_to_fb=True) +#big_img.to_grayscale() +#big_img.to_bitmap() + +small_img = image.Image(4, 4, sensor.RGB565) +small_img.set_pixel(0, 0, (0, 0, 127)) +small_img.set_pixel(1, 0, (47, 255, 199)) +small_img.set_pixel(2, 0, (0, 188, 255)) +small_img.set_pixel(3, 0, (0, 0, 127)) +small_img.set_pixel(0, 1, (0, 176, 255)) +small_img.set_pixel(1, 1, (222, 0, 0 )) +small_img.set_pixel(2, 1, (50, 255, 195)) +small_img.set_pixel(3, 1, (86, 255, 160)) +small_img.set_pixel(0, 2, (255, 211, 0 )) +small_img.set_pixel(1, 2, (83, 255, 163)) +small_img.set_pixel(2, 2, (255, 211, 0)) +small_img.set_pixel(3, 2, (0, 80, 255)) +small_img.set_pixel(0, 3, (255, 118, 0 )) +small_img.set_pixel(1, 3, (127, 0, 0 )) +small_img.set_pixel(2, 3, (0, 144, 255)) +small_img.set_pixel(3, 3, (50, 255, 195)) +#small_img.to_grayscale() +#small_img.to_bitmap() + +x_bounce = 0 +x_bounce_toggle = 0 + +y_bounce = 0 +y_bounce_toggle = 0 + +clock = time.clock() +while(True): + clock.tick() + + big_img.clear() + big_img.draw_image(small_img, + x_bounce // bounce_div, y_bounce // bounce_div, + x_scale=32, y_scale=32, + hint=hint) + sensor.flush() + + x_bounce += x_bounce_toggle + if abs(x_bounce // bounce_div) >= (big_img.width()*1.1): x_bounce_toggle = -x_bounce_toggle + + y_bounce += y_bounce_toggle + if abs(y_bounce // bounce_div) >= (big_img.height()*1.1): y_bounce_toggle = -y_bounce_toggle + + print(clock.fps()) diff --git a/scripts/examples/OpenMV/03-Drawing/image_drawing_with_custom_palette.py b/scripts/examples/OpenMV/03-Drawing/image_drawing_with_custom_palette.py new file mode 100644 index 000000000..d379b3342 --- /dev/null +++ b/scripts/examples/OpenMV/03-Drawing/image_drawing_with_custom_palette.py @@ -0,0 +1,43 @@ +# Draw Image Example with custom color palette +# +# This example shows off how to draw images in the frame buffer with a custom generated color palette. + +import sensor, image, time, pyb + +sensor.reset() +sensor.set_pixformat(sensor.GRAYSCALE) # or GRAYSCALE... +sensor.set_framesize(sensor.QQVGA) # or QQVGA... +sensor.skip_frames(time = 2000) +clock = time.clock() + +# the color palette is actually an image, this allows you to use image ops to create palettes +# the image must have 256 entries i.e. 
256x1, 64x4, 16x16 and have the format rgb565 + +# Initialise palette source colors into an image +palette_source_colors = [(255, 0, 0), (0, 255, 0), (0, 0, 255), (255, 0, 255)] +palette_source_color_image = image.Image(len(palette_source_colors), 1, sensor.RGB565) +for i, color in enumerate(palette_source_colors): + palette_source_color_image[i] = color + +# Scale the image to palette width and smooth it +palette = image.Image(256,1, sensor.RGB565) +palette.draw_image(palette_source_color_image, 0, 0, x_scale=palette.width() / palette_source_color_image.width()) +palette.mean(int(palette.width() / palette_source_color_image.width()/2)) + +while(True): + clock.tick() + + img = sensor.snapshot() + # Get a copy of the grayscale image before converting to color + img_copy = img.copy() + + img.to_rgb565() + + palette_boundary_inset = int(sensor.width() / 40) + palette_scale_x = (sensor.width() - palette_boundary_inset * 2) / palette.width() + + img.draw_image(img_copy, 0, 0, color_palette=palette) + img.draw_image(palette, palette_boundary_inset, palette_boundary_inset, x_scale=palette_scale_x, y_scale=8) + img.draw_rectangle(palette_boundary_inset, palette_boundary_inset, int(palette.width()*palette_scale_x), 8, color=(255,255,255), thickness=1) + + print(clock.fps()) diff --git a/scripts/examples/OpenMV/03-Drawing/keypoints_drawing.py b/scripts/examples/OpenMV/03-Drawing/keypoints_drawing.py new file mode 100644 index 000000000..c41a8161d --- /dev/null +++ b/scripts/examples/OpenMV/03-Drawing/keypoints_drawing.py @@ -0,0 +1,31 @@ +# Keypoints Drawing +# +# This example shows off drawing keypoints on the OpenMV Cam. Usually you call draw_keypoints() +# on a keypoints object but you can also call it on a list of 3-value tuples... + +import sensor, image, time, pyb + +sensor.reset() +sensor.set_pixformat(sensor.RGB565) # or GRAYSCALE... +sensor.set_framesize(sensor.QVGA) # or QQVGA... +sensor.skip_frames(time = 2000) +clock = time.clock() + +while(True): + clock.tick() + + img = sensor.snapshot() + + for i in range(20): + x = (pyb.rng() % (2*img.width())) - (img.width()//2) + y = (pyb.rng() % (2*img.height())) - (img.height()//2) + rot = pyb.rng() % 360 + + r = (pyb.rng() % 127) + 128 + g = (pyb.rng() % 127) + 128 + b = (pyb.rng() % 127) + 128 + + # This method draws a keypoints object or a list of (x, y, rot) tuples... + img.draw_keypoints([(x, y, rot)], color = (r, g, b), size = 20, thickness = 2, fill = False) + + print(clock.fps()) diff --git a/scripts/examples/OpenMV/03-Drawing/line_drawing.py b/scripts/examples/OpenMV/03-Drawing/line_drawing.py new file mode 100644 index 000000000..eb2d761bd --- /dev/null +++ b/scripts/examples/OpenMV/03-Drawing/line_drawing.py @@ -0,0 +1,31 @@ +# Line Drawing +# +# This example shows off drawing lines on the OpenMV Cam. + +import sensor, image, time, pyb + +sensor.reset() +sensor.set_pixformat(sensor.RGB565) # or GRAYSCALE... +sensor.set_framesize(sensor.QVGA) # or QQVGA... +sensor.skip_frames(time = 2000) +clock = time.clock() + +while(True): + clock.tick() + + img = sensor.snapshot() + + for i in range(10): + x0 = (pyb.rng() % (2*img.width())) - (img.width()//2) + y0 = (pyb.rng() % (2*img.height())) - (img.height()//2) + x1 = (pyb.rng() % (2*img.width())) - (img.width()//2) + y1 = (pyb.rng() % (2*img.height())) - (img.height()//2) + r = (pyb.rng() % 127) + 128 + g = (pyb.rng() % 127) + 128 + b = (pyb.rng() % 127) + 128 + + # If the first argument is a scalar then this method expects + # to see x0, y0, x1, and y1.
Otherwise, it expects a (x0,y0,x1,y1) tuple. + img.draw_line(x0, y0, x1, y1, color = (r, g, b), thickness = 2) + + print(clock.fps()) diff --git a/scripts/examples/OpenMV/03-Drawing/rectangle_drawing.py b/scripts/examples/OpenMV/03-Drawing/rectangle_drawing.py new file mode 100644 index 000000000..ab2afac66 --- /dev/null +++ b/scripts/examples/OpenMV/03-Drawing/rectangle_drawing.py @@ -0,0 +1,31 @@ +# Rectangle Drawing +# +# This example shows off drawing rectangles on the OpenMV Cam. + +import sensor, image, time, pyb + +sensor.reset() +sensor.set_pixformat(sensor.RGB565) # or GRAYSCALE... +sensor.set_framesize(sensor.QVGA) # or QQVGA... +sensor.skip_frames(time = 2000) +clock = time.clock() + +while(True): + clock.tick() + + img = sensor.snapshot() + + for i in range(10): + x = (pyb.rng() % (2*img.width())) - (img.width()//2) + y = (pyb.rng() % (2*img.height())) - (img.height()//2) + w = (pyb.rng() % (img.width()//2)) + h = (pyb.rng() % (img.height()//2)) + r = (pyb.rng() % 127) + 128 + g = (pyb.rng() % 127) + 128 + b = (pyb.rng() % 127) + 128 + + # If the first argument is a scalar then this method expects + # to see x, y, w, and h. Otherwise, it expects a (x,y,w,h) tuple. + img.draw_rectangle(x, y, w, h, color = (r, g, b), thickness = 2, fill = False) + + print(clock.fps()) diff --git a/scripts/examples/OpenMV/03-Drawing/text_drawing.py b/scripts/examples/OpenMV/03-Drawing/text_drawing.py new file mode 100644 index 000000000..da37af656 --- /dev/null +++ b/scripts/examples/OpenMV/03-Drawing/text_drawing.py @@ -0,0 +1,33 @@ +# Text Drawing +# +# This example shows off drawing text on the OpenMV Cam. + +import sensor, image, time, pyb + +sensor.reset() +sensor.set_pixformat(sensor.RGB565) # or GRAYSCALE... +sensor.set_framesize(sensor.QVGA) # or QQVGA... +sensor.skip_frames(time = 2000) +clock = time.clock() + +while(True): + clock.tick() + + img = sensor.snapshot() + + for i in range(10): + x = (pyb.rng() % (2*img.width())) - (img.width()//2) + y = (pyb.rng() % (2*img.height())) - (img.height()//2) + r = (pyb.rng() % 127) + 128 + g = (pyb.rng() % 127) + 128 + b = (pyb.rng() % 127) + 128 + + # If the first argument is a scalar then this method expects + # to see x, y, and text. Otherwise, it expects a (x,y,text) tuple. + + # Character and string rotation can be done at 0, 90, 180, 270, etc. degrees. + img.draw_string(x, y, "Hello World!", color = (r, g, b), scale = 2, mono_space = False, + char_rotation = 0, char_hmirror = False, char_vflip = False, + string_rotation = 0, string_hmirror = False, string_vflip = False) + + print(clock.fps()) diff --git a/scripts/examples/OpenMV/04-Image-Filters/adaptive_histogram_equalization.py b/scripts/examples/OpenMV/04-Image-Filters/adaptive_histogram_equalization.py new file mode 100644 index 000000000..a958b531e --- /dev/null +++ b/scripts/examples/OpenMV/04-Image-Filters/adaptive_histogram_equalization.py @@ -0,0 +1,29 @@ +# Adaptive Histogram Equalization +# +# This example shows off how to use adaptive histogram equalization to improve +# the contrast in the image. Adaptive histogram equalization splits the image +# into regions and then equalizes the histogram in those regions to improve +# the image contrast versus a global histogram equalization. Additionally, +# you may specify a clip limit to prevent the contrast from going wild.
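+# +# Loosely speaking, the clip limit caps how far any histogram bin may grow relative to the average before equalization, which bounds the local contrast boost.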
+ +import sensor, image, time + +sensor.reset() +sensor.set_pixformat(sensor.RGB565) +sensor.set_framesize(sensor.QQVGA) +sensor.skip_frames(time = 2000) +clock = time.clock() + +while(True): + clock.tick() + + # A clip_limit of < 0 gives you normal adaptive histogram equalization + # which may result in huge amounts of contrast noise... + + # A clip_limit of 1 does nothing. For best results go slightly higher + # than 1 like below. The higher you go the closer you get back to + # standard adaptive histogram equalization with huge contrast swings. + + img = sensor.snapshot().histeq(adaptive=True, clip_limit=3) + + print(clock.fps()) diff --git a/scripts/examples/OpenMV/04-Image-Filters/blur_filter.py b/scripts/examples/OpenMV/04-Image-Filters/blur_filter.py new file mode 100644 index 000000000..6074d2f2b --- /dev/null +++ b/scripts/examples/OpenMV/04-Image-Filters/blur_filter.py @@ -0,0 +1,21 @@ +# Blur Filter Example +# +# This example shows off using the gaussian filter to blur images. + +import sensor, image, time + +sensor.reset() # Initialize the camera sensor. +sensor.set_pixformat(sensor.GRAYSCALE) # or sensor.RGB565 +sensor.set_framesize(sensor.QQVGA) # or sensor.QVGA (or others) +sensor.skip_frames(time = 2000) # Let new settings take effect. +clock = time.clock() # Tracks FPS. + +while(True): + clock.tick() # Track elapsed milliseconds between snapshots(). + img = sensor.snapshot() # Take a picture and return the image. + + # Run the kernel on every pixel of the image. + img.gaussian(1) + + print(clock.fps()) # Note: Your OpenMV Cam runs about half as fast while + # connected to your computer. The FPS should increase once disconnected. diff --git a/scripts/examples/OpenMV/04-Image-Filters/cartoon_filter.py b/scripts/examples/OpenMV/04-Image-Filters/cartoon_filter.py new file mode 100644 index 000000000..c1703ca47 --- /dev/null +++ b/scripts/examples/OpenMV/04-Image-Filters/cartoon_filter.py @@ -0,0 +1,29 @@ +# Cartoon Filter +# +# This example shows off a simple cartoon filter on images. The cartoon +# filter works by joining similar pixel areas of an image and replacing +# the pixels in those areas with the area mean. + +import sensor, image, time + +sensor.reset() +sensor.set_pixformat(sensor.RGB565) # or GRAYSCALE... +sensor.set_framesize(sensor.QVGA) # or QQVGA... +sensor.skip_frames(time = 2000) +clock = time.clock() + +while(True): + clock.tick() + + # seed_threshold controls the maximum area growth of a colored + # region. Making this larger will merge more pixels. + + # floating_thresholds controls the maximum pixel-to-pixel difference + # when growing a region. Setting this very high will quickly combine + # all pixels in the image. You should keep this small. + + # cartoon() will grow regions while both thresholds are satisfied... + + img = sensor.snapshot().cartoon(seed_threshold=0.05, floating_thresholds=0.05) + + print(clock.fps()) diff --git a/scripts/examples/OpenMV/04-Image-Filters/color_bilateral_filter.py b/scripts/examples/OpenMV/04-Image-Filters/color_bilateral_filter.py new file mode 100644 index 000000000..1bdbbb7eb --- /dev/null +++ b/scripts/examples/OpenMV/04-Image-Filters/color_bilateral_filter.py @@ -0,0 +1,33 @@ +# Color Bilateral Filter Example +# +# This example shows off using the bilateral filter on color images. + +import sensor, image, time + +sensor.reset() # Initialize the camera sensor.
+sensor.set_pixformat(sensor.RGB565) # or sensor.GRAYSCALE +sensor.set_framesize(sensor.QQVGA) # or sensor.QVGA (or others) +sensor.skip_frames(time = 2000) # Let new settings take effect. +clock = time.clock() # Tracks FPS. + +while(True): + clock.tick() # Track elapsed milliseconds between snapshots(). + img = sensor.snapshot() # Take a picture and return the image. + + # color_sigma controls how close color-wise pixels have to be to each other to be + # blurred together. A smaller value means they have to be closer. + # A larger value is less strict. + + # space_sigma controls how close space-wise pixels have to be to each other to be + # blurred together. A smaller value means they have to be closer. + # A larger value is less strict. + + # Run the kernel on every pixel of the image. + img.bilateral(3, color_sigma=0.1, space_sigma=1) + + # Note that the bilateral filter can introduce image defects if you set + # color_sigma/space_sigma too aggressively. Increase the sigma values until + # the defects go away if you see them. + + print(clock.fps()) # Note: Your OpenMV Cam runs about half as fast while + # connected to your computer. The FPS should increase once disconnected. diff --git a/scripts/examples/OpenMV/04-Image-Filters/color_binary_filter.py b/scripts/examples/OpenMV/04-Image-Filters/color_binary_filter.py new file mode 100644 index 000000000..b8686f7bc --- /dev/null +++ b/scripts/examples/OpenMV/04-Image-Filters/color_binary_filter.py @@ -0,0 +1,61 @@ +# Color Binary Filter Example +# +# This script shows off the binary image filter. You may pass binary() any +# number of thresholds to segment the image by. + +import sensor, image, time + +sensor.reset() +sensor.set_framesize(sensor.QVGA) +sensor.set_pixformat(sensor.RGB565) +sensor.skip_frames(time = 2000) +clock = time.clock() + +# Use the Tools -> Machine Vision -> Threshold Editor to pick better thresholds. +red_threshold = (0,100, 0,127, 0,127) # L A B +green_threshold = (0,100, -128,0, 0,127) # L A B +blue_threshold = (0,100, -128,127, -128,0) # L A B + +while(True): + + # Test red threshold + for i in range(100): + clock.tick() + img = sensor.snapshot() + img.binary([red_threshold]) + print(clock.fps()) + + # Test green threshold + for i in range(100): + clock.tick() + img = sensor.snapshot() + img.binary([green_threshold]) + print(clock.fps()) + + # Test blue threshold + for i in range(100): + clock.tick() + img = sensor.snapshot() + img.binary([blue_threshold]) + print(clock.fps()) + + # Test not red threshold + for i in range(100): + clock.tick() + img = sensor.snapshot() + img.binary([red_threshold], invert = 1) + print(clock.fps()) + + # Test not green threshold + for i in range(100): + clock.tick() + img = sensor.snapshot() + img.binary([green_threshold], invert = 1) + print(clock.fps()) + + # Test not blue threshold + for i in range(100): + clock.tick() + img = sensor.snapshot() + img.binary([blue_threshold], invert = 1) + print(clock.fps()) diff --git a/scripts/examples/OpenMV/04-Image-Filters/color_light_removal.py b/scripts/examples/OpenMV/04-Image-Filters/color_light_removal.py new file mode 100644 index 000000000..270104cfb --- /dev/null +++ b/scripts/examples/OpenMV/04-Image-Filters/color_light_removal.py @@ -0,0 +1,25 @@ +# Color Light Removal +# +# This example shows off how to remove bright lights from the image. +# You can do this using the binary() method with the "zero=" argument.
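+# +# With zero=True, binary() zeroes the pixels that match the threshold and leaves everything else untouched, rather than producing a black-and-white image.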
+# +# Removing bright lights from the image allows you to use +# histeq() on the image without outliers from oversaturated +# parts of the image breaking the algorithm... + +import sensor, image, time + +sensor.reset() # Initialize the camera sensor. +sensor.set_pixformat(sensor.RGB565) # or sensor.GRAYSCALE +sensor.set_framesize(sensor.QQVGA) # or sensor.QVGA (or others) +sensor.skip_frames(time = 2000) # Let new settings take effect. +clock = time.clock() # Tracks FPS. + +thresholds = (90, 100, -128, 127, -128, 127) + +while(True): + clock.tick() # Track elapsed milliseconds between snapshots(). + img = sensor.snapshot().binary([thresholds], invert=False, zero=True) + + print(clock.fps()) # Note: Your OpenMV Cam runs about half as fast while + # connected to your computer. The FPS should increase once disconnected. diff --git a/scripts/examples/OpenMV/04-Image-Filters/edge_filter.py b/scripts/examples/OpenMV/04-Image-Filters/edge_filter.py new file mode 100644 index 000000000..cdc03ddf3 --- /dev/null +++ b/scripts/examples/OpenMV/04-Image-Filters/edge_filter.py @@ -0,0 +1,21 @@ +# Edge Filter Example +# +# This example shows off using the laplacian filter to detect edges. + +import sensor, image, time + +sensor.reset() # Initialize the camera sensor. +sensor.set_pixformat(sensor.GRAYSCALE) # or sensor.RGB565 +sensor.set_framesize(sensor.QQVGA) # or sensor.QVGA (or others) +sensor.skip_frames(time = 2000) # Let new settings take effect. +clock = time.clock() # Tracks FPS. + +while(True): + clock.tick() # Track elapsed milliseconds between snapshots(). + img = sensor.snapshot() # Take a picture and return the image. + + # Run the kernel on every pixel of the image. + img.laplacian(1) + + print(clock.fps()) # Note: Your OpenMV Cam runs about half as fast while + # connected to your computer. The FPS should increase once disconnected. diff --git a/scripts/examples/OpenMV/04-Image-Filters/erode_and_dilate.py b/scripts/examples/OpenMV/04-Image-Filters/erode_and_dilate.py new file mode 100644 index 000000000..06a6fde68 --- /dev/null +++ b/scripts/examples/OpenMV/04-Image-Filters/erode_and_dilate.py @@ -0,0 +1,35 @@ +# Erode and Dilate Example +# +# This example shows off the erode and dilate functions which you can run on +# a binary image to remove noise. This example was originally a test but it's +# useful for showing off how these functions work. + +import pyb, sensor, image + +sensor.reset() +sensor.set_framesize(sensor.QVGA) + +grayscale_thres = (170, 255) +rgb565_thres = (70, 100, -128, 127, -128, 127) + +while(True): + + sensor.set_pixformat(sensor.GRAYSCALE) + for i in range(20): + img = sensor.snapshot() + img.binary([grayscale_thres]) + img.erode(2) + for i in range(20): + img = sensor.snapshot() + img.binary([grayscale_thres]) + img.dilate(2) + + sensor.set_pixformat(sensor.RGB565) + for i in range(20): + img = sensor.snapshot() + img.binary([rgb565_thres]) + img.erode(2) + for i in range(20): + img = sensor.snapshot() + img.binary([rgb565_thres]) + img.dilate(2) diff --git a/scripts/examples/OpenMV/04-Image-Filters/gamma_correction.py b/scripts/examples/OpenMV/04-Image-Filters/gamma_correction.py new file mode 100644 index 000000000..2dd1137c5 --- /dev/null +++ b/scripts/examples/OpenMV/04-Image-Filters/gamma_correction.py @@ -0,0 +1,21 @@ +# Gamma Correction +# +# This example shows off gamma correction to make the image brighter. The gamma +# correction method can also fix contrast and brightness.
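+# +# As a rough mental model, each channel is remapped like out = 255 * (in/255) ^ gamma, so the gamma = 0.5 used below brightens midtones while a gamma > 1 would darken them.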
+ +import sensor, image, time + +sensor.reset() +sensor.set_pixformat(sensor.RGB565) +sensor.set_framesize(sensor.QVGA) +sensor.skip_frames(time = 2000) +clock = time.clock() + +while(True): + clock.tick() + + # Gamma, contrast, and brightness correction are applied to each color channel. The + # values are scaled to the full range of each color channel for the image type... + img = sensor.snapshot().gamma_corr(gamma = 0.5, contrast = 1.0, brightness = 0.0) + + print(clock.fps()) diff --git a/scripts/examples/OpenMV/04-Image-Filters/grayscale_bilateral_filter.py b/scripts/examples/OpenMV/04-Image-Filters/grayscale_bilateral_filter.py new file mode 100644 index 000000000..6b3a67b21 --- /dev/null +++ b/scripts/examples/OpenMV/04-Image-Filters/grayscale_bilateral_filter.py @@ -0,0 +1,33 @@ +# Grayscale Bilateral Filter Example +# +# This example shows off using the bilateral filter on grayscale images. + +import sensor, image, time + +sensor.reset() # Initialize the camera sensor. +sensor.set_pixformat(sensor.GRAYSCALE) # or sensor.RGB565 +sensor.set_framesize(sensor.QQVGA) # or sensor.QVGA (or others) +sensor.skip_frames(time = 2000) # Let new settings take effect. +clock = time.clock() # Tracks FPS. + +while(True): + clock.tick() # Track elapsed milliseconds between snapshots(). + img = sensor.snapshot() # Take a picture and return the image. + + # color_sigma controls how close color-wise pixels have to be to each other to be + # blurred together. A smaller value means they have to be closer. + # A larger value is less strict. + + # space_sigma controls how close space-wise pixels have to be to each other to be + # blurred together. A smaller value means they have to be closer. + # A larger value is less strict. + + # Run the kernel on every pixel of the image. + img.bilateral(3, color_sigma=0.1, space_sigma=1) + + # Note that the bilateral filter can introduce image defects if you set + # color_sigma/space_sigma too aggressively. Increase the sigma values until + # the defects go away if you see them. + + print(clock.fps()) # Note: Your OpenMV Cam runs about half as fast while + # connected to your computer. The FPS should increase once disconnected. diff --git a/scripts/examples/OpenMV/04-Image-Filters/grayscale_binary_filter.py b/scripts/examples/OpenMV/04-Image-Filters/grayscale_binary_filter.py new file mode 100644 index 000000000..dfaed5012 --- /dev/null +++ b/scripts/examples/OpenMV/04-Image-Filters/grayscale_binary_filter.py @@ -0,0 +1,45 @@ +# Grayscale Binary Filter Example +# +# This script shows off the binary image filter. You may pass binary() any +# number of thresholds to segment the image by.
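+# +# For grayscale images each threshold is a (low, high) tuple of 8-bit values: +# pixels whose level falls inside a range become white and everything else +# becomes black. The two ranges below pick out the darkest and the brightest +# parts of the scene respectively.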
+ +import sensor, image, time + +sensor.reset() +sensor.set_framesize(sensor.QVGA) +sensor.set_pixformat(sensor.GRAYSCALE) +sensor.skip_frames(time = 2000) +clock = time.clock() + +low_threshold = (0, 50) +high_threshold = (205, 255) + +while(True): + + # Test low threshold + for i in range(100): + clock.tick() + img = sensor.snapshot() + img.binary([low_threshold]) + print(clock.fps()) + + # Test high threshold + for i in range(100): + clock.tick() + img = sensor.snapshot() + img.binary([high_threshold]) + print(clock.fps()) + + # Test not low threshold + for i in range(100): + clock.tick() + img = sensor.snapshot() + img.binary([low_threshold], invert = 1) + print(clock.fps()) + + # Test not high threshold + for i in range(100): + clock.tick() + img = sensor.snapshot() + img.binary([high_threshold], invert = 1) + print(clock.fps()) diff --git a/scripts/examples/OpenMV/04-Image-Filters/grayscale_light_removal.py b/scripts/examples/OpenMV/04-Image-Filters/grayscale_light_removal.py new file mode 100644 index 000000000..d42b8a8e9 --- /dev/null +++ b/scripts/examples/OpenMV/04-Image-Filters/grayscale_light_removal.py @@ -0,0 +1,25 @@ +# Grayscale Light Removal +# +# This example shows off how to remove bright lights from the image. +# You can do this using the binary() method with the "zero=" argument. +# +# Removing bright lights from the image allows you to now use +# histeq() on the image without outliers from oversaturated +# parts of the image breaking the algorithm... + +import sensor, image, time + +sensor.reset() # Initialize the camera sensor. +sensor.set_pixformat(sensor.GRAYSCALE) # or sensor.RGB565 +sensor.set_framesize(sensor.QQVGA) # or sensor.QVGA (or others) +sensor.skip_frames(time = 2000) # Let new settings take affect. +clock = time.clock() # Tracks FPS. + +thresholds = (220, 255) + +while(True): + clock.tick() # Track elapsed milliseconds between snapshots(). + img = sensor.snapshot().binary([thresholds], invert=False, zero=True) + + print(clock.fps()) # Note: Your OpenMV Cam runs about half as fast while + # connected to your computer. The FPS should increase once disconnected. diff --git a/scripts/examples/OpenMV/04-Image-Filters/histogram_equalization.py b/scripts/examples/OpenMV/04-Image-Filters/histogram_equalization.py new file mode 100644 index 000000000..2a3aece50 --- /dev/null +++ b/scripts/examples/OpenMV/04-Image-Filters/histogram_equalization.py @@ -0,0 +1,19 @@ +# Histogram Equalization +# +# This example shows off how to use histogram equalization to improve +# the contrast in the image. + +import sensor, image, time + +sensor.reset() +sensor.set_pixformat(sensor.RGB565) +sensor.set_framesize(sensor.QQVGA) +sensor.skip_frames(time = 2000) +clock = time.clock() + +while(True): + clock.tick() + + img = sensor.snapshot().histeq() + + print(clock.fps()) diff --git a/scripts/examples/OpenMV/04-Image-Filters/kernel_filters.py b/scripts/examples/OpenMV/04-Image-Filters/kernel_filters.py new file mode 100644 index 000000000..9b9dd565f --- /dev/null +++ b/scripts/examples/OpenMV/04-Image-Filters/kernel_filters.py @@ -0,0 +1,27 @@ +# Kernel Filtering Example +# +# This example shows off how to use a generic kernel filter. + +import sensor, image, time + +sensor.reset() # Initialize the camera sensor. +sensor.set_pixformat(sensor.GRAYSCALE) # or sensor.RGB565 +sensor.set_framesize(sensor.QVGA) # or sensor.QQVGA (or others) +sensor.skip_frames(time = 2000) # Let new settings take affect. +clock = time.clock() # Tracks FPS. 
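+ +# morph() convolves the image with the kernel below: each output pixel is the +# weighted sum of its NxN neighborhood, using the kernel entries as weights. +# The emboss-like kernel here subtracts the top-left neighbors and adds the +# bottom-right ones, which emphasizes diagonal edges.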
+ +kernel_size = 1 # 3x3==1, 5x5==2, 7x7==3, etc. + +kernel = [-2, -1, 0, \ + -1, 1, 1, \ + 0, 1, 2] + +while(True): + clock.tick() # Track elapsed milliseconds between snapshots(). + img = sensor.snapshot() # Take a picture and return the image. + + # Run the kernel on every pixel of the image. + img.morph(kernel_size, kernel) + + print(clock.fps()) # Note: Your OpenMV Cam runs about half as fast while + # connected to your computer. The FPS should increase once disconnected. diff --git a/scripts/examples/OpenMV/04-Image-Filters/lens_correction.py b/scripts/examples/OpenMV/04-Image-Filters/lens_correction.py new file mode 100644 index 000000000..56066f583 --- /dev/null +++ b/scripts/examples/OpenMV/04-Image-Filters/lens_correction.py @@ -0,0 +1,21 @@ +# Lens Correction +# +# This example shows off how to use the lens correction method to fix lens +# distortion in an image. You need to do this for qrcode / barcode / data matrix +# detection. Increase the strength below until lines are straight in the view. +# Zoom in (higher) or out (lower) until you see enough of the image. + +import sensor, image, time + +sensor.reset() +sensor.set_pixformat(sensor.RGB565) +sensor.set_framesize(sensor.QVGA) +sensor.skip_frames(time = 2000) +clock = time.clock() + +while(True): + clock.tick() + + img = sensor.snapshot().lens_corr(strength = 1.8, zoom = 1.0) + + print(clock.fps()) diff --git a/scripts/examples/OpenMV/04-Image-Filters/linear_polar.py b/scripts/examples/OpenMV/04-Image-Filters/linear_polar.py new file mode 100644 index 000000000..98106542f --- /dev/null +++ b/scripts/examples/OpenMV/04-Image-Filters/linear_polar.py @@ -0,0 +1,21 @@ +# Linear Polar Mapping Example +# +# This example shows off re-projecting the image using a linear polar +# transformation. Linear polar images are useful in that rotations +# become translations in the X direction and linear changes +# in scale become linear translations in the Y direction. + +import sensor, image, time + +sensor.reset() # Initialize the camera sensor. +sensor.set_pixformat(sensor.RGB565) # or sensor.GRAYSCALE +sensor.set_framesize(sensor.QQVGA) # or sensor.QVGA (or others) +sensor.skip_frames(time = 2000) # Let new settings take affect. +clock = time.clock() # Tracks FPS. + +while(True): + clock.tick() # Track elapsed milliseconds between snapshots(). + img = sensor.snapshot().linpolar(reverse=False) + + print(clock.fps()) # Note: Your OpenMV Cam runs about half as fast while + # connected to your computer. The FPS should increase once disconnected. diff --git a/scripts/examples/OpenMV/04-Image-Filters/log_polar.py b/scripts/examples/OpenMV/04-Image-Filters/log_polar.py new file mode 100644 index 000000000..d79f374b9 --- /dev/null +++ b/scripts/examples/OpenMV/04-Image-Filters/log_polar.py @@ -0,0 +1,21 @@ +# Log Polar Mapping Example +# +# This example shows off re-projecting the image using a log polar +# transformation. Log polar images are useful in that rotations +# become translations in the X direction and exponential changes +# in scale (x2, x4, etc.) become linear translations in the Y direction. + +import sensor, image, time + +sensor.reset() # Initialize the camera sensor. +sensor.set_pixformat(sensor.RGB565) # or sensor.GRAYSCALE +sensor.set_framesize(sensor.QQVGA) # or sensor.QVGA (or others) +sensor.skip_frames(time = 2000) # Let new settings take affect. +clock = time.clock() # Tracks FPS. + +while(True): + clock.tick() # Track elapsed milliseconds between snapshots(). 
+ img = sensor.snapshot().logpolar(reverse=False) + + print(clock.fps()) # Note: Your OpenMV Cam runs about half as fast while + # connected to your computer. The FPS should increase once disconnected. diff --git a/scripts/examples/OpenMV/04-Image-Filters/mean_adaptive_threshold_filter.py b/scripts/examples/OpenMV/04-Image-Filters/mean_adaptive_threshold_filter.py new file mode 100644 index 000000000..2d140ecc4 --- /dev/null +++ b/scripts/examples/OpenMV/04-Image-Filters/mean_adaptive_threshold_filter.py @@ -0,0 +1,25 @@ +# Mean Adaptive Threshold Filter Example +# +# This example shows off mean filtering with adaptive thresholding. +# When called with threshold=True the mean() method adaptively thresholds the image +# by comparing the mean of the pixels around a pixel, minus an offset, with that pixel. + +import sensor, image, time + +sensor.reset() # Initialize the camera sensor. +sensor.set_pixformat(sensor.RGB565) # or sensor.GRAYSCALE +sensor.set_framesize(sensor.QQVGA) # or sensor.QVGA (or others) +sensor.skip_frames(time = 2000) # Let new settings take effect. +clock = time.clock() # Tracks FPS. + +while(True): + clock.tick() # Track elapsed milliseconds between snapshots(). + img = sensor.snapshot() # Take a picture and return the image. + + # The first argument is the kernel size. N corresponds to a ((N*2)+1)^2 + # kernel size. E.g. 1 == 3x3 kernel, 2 == 5x5 kernel, etc. Note: You + # shouldn't ever need to use a value bigger than 2. + img.mean(1, threshold=True, offset=5, invert=True) + + print(clock.fps()) # Note: Your OpenMV Cam runs about half as fast while + # connected to your computer. The FPS should increase once disconnected. diff --git a/scripts/examples/OpenMV/04-Image-Filters/mean_filter.py b/scripts/examples/OpenMV/04-Image-Filters/mean_filter.py new file mode 100644 index 000000000..c6de0c81b --- /dev/null +++ b/scripts/examples/OpenMV/04-Image-Filters/mean_filter.py @@ -0,0 +1,25 @@ +# Mean Filter Example +# +# This example shows off mean filtering. Mean filtering is your standard average +# filter in an NxN neighborhood. Mean filtering removes noise in the image by +# blurring everything. But it's the fastest kernel filter operation. + +import sensor, image, time + +sensor.reset() # Initialize the camera sensor. +sensor.set_pixformat(sensor.RGB565) # or sensor.GRAYSCALE +sensor.set_framesize(sensor.QQVGA) # or sensor.QVGA (or others) +sensor.skip_frames(time = 2000) # Let new settings take effect. +clock = time.clock() # Tracks FPS. + +while(True): + clock.tick() # Track elapsed milliseconds between snapshots(). + img = sensor.snapshot() # Take a picture and return the image. + + # The only argument is the kernel size. N corresponds to a ((N*2)+1)^2 + # kernel size. E.g. 1 == 3x3 kernel, 2 == 5x5 kernel, etc. Note: You + # shouldn't ever need to use a value bigger than 2. + img.mean(1) + + print(clock.fps()) # Note: Your OpenMV Cam runs about half as fast while + # connected to your computer. The FPS should increase once disconnected. diff --git a/scripts/examples/OpenMV/04-Image-Filters/median_adaptive_threshold_filter.py b/scripts/examples/OpenMV/04-Image-Filters/median_adaptive_threshold_filter.py new file mode 100644 index 000000000..673b28482 --- /dev/null +++ b/scripts/examples/OpenMV/04-Image-Filters/median_adaptive_threshold_filter.py @@ -0,0 +1,27 @@ +# Median Adaptive Threshold Filter Example +# +# This example shows off median filtering with adaptive thresholding.
+# When called with threshold=True the median() method adaptively thresholds the image +# by comparing the median of the pixels around a pixel, minus an offset, with that pixel. + +import sensor, image, time + +sensor.reset() # Initialize the camera sensor. +sensor.set_pixformat(sensor.RGB565) # or sensor.GRAYSCALE +sensor.set_framesize(sensor.QQVGA) # or sensor.QVGA (or others) +sensor.skip_frames(time = 2000) # Let new settings take effect. +clock = time.clock() # Tracks FPS. + +while(True): + clock.tick() # Track elapsed milliseconds between snapshots(). + img = sensor.snapshot() # Take a picture and return the image. + + # The first argument to the median filter is the kernel size; it can be + # either 0, 1, or 2 for a 1x1, 3x3, or 5x5 kernel respectively. The second + # argument "percentile" is the percentile number to choose from the NxN + # neighborhood. 0.5 is the median, 0.25 is the lower quartile, and 0.75 + # would be the upper quartile. + img.median(1, percentile=0.5, threshold=True, offset=5, invert=True) + + print(clock.fps()) # Note: Your OpenMV Cam runs about half as fast while + # connected to your computer. The FPS should increase once disconnected. diff --git a/scripts/examples/OpenMV/04-Image-Filters/median_filter.py b/scripts/examples/OpenMV/04-Image-Filters/median_filter.py new file mode 100644 index 000000000..441464a36 --- /dev/null +++ b/scripts/examples/OpenMV/04-Image-Filters/median_filter.py @@ -0,0 +1,27 @@ +# Median Filter Example +# +# This example shows off median filtering. Median filtering replaces every pixel +# with the median value of its NxN neighborhood. Median filtering is good for +# removing noise in the image while preserving edges. + +import sensor, image, time + +sensor.reset() # Initialize the camera sensor. +sensor.set_pixformat(sensor.RGB565) # or sensor.GRAYSCALE +sensor.set_framesize(sensor.QQVGA) # or sensor.QVGA (or others) +sensor.skip_frames(time = 2000) # Let new settings take effect. +clock = time.clock() # Tracks FPS. + +while(True): + clock.tick() # Track elapsed milliseconds between snapshots(). + img = sensor.snapshot() # Take a picture and return the image. + + # The first argument to the median filter is the kernel size; it can be + # either 0, 1, or 2 for a 1x1, 3x3, or 5x5 kernel respectively. The second + # argument "percentile" is the percentile number to choose from the NxN + # neighborhood. 0.5 is the median, 0.25 is the lower quartile, and 0.75 + # would be the upper quartile. + img.median(1, percentile=0.5) + + print(clock.fps()) # Note: Your OpenMV Cam runs about half as fast while + # connected to your computer. The FPS should increase once disconnected. diff --git a/scripts/examples/OpenMV/04-Image-Filters/midpoint_adaptive_threshold_filter.py b/scripts/examples/OpenMV/04-Image-Filters/midpoint_adaptive_threshold_filter.py new file mode 100644 index 000000000..adaeaaa5d --- /dev/null +++ b/scripts/examples/OpenMV/04-Image-Filters/midpoint_adaptive_threshold_filter.py @@ -0,0 +1,28 @@ +# Midpoint Adaptive Threshold Filter Example +# +# This example shows off midpoint filtering with adaptive thresholding. +# When called with threshold=True the midpoint() method adaptively thresholds the image +# by comparing the midpoint of the pixels around a pixel, minus an offset, with that pixel. + +import sensor, image, time + +sensor.reset() # Initialize the camera sensor.
+sensor.set_pixformat(sensor.RGB565) # or sensor.GRAYSCALE +sensor.set_framesize(sensor.QQVGA) # or sensor.QVGA (or others) +sensor.skip_frames(time = 2000) # Let new settings take effect. +clock = time.clock() # Tracks FPS. + +while(True): + clock.tick() # Track elapsed milliseconds between snapshots(). + img = sensor.snapshot() # Take a picture and return the image. + + # The first argument is the kernel size. N corresponds to a ((N*2)+1)^2 + # kernel size. E.g. 1 == 3x3 kernel, 2 == 5x5 kernel, etc. Note: You + # shouldn't ever need to use a value bigger than 2. The "bias" argument + # lets you select between min and max blending. 0.5 == midpoint filter, + # 0.0 == min filter, and 1.0 == max filter. Note that the min filter + # makes images darker while the max filter makes images lighter. + img.midpoint(1, bias=0.5, threshold=True, offset=5, invert=True) + + print(clock.fps()) # Note: Your OpenMV Cam runs about half as fast while + # connected to your computer. The FPS should increase once disconnected. diff --git a/scripts/examples/OpenMV/04-Image-Filters/midpoint_filter.py b/scripts/examples/OpenMV/04-Image-Filters/midpoint_filter.py new file mode 100644 index 000000000..ee9ab5d94 --- /dev/null +++ b/scripts/examples/OpenMV/04-Image-Filters/midpoint_filter.py @@ -0,0 +1,27 @@ +# Midpoint Filter Example +# +# This example shows off midpoint filtering. Midpoint filtering replaces each +# pixel by the average of the min and max pixel values for an NxN neighborhood. + +import sensor, image, time + +sensor.reset() # Initialize the camera sensor. +sensor.set_pixformat(sensor.RGB565) # or sensor.GRAYSCALE +sensor.set_framesize(sensor.QQVGA) # or sensor.QVGA (or others) +sensor.skip_frames(time = 2000) # Let new settings take effect. +clock = time.clock() # Tracks FPS. + +while(True): + clock.tick() # Track elapsed milliseconds between snapshots(). + img = sensor.snapshot() # Take a picture and return the image. + + # The first argument is the kernel size. N corresponds to a ((N*2)+1)^2 + # kernel size. E.g. 1 == 3x3 kernel, 2 == 5x5 kernel, etc. Note: You + # shouldn't ever need to use a value bigger than 2. The "bias" argument + # lets you select between min and max blending. 0.5 == midpoint filter, + # 0.0 == min filter, and 1.0 == max filter. Note that the min filter + # makes images darker while the max filter makes images lighter. + img.midpoint(1, bias=0.5) + + print(clock.fps()) # Note: Your OpenMV Cam runs about half as fast while + # connected to your computer. The FPS should increase once disconnected. diff --git a/scripts/examples/OpenMV/04-Image-Filters/mode_adaptive_threshold_filter.py b/scripts/examples/OpenMV/04-Image-Filters/mode_adaptive_threshold_filter.py new file mode 100644 index 000000000..8ab9a0675 --- /dev/null +++ b/scripts/examples/OpenMV/04-Image-Filters/mode_adaptive_threshold_filter.py @@ -0,0 +1,25 @@ +# Mode Adaptive Threshold Filter Example +# +# This example shows off mode filtering with adaptive thresholding. +# When called with threshold=True the mode() method adaptively thresholds the image +# by comparing the mode of the pixels around a pixel, minus an offset, with that pixel. +# Avoid using the mode filter on RGB565 images. It will cause artifacts on image edges... + +import sensor, image, time + +sensor.reset() # Initialize the camera sensor. +sensor.set_pixformat(sensor.GRAYSCALE) # or sensor.RGB565 +sensor.set_framesize(sensor.QQVGA) # or sensor.QVGA (or others) +sensor.skip_frames(time = 2000) # Let new settings take effect. +clock = time.clock() # Tracks FPS.
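+ +# With threshold=True the filter compares each pixel against the mode of its +# neighborhood minus "offset": pixels brighter than that local statistic go to +# white and the rest to black (invert=True flips this), so the threshold adapts +# to local lighting instead of using one global cutoff.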
+ +while(True): + clock.tick() # Track elapsed milliseconds between snapshots(). + img = sensor.snapshot() # Take a picture and return the image. + + # The only argument to the mode filter is the kernel size; it can be + # either 0, 1, or 2 for a 1x1, 3x3, or 5x5 kernel respectively. + img.mode(1, threshold=True, offset=5, invert=True) + + print(clock.fps()) # Note: Your OpenMV Cam runs about half as fast while + # connected to your computer. The FPS should increase once disconnected. diff --git a/scripts/examples/OpenMV/04-Image-Filters/mode_filter.py b/scripts/examples/OpenMV/04-Image-Filters/mode_filter.py new file mode 100644 index 000000000..170937c58 --- /dev/null +++ b/scripts/examples/OpenMV/04-Image-Filters/mode_filter.py @@ -0,0 +1,25 @@ +# Mode Filter Example +# +# This example shows off mode filtering. Mode filtering is a highly non-linear +# operation which replaces each pixel with the mode of the NxN neighborhood +# of pixels around it. Avoid using the mode filter on RGB565 images. It will +# cause artifacts on image edges... + +import sensor, image, time + +sensor.reset() # Initialize the camera sensor. +sensor.set_pixformat(sensor.GRAYSCALE) # or sensor.RGB565 +sensor.set_framesize(sensor.QQVGA) # or sensor.QVGA (or others) +sensor.skip_frames(time = 2000) # Let new settings take effect. +clock = time.clock() # Tracks FPS. + +while(True): + clock.tick() # Track elapsed milliseconds between snapshots(). + img = sensor.snapshot() # Take a picture and return the image. + + # The only argument to the mode filter is the kernel size; it can be + # either 0, 1, or 2 for a 1x1, 3x3, or 5x5 kernel respectively. + img.mode(1) + + print(clock.fps()) # Note: Your OpenMV Cam runs about half as fast while + # connected to your computer. The FPS should increase once disconnected. diff --git a/scripts/examples/OpenMV/04-Image-Filters/negative.py b/scripts/examples/OpenMV/04-Image-Filters/negative.py new file mode 100644 index 000000000..36186cba9 --- /dev/null +++ b/scripts/examples/OpenMV/04-Image-Filters/negative.py @@ -0,0 +1,19 @@ +# Negative Example +# +# This example shows off negating the image. This is not a particularly +# useful method but it can come in handy once in a while. + +import sensor, image, time + +sensor.reset() # Initialize the camera sensor. +sensor.set_pixformat(sensor.RGB565) # or sensor.GRAYSCALE +sensor.set_framesize(sensor.QVGA) # or sensor.QQVGA (or others) +sensor.skip_frames(time = 2000) # Let new settings take effect. +clock = time.clock() # Tracks FPS. + +while(True): + clock.tick() # Track elapsed milliseconds between snapshots(). + img = sensor.snapshot().negate() + + print(clock.fps()) # Note: Your OpenMV Cam runs about half as fast while + # connected to your computer. The FPS should increase once disconnected. diff --git a/scripts/examples/OpenMV/04-Image-Filters/perspective_and_rotation_correction.py b/scripts/examples/OpenMV/04-Image-Filters/perspective_and_rotation_correction.py new file mode 100644 index 000000000..b89117d40 --- /dev/null +++ b/scripts/examples/OpenMV/04-Image-Filters/perspective_and_rotation_correction.py @@ -0,0 +1,71 @@ +# Rotation Correction +# +# This example shows off how to use rotation_corr() to both correct for +# perspective distortion and then rotate the new corrected image in 3D +# space afterwards to handle movement.
+ +import sensor, image, time + +sensor.reset() +sensor.set_pixformat(sensor.RGB565) +sensor.set_framesize(sensor.QVGA) +sensor.skip_frames(time = 2000) +clock = time.clock() + +# The image will be warped such that the following points become the new: +# +# (0, 0) +# (w-1, 0) +# (w-1, h-1) +# (0, h-1) +# +# Try setting the points below to the corners of a quadrilateral +# (in clock-wise order) in the field-of-view. You can get points +# on the image by clicking and dragging on the frame buffer and +# recording the values shown in the histogram widget. + +w = sensor.width() +h = sensor.height() + +TARGET_POINTS = [(0, 0), # (x, y) CHANGE ME! + (w-1, 0), # (x, y) CHANGE ME! + (w-1, h-1), # (x, y) CHANGE ME! + (0, h-1)] # (x, y) CHANGE ME! + +# Degrees per frame to rotation by... +X_ROTATION_DEGREE_RATE = 5 +Y_ROTATION_DEGREE_RATE = 0.5 +Z_ROTATION_DEGREE_RATE = 0 +X_OFFSET = 0 +Y_OFFSET = 0 + +ZOOM_AMOUNT = 1 # Lower zooms out - Higher zooms in. +FOV_WINDOW = 25 # Between 0 and 180. Represents the field-of-view of the scene + # window when rotating the image in 3D space. When closer to + # zero results in lines becoming straighter as the window + # moves away from the image being rotated in 3D space. A large + # value moves the window closer to the image in 3D space which + # results in the more perspective distortion and sometimes + # the image in 3D intersecting the scene window. + +x_rotation_counter = 0 +y_rotation_counter = 0 +z_rotation_counter = 0 + +while(True): + clock.tick() + + img = sensor.snapshot().rotation_corr(x_rotation = x_rotation_counter, \ + y_rotation = y_rotation_counter, \ + z_rotation = z_rotation_counter, \ + x_translation = X_OFFSET, \ + y_translation = Y_OFFSET, \ + zoom = ZOOM_AMOUNT, \ + fov = FOV_WINDOW, \ + corners = TARGET_POINTS) + + x_rotation_counter += X_ROTATION_DEGREE_RATE + y_rotation_counter += Y_ROTATION_DEGREE_RATE + z_rotation_counter += Z_ROTATION_DEGREE_RATE + + print(clock.fps()) diff --git a/scripts/examples/OpenMV/04-Image-Filters/perspective_correction.py b/scripts/examples/OpenMV/04-Image-Filters/perspective_correction.py new file mode 100644 index 000000000..a8f2aa58d --- /dev/null +++ b/scripts/examples/OpenMV/04-Image-Filters/perspective_correction.py @@ -0,0 +1,39 @@ +# Perspective Correction +# +# This example shows off how to use the rotation_corr() to fix perspective +# issues related to how your OpenMV Cam is mounted. + +import sensor, image, time + +sensor.reset() +sensor.set_pixformat(sensor.RGB565) +sensor.set_framesize(sensor.QVGA) +sensor.skip_frames(time = 2000) +clock = time.clock() + +# The image will be warped such that the following points become the new: +# +# (0, 0) +# (w-1, 0) +# (w-1, h-1) +# (0, h-1) +# +# Try setting the points below to the corners of a quadrilateral +# (in clock-wise order) in the field-of-view. You can get points +# on the image by clicking and dragging on the frame buffer and +# recording the values shown in the histogram widget. + +w = sensor.width() +h = sensor.height() + +TARGET_POINTS = [(0, 0), # (x, y) CHANGE ME! + (w-1, 0), # (x, y) CHANGE ME! + (w-1, h-1), # (x, y) CHANGE ME! + (0, h-1)] # (x, y) CHANGE ME! 
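+ +# Conceptually this is a perspective (homography) warp: the four source corners +# above are mapped onto the four destination corners listed in the comment +# above, which is why the points must be given in the same clockwise order.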
+ +while(True): + clock.tick() + + img = sensor.snapshot().rotation_corr(corners = TARGET_POINTS) + + print(clock.fps()) diff --git a/scripts/examples/OpenMV/04-Image-Filters/rotation_correction.py b/scripts/examples/OpenMV/04-Image-Filters/rotation_correction.py new file mode 100644 index 000000000..b95e41d78 --- /dev/null +++ b/scripts/examples/OpenMV/04-Image-Filters/rotation_correction.py @@ -0,0 +1,49 @@ +# Rotation Correction +# +# This example shows off how to use the rotation_corr() to play with the scene +# window your OpenMV Cam sees. + +import sensor, image, time + +# Degrees per frame to rotation by... +X_ROTATION_DEGREE_RATE = 5 +Y_ROTATION_DEGREE_RATE = 0.5 +Z_ROTATION_DEGREE_RATE = 0 +X_OFFSET = 0 +Y_OFFSET = 0 + +ZOOM_AMOUNT = 1 # Lower zooms out - Higher zooms in. +FOV_WINDOW = 60 # Between 0 and 180. Represents the field-of-view of the scene + # window when rotating the image in 3D space. When closer to + # zero results in lines becoming straighter as the window + # moves away from the image being rotated in 3D space. A large + # value moves the window closer to the image in 3D space which + # results in the more perspective distortion and sometimes + # the image in 3D intersecting the scene window. + +sensor.reset() +sensor.set_pixformat(sensor.RGB565) +sensor.set_framesize(sensor.QVGA) +sensor.skip_frames(time = 2000) +clock = time.clock() + +x_rotation_counter = 0 +y_rotation_counter = 0 +z_rotation_counter = 0 + +while(True): + clock.tick() + + img = sensor.snapshot().rotation_corr(x_rotation = x_rotation_counter, \ + y_rotation = y_rotation_counter, \ + z_rotation = z_rotation_counter, \ + x_translation = X_OFFSET, \ + y_translation = Y_OFFSET, \ + zoom = ZOOM_AMOUNT, \ + fov = FOV_WINDOW) + + x_rotation_counter += X_ROTATION_DEGREE_RATE + y_rotation_counter += Y_ROTATION_DEGREE_RATE + z_rotation_counter += Z_ROTATION_DEGREE_RATE + + print(clock.fps()) diff --git a/scripts/examples/OpenMV/04-Image-Filters/sharpen_filter.py b/scripts/examples/OpenMV/04-Image-Filters/sharpen_filter.py new file mode 100644 index 000000000..0f541e203 --- /dev/null +++ b/scripts/examples/OpenMV/04-Image-Filters/sharpen_filter.py @@ -0,0 +1,21 @@ +# Sharpen Filter Example +# +# This example shows off using the laplacian filter to sharpen images. + +import sensor, image, time + +sensor.reset() # Initialize the camera sensor. +sensor.set_pixformat(sensor.GRAYSCALE) # or sensor.RGB565 +sensor.set_framesize(sensor.QQVGA) # or sensor.QVGA (or others) +sensor.skip_frames(time = 2000) # Let new settings take affect. +clock = time.clock() # Tracks FPS. + +while(True): + clock.tick() # Track elapsed milliseconds between snapshots(). + img = sensor.snapshot() # Take a picture and return the image. + + # Run the kernel on every pixel of the image. + img.laplacian(1, sharpen=True) + + print(clock.fps()) # Note: Your OpenMV Cam runs about half as fast while + # connected to your computer. The FPS should increase once disconnected. diff --git a/scripts/examples/OpenMV/04-Image-Filters/ulab.py b/scripts/examples/OpenMV/04-Image-Filters/ulab.py new file mode 100644 index 000000000..8af9d1a38 --- /dev/null +++ b/scripts/examples/OpenMV/04-Image-Filters/ulab.py @@ -0,0 +1,18 @@ +# Ulab is a numpy-like module for micropython, meant to simplify and speed up common +# mathematical operations on arrays. This basic example shows mean/std on an image. +# +# NOTE: ndarrays cause the heap to be fragmented easily. 
If you run out of memory, +# there's not much that can be done about it; lowering the resolution might help. + +import sensor, image, time, ulab as np +from ulab import numerical + +sensor.reset() # Reset and initialize the sensor. +sensor.set_pixformat(sensor.GRAYSCALE) # Set pixel format to GRAYSCALE (or RGB565) +sensor.set_framesize(sensor.QQVGA) # Set frame size to QQVGA (160x120) +clock = time.clock() # Create a clock object to track the FPS. + +while (True): + img = sensor.snapshot() # Take a picture and return the image. + a = np.array(img, dtype=np.uint8) + print("mean: %d std:%d"%(numerical.mean(a), numerical.std(a))) diff --git a/scripts/examples/OpenMV/04-Image-Filters/unsharp_filter.py b/scripts/examples/OpenMV/04-Image-Filters/unsharp_filter.py new file mode 100644 index 000000000..eb8eb2270 --- /dev/null +++ b/scripts/examples/OpenMV/04-Image-Filters/unsharp_filter.py @@ -0,0 +1,21 @@ +# Unsharp Filter Example +# +# This example shows off using the gaussian filter to unsharp mask filter images. + +import sensor, image, time + +sensor.reset() # Initialize the camera sensor. +sensor.set_pixformat(sensor.GRAYSCALE) # or sensor.RGB565 +sensor.set_framesize(sensor.QQVGA) # or sensor.QVGA (or others) +sensor.skip_frames(time = 2000) # Let new settings take effect. +clock = time.clock() # Tracks FPS. + +while(True): + clock.tick() # Track elapsed milliseconds between snapshots(). + img = sensor.snapshot() # Take a picture and return the image. + + # Run the kernel on every pixel of the image. + img.gaussian(1, unsharp=True) + + print(clock.fps()) # Note: Your OpenMV Cam runs about half as fast while + # connected to your computer. The FPS should increase once disconnected. diff --git a/scripts/examples/OpenMV/04-Image-Filters/vflip_hmirror_transpose.py b/scripts/examples/OpenMV/04-Image-Filters/vflip_hmirror_transpose.py new file mode 100644 index 000000000..bf43c4e73 --- /dev/null +++ b/scripts/examples/OpenMV/04-Image-Filters/vflip_hmirror_transpose.py @@ -0,0 +1,33 @@ +# Vertical Flip - Horizontal Mirror - Transpose +# +# This example shows off how to vertically flip, horizontally mirror, or +# transpose an image. Note that: +# +# vflip=False, hmirror=False, transpose=False -> 0 degree rotation +# vflip=True, hmirror=False, transpose=True -> 90 degree rotation +# vflip=True, hmirror=True, transpose=False -> 180 degree rotation +# vflip=False, hmirror=True, transpose=True -> 270 degree rotation + +import sensor, image, time, pyb + +sensor.reset() +sensor.set_pixformat(sensor.RGB565) +sensor.set_framesize(sensor.QVGA) +sensor.skip_frames(time = 2000) +clock = time.clock() + +mills = pyb.millis() +counter = 0 + +while(True): + clock.tick() + + img = sensor.snapshot().replace(vflip=(counter//2)%2, + hmirror=(counter//4)%2, + transpose=(counter//8)%2) + + if (pyb.millis() > (mills + 1000)): + mills = pyb.millis() + counter += 1 + + print(clock.fps()) diff --git a/scripts/examples/OpenMV/05-Snapshot/emboss_snapshot.py b/scripts/examples/OpenMV/05-Snapshot/emboss_snapshot.py new file mode 100644 index 000000000..69bc105c6 --- /dev/null +++ b/scripts/examples/OpenMV/05-Snapshot/emboss_snapshot.py @@ -0,0 +1,33 @@ +# Emboss Snapshot Example +# +# Note: You will need an SD card to run this example. +# +# You can use your OpenMV Cam to save modified image files. + +import sensor, image, pyb + +RED_LED_PIN = 1 +BLUE_LED_PIN = 3 + +sensor.reset() # Initialize the camera sensor.
+sensor.set_pixformat(sensor.RGB565) # or sensor.GRAYSCALE +sensor.set_framesize(sensor.QVGA) # or sensor.QQVGA (or others) +sensor.skip_frames(time = 2000) # Let new settings take effect. + +pyb.LED(RED_LED_PIN).on() +sensor.skip_frames(time = 2000) # Give the user time to get ready. + +pyb.LED(RED_LED_PIN).off() +pyb.LED(BLUE_LED_PIN).on() + +print("You're on camera!") +img = sensor.snapshot() + +img.morph(1, [+2, +1, +0,\ + +1, +1, -1,\ + +0, -1, -2]) # Emboss the image. + +img.save("example.jpg") # or "example.bmp" (or others) + +pyb.LED(BLUE_LED_PIN).off() +print("Done! Reset the camera to see the saved image.") diff --git a/scripts/examples/OpenMV/05-Snapshot/snapshot.py b/scripts/examples/OpenMV/05-Snapshot/snapshot.py new file mode 100644 index 000000000..8eb621e6d --- /dev/null +++ b/scripts/examples/OpenMV/05-Snapshot/snapshot.py @@ -0,0 +1,27 @@ +# Snapshot Example +# +# Note: You will need an SD card to run this example. +# +# You can use your OpenMV Cam to save image files. + +import sensor, image, pyb + +RED_LED_PIN = 1 +BLUE_LED_PIN = 3 + +sensor.reset() # Initialize the camera sensor. +sensor.set_pixformat(sensor.RGB565) # or sensor.GRAYSCALE +sensor.set_framesize(sensor.QVGA) # or sensor.QQVGA (or others) +sensor.skip_frames(time = 2000) # Let new settings take effect. + +pyb.LED(RED_LED_PIN).on() +sensor.skip_frames(time = 2000) # Give the user time to get ready. + +pyb.LED(RED_LED_PIN).off() +pyb.LED(BLUE_LED_PIN).on() + +print("You're on camera!") +sensor.snapshot().save("example.jpg") # or "example.bmp" (or others) + +pyb.LED(BLUE_LED_PIN).off() +print("Done! Reset the camera to see the saved image.") diff --git a/scripts/examples/OpenMV/05-Snapshot/snapshot_on_face_detection.py b/scripts/examples/OpenMV/05-Snapshot/snapshot_on_face_detection.py new file mode 100644 index 000000000..a716df263 --- /dev/null +++ b/scripts/examples/OpenMV/05-Snapshot/snapshot_on_face_detection.py @@ -0,0 +1,51 @@ +# Snapshot on Face Detection Example +# +# Note: You will need an SD card to run this example. +# +# This example demonstrates using face tracking on your OpenMV Cam to take a +# picture. + +import sensor, image, pyb + +RED_LED_PIN = 1 +BLUE_LED_PIN = 3 + +sensor.reset() # Initialize the camera sensor. +sensor.set_pixformat(sensor.GRAYSCALE) +sensor.set_framesize(sensor.HQVGA) # or sensor.QQVGA (or others) +sensor.skip_frames(time = 2000) # Let new settings take effect. + +# Load up a face detection HaarCascade. This is an object that your OpenMV Cam +# can use to detect faces using the find_features() method below. Your OpenMV +# Cam has the frontalface HaarCascade built-in. By default, all the stages of the +# HaarCascade are loaded. However, you can adjust the number of stages to speed +# up processing at the expense of accuracy. The frontalface HaarCascade has 25 +# stages. +face_cascade = image.HaarCascade("frontalface", stages=25) + +while(True): + + pyb.LED(RED_LED_PIN).on() + print("About to start detecting faces...") + sensor.skip_frames(time = 2000) # Give the user time to get ready. + + pyb.LED(RED_LED_PIN).off() + print("Now detecting faces!") + pyb.LED(BLUE_LED_PIN).on() + + diff = 10 # We'll say we detected a face after 10 frames. + while(diff): + img = sensor.snapshot() + # Threshold can be between 0.0 and 1.0. A higher threshold results in a + # higher detection rate with more false positives. The scale value + # controls the matching scale allowing you to detect smaller faces.
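+ # find_features() returns a list of bounding-box tuples (x, y, w, h), one + # per detected face, which draw_rectangle() below accepts directly.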
+ faces = img.find_features(face_cascade, threshold=0.5, scale_factor=1.5) + + if faces: + diff -= 1 + for r in faces: + img.draw_rectangle(r) + + pyb.LED(BLUE_LED_PIN).off() + print("Face detected! Saving image...") + sensor.snapshot().save("snapshot-%d.jpg" % pyb.rng()) # Save Pic. diff --git a/scripts/examples/OpenMV/05-Snapshot/snapshot_on_movement.py b/scripts/examples/OpenMV/05-Snapshot/snapshot_on_movement.py new file mode 100644 index 000000000..4b0a625e1 --- /dev/null +++ b/scripts/examples/OpenMV/05-Snapshot/snapshot_on_movement.py @@ -0,0 +1,45 @@ +# Snapshot on Movement Example +# +# Note: You will need an SD card to run this example. +# +# This example demonstrates using frame differencing with your OpenMV Cam to do +# motion detection. After motion is detected your OpenMV Cam will take a picture. + +import sensor, image, pyb, os + +RED_LED_PIN = 1 +BLUE_LED_PIN = 3 + +sensor.reset() # Initialize the camera sensor. +sensor.set_pixformat(sensor.RGB565) # or sensor.GRAYSCALE +sensor.set_framesize(sensor.QVGA) # or sensor.QQVGA (or others) +sensor.skip_frames(time = 2000) # Let new settings take effect. +sensor.set_auto_whitebal(False) # Turn off white balance. + +if "temp" not in os.listdir(): os.mkdir("temp") # Make a temp directory + +while(True): + + pyb.LED(RED_LED_PIN).on() + print("About to save background image...") + sensor.skip_frames(time = 2000) # Give the user time to get ready. + + pyb.LED(RED_LED_PIN).off() + sensor.snapshot().save("temp/bg.bmp") + print("Saved background image - Now detecting motion!") + pyb.LED(BLUE_LED_PIN).on() + + diff = 10 # We'll say we detected motion after 10 frames of motion. + while(diff): + img = sensor.snapshot() + img.difference("temp/bg.bmp") + stats = img.statistics() + # Stats 5 is the max of the lighting color channel. The below code + # triggers when the lighting max for the whole image goes above 20. + # The lighting difference maximum should be zero normally. + if (stats[5] > 20): + diff -= 1 + + pyb.LED(BLUE_LED_PIN).off() + print("Movement detected! Saving image...") + sensor.snapshot().save("temp/snapshot-%d.jpg" % pyb.rng()) # Save Pic. diff --git a/scripts/examples/OpenMV/05-Snapshot/time_lapse_photos.py b/scripts/examples/OpenMV/05-Snapshot/time_lapse_photos.py new file mode 100644 index 000000000..be6c370dd --- /dev/null +++ b/scripts/examples/OpenMV/05-Snapshot/time_lapse_photos.py @@ -0,0 +1,67 @@ +# Time Lapse Photos (Credit nedhorning) +# +# This example shows off how to take time lapse photos using your OpenMV +# Cam and using the RTC module along with a timer interrupt to achieve +# very low power operation. +# +# Note that if the USB is still plugged in when the camera is taking +# pictures it will run the bootloader each time. Please power the camera +# from something other than USB to not have the bootloader run. + +import pyb, machine, sensor, image, os + +# Create and init RTC object. This will allow us to set the current time for +# the RTC and let us set an interrupt to wake up later on. +rtc = pyb.RTC() +newFile = False + +try: + os.stat('time.txt') +except OSError: # If the log file doesn't exist then set the RTC and set newFile to True + # datetime format: year, month, day, weekday (Monday=1, Sunday=7), + # hours (24 hour clock), minutes, seconds, subseconds (counts down from 255 to 0) + rtc.datetime((2018, 3, 9, 5, 13, 0, 0, 0)) + newFile = True + +# Extract the date and time from the RTC object.
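+# rtc.datetime() returns an 8-tuple of (year, month, day, weekday, hours, +# minutes, seconds, subseconds), which is why the indices below skip index 3, +# the weekday.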
+dateTime = rtc.datetime() +year = str(dateTime[0]) +month = '%02d' % dateTime[1] +day = '%02d' % dateTime[2] +hour = '%02d' % dateTime[4] +minute = '%02d' % dateTime[5] +second = '%02d' % dateTime[6] +subSecond = str(dateTime[7]) + +newName='I'+year+month+day+hour+minute+second # Image file name based on RTC + +# Enable RTC interrupts every 10 seconds, camera will RESET after wakeup from deepsleep Mode. +rtc.wakeup(10000) + +BLUE_LED_PIN = 3 + +sensor.reset() # Initialize the camera sensor. +sensor.set_pixformat(sensor.GRAYSCALE) +sensor.set_framesize(sensor.VGA) +sensor.skip_frames(time = 1000) # Let new settings take affect. + +# Let folks know we are about to take a picture. +pyb.LED(BLUE_LED_PIN).on() + +if(newFile): # If log file does not exist then create it. + with open('time.txt', 'a') as timeFile: # Write text file to keep track of date, time and image number. + timeFile.write('Date and time format: year, month, day, hours, minutes, seconds, subseconds' + '\n') + timeFile.write(newName + ',' + year + ',' + month + ',' + day + ',' + hour + ',' + minute + ',' + second + ',' + subSecond + '\n') +else: + with open('time.txt', 'a') as timeFile: # Append to date, time and image number to text file. + timeFile.write(newName + ',' + year + ',' + month + ',' + day + ',' + hour + ',' + minute + ',' + second + ',' + subSecond + '\n') + +if not "images" in os.listdir(): os.mkdir("images") # Make a temp directory + +# Take photo and save to SD card +img = sensor.snapshot() +img.save('images/' + newName, quality=90) +pyb.LED(BLUE_LED_PIN).off() + +# Enter Deepsleep Mode (i.e. the OpenMV Cam effectively turns itself off except for the RTC). +machine.deepsleep() diff --git a/scripts/examples/OpenMV/06-Video-Recording/gif.py b/scripts/examples/OpenMV/06-Video-Recording/gif.py new file mode 100644 index 000000000..35f0933d7 --- /dev/null +++ b/scripts/examples/OpenMV/06-Video-Recording/gif.py @@ -0,0 +1,37 @@ +# GIF Video Recording Example +# +# Note: You will need an SD card to run this example. +# +# You can use your OpenMV Cam to record gif files. You can either feed the +# recorder object RGB565 frames or Grayscale frames. Use photo editing software +# like GIMP to compress and optimize the Gif before uploading it to the web. + +import sensor, image, time, gif, pyb + +RED_LED_PIN = 1 +BLUE_LED_PIN = 3 + +sensor.reset() # Initialize the camera sensor. +sensor.set_pixformat(sensor.RGB565) # or sensor.GRAYSCALE +sensor.set_framesize(sensor.QQVGA) # or sensor.QVGA (or others) +sensor.skip_frames(time = 2000) # Let new settings take affect. +clock = time.clock() # Tracks FPS. + +pyb.LED(RED_LED_PIN).on() +sensor.skip_frames(time = 2000) # Give the user time to get ready. + +pyb.LED(RED_LED_PIN).off() +pyb.LED(BLUE_LED_PIN).on() + +g = gif.Gif("example.gif", loop=True) + +print("You're on camera!") +for i in range(100): + clock.tick() + # clock.avg() returns the milliseconds between frames - gif delay is in + g.add_frame(sensor.snapshot(), delay=int(clock.avg()/10)) # centiseconds. + print(clock.fps()) + +g.close() +pyb.LED(BLUE_LED_PIN).off() +print("Done! 
Reset the camera to see the saved recording.") diff --git a/scripts/examples/OpenMV/06-Video-Recording/gif_on_face_detection.py b/scripts/examples/OpenMV/06-Video-Recording/gif_on_face_detection.py new file mode 100644 index 000000000..0732ca1c8 --- /dev/null +++ b/scripts/examples/OpenMV/06-Video-Recording/gif_on_face_detection.py @@ -0,0 +1,65 @@ +# GIF Video Recording on Face Detection Example +# +# Note: You will need an SD card to run this example. +# +# You can use your OpenMV Cam to record gif files. You can either feed the +# recorder object RGB565 frames or Grayscale frames. Use photo editing software +# like GIMP to compress and optimize the Gif before uploading it to the web. +# +# This example demonstrates using face tracking on your OpenMV Cam to take a +# gif. + +import sensor, image, time, gif, pyb + +RED_LED_PIN = 1 +BLUE_LED_PIN = 3 + +sensor.reset() # Initialize the camera sensor. +sensor.set_pixformat(sensor.GRAYSCALE) # or sensor.RGB565 +sensor.set_framesize(sensor.QQVGA) # or sensor.HQVGA (or others) +sensor.skip_frames(time = 2000) # Let new settings take effect. + +# Load up a face detection HaarCascade. This is an object that your OpenMV Cam +# can use to detect faces using the find_features() method below. Your OpenMV +# Cam has the frontalface HaarCascade built-in. By default, all the stages of the +# HaarCascade are loaded. However, you can adjust the number of stages to speed +# up processing at the expense of accuracy. The frontalface HaarCascade has 25 +# stages. +face_cascade = image.HaarCascade("frontalface", stages=25) + +while(True): + + pyb.LED(RED_LED_PIN).on() + print("About to start detecting faces...") + sensor.skip_frames(time = 2000) # Give the user time to get ready. + + pyb.LED(RED_LED_PIN).off() + print("Now detecting faces!") + pyb.LED(BLUE_LED_PIN).on() + + diff = 10 # We'll say we detected a face after 10 frames. + while(diff): + img = sensor.snapshot() + # Threshold can be between 0.0 and 1.0. A higher threshold results in a + # higher detection rate with more false positives. The scale value + # controls the matching scale allowing you to detect smaller faces. + faces = img.find_features(face_cascade, threshold=0.5, scale_factor=1.5) + + if faces: + diff -= 1 + for r in faces: + img.draw_rectangle(r) + + g = gif.Gif("example-%d.gif" % pyb.rng(), loop=True) + + clock = time.clock() # Tracks FPS. + print("You're on camera!") + for i in range(100): + clock.tick() + # clock.avg() returns the milliseconds between frames - gif delay is in + g.add_frame(sensor.snapshot(), delay=int(clock.avg()/10)) # centiseconds. + print(clock.fps()) + + g.close() + pyb.LED(BLUE_LED_PIN).off() + print("Restarting...") diff --git a/scripts/examples/OpenMV/06-Video-Recording/gif_on_movement.py b/scripts/examples/OpenMV/06-Video-Recording/gif_on_movement.py new file mode 100644 index 000000000..4cf3fbae6 --- /dev/null +++ b/scripts/examples/OpenMV/06-Video-Recording/gif_on_movement.py @@ -0,0 +1,59 @@ +# GIF Video Recording on Movement Example +# +# Note: You will need an SD card to run this example. +# +# You can use your OpenMV Cam to record gif files. You can either feed the +# recorder object RGB565 frames or Grayscale frames. Use photo editing software +# like GIMP to compress and optimize the Gif before uploading it to the web. +# +# This example demonstrates using frame differencing with your OpenMV Cam to do +# motion detection. After motion is detected your OpenMV Cam will record video.
+ +import sensor, image, time, gif, pyb, os + +RED_LED_PIN = 1 +BLUE_LED_PIN = 3 + +sensor.reset() # Initialize the camera sensor. +sensor.set_pixformat(sensor.RGB565) # or sensor.GRAYSCALE +sensor.set_framesize(sensor.QQVGA) # or sensor.QVGA (or others) +sensor.skip_frames(time = 2000) # Let new settings take effect. +sensor.set_auto_whitebal(False) # Turn off white balance. + +if "temp" not in os.listdir(): os.mkdir("temp") # Make a temp directory + +while(True): + + pyb.LED(RED_LED_PIN).on() + print("About to save background image...") + sensor.skip_frames(time = 2000) # Give the user time to get ready. + + pyb.LED(RED_LED_PIN).off() + sensor.snapshot().save("temp/bg.bmp") + print("Saved background image - Now detecting motion!") + pyb.LED(BLUE_LED_PIN).on() + + diff = 10 # We'll say we detected motion after 10 frames of motion. + while(diff): + img = sensor.snapshot() + img.difference("temp/bg.bmp") + stats = img.statistics() + # Stats 5 is the max of the lighting color channel. The below code + # triggers when the lighting max for the whole image goes above 20. + # The lighting difference maximum should be zero normally. + if (stats[5] > 20): + diff -= 1 + + g = gif.Gif("example-%d.gif" % pyb.rng(), loop=True) + + clock = time.clock() # Tracks FPS. + print("You're on camera!") + for i in range(100): + clock.tick() + # clock.avg() returns the milliseconds between frames - gif delay is in + g.add_frame(sensor.snapshot(), delay=int(clock.avg()/10)) # centiseconds. + print(clock.fps()) + + g.close() + pyb.LED(BLUE_LED_PIN).off() + print("Restarting...") diff --git a/scripts/examples/OpenMV/06-Video-Recording/image_reader.py b/scripts/examples/OpenMV/06-Video-Recording/image_reader.py new file mode 100644 index 000000000..4cecf3f51 --- /dev/null +++ b/scripts/examples/OpenMV/06-Video-Recording/image_reader.py @@ -0,0 +1,28 @@ +# Image Reader Example +# +# USE THIS EXAMPLE WITH A uSD CARD! +# +# This example shows how to use the Image Reader object to replay snapshots that were +# saved by the Image Writer object, for testing machine vision algorithms on recorded data. + +# Altered to allow full-speed reading from the SD card for extraction of sequences to the network etc. +# Set the new "pause" parameter to False. + +import sensor, image, time + +snapshot_source = False # Set to true once finished to pull data from sensor. + +sensor.reset() +sensor.set_pixformat(sensor.RGB565) +sensor.set_framesize(sensor.QQVGA) +sensor.skip_frames(time = 2000) +clock = time.clock() + +img_reader = None if snapshot_source else image.ImageReader("/stream.bin") + +while(True): + clock.tick() + img = sensor.snapshot() if snapshot_source else img_reader.next_frame(copy_to_fb=True, loop=True, pause=True) + # Do machine vision algorithms on the image here. + + print(clock.fps()) diff --git a/scripts/examples/OpenMV/06-Video-Recording/image_writer.py b/scripts/examples/OpenMV/06-Video-Recording/image_writer.py new file mode 100644 index 000000000..530ba3a19 --- /dev/null +++ b/scripts/examples/OpenMV/06-Video-Recording/image_writer.py @@ -0,0 +1,43 @@ +# Image Writer Example +# +# USE THIS EXAMPLE WITH A uSD CARD! Reset the camera after recording to see the file. +# +# This example shows how to use the Image Writer object to record snapshots of what your +# OpenMV Cam sees for later analysis using the Image Reader object. Images written to disk +# by the Image Writer object are stored in a simple file format readable by your OpenMV Cam.
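+# +# Frames are stored uncompressed, so the stream grows quickly: at QQVGA +# (160x120) in RGB565 (2 bytes per pixel) each frame is about 160*120*2 = +# 38,400 bytes, or roughly 1.1 MB per second at 30 FPS, ignoring any +# per-frame header overhead.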
+ +import sensor, image, pyb, time + +record_time = 10000 # 10 seconds in milliseconds + +sensor.reset() +sensor.set_pixformat(sensor.RGB565) +sensor.set_framesize(sensor.QQVGA) +sensor.skip_frames(time = 2000) +clock = time.clock() + +img_writer = image.ImageWriter("/stream.bin") + +# Red LED on means we are capturing frames. +red_led = pyb.LED(1) +red_led.on() + +start = pyb.millis() +while pyb.elapsed_millis(start) < record_time: + clock.tick() + img = sensor.snapshot() + # Modify the image if you feel like here... + + img_writer.add_frame(img) + print(clock.fps()) + +img_writer.close() + +# Blue LED on means we are done. +red_led.off() +blue_led = pyb.LED(3) +blue_led.on() + +print("Done") +while(True): + pyb.wfi() diff --git a/scripts/examples/OpenMV/06-Video-Recording/mjpeg.py b/scripts/examples/OpenMV/06-Video-Recording/mjpeg.py new file mode 100644 index 000000000..1e3f732e4 --- /dev/null +++ b/scripts/examples/OpenMV/06-Video-Recording/mjpeg.py @@ -0,0 +1,37 @@ +# MJPEG Video Recording Example +# +# Note: You will need an SD card to run this demo. +# +# You can use your OpenMV Cam to record mjpeg files. You can either feed the +# recorder object JPEG frames or RGB565/Grayscale frames. Once you've finished +# recording a Mjpeg file you can use VLC to play it. If you are on Ubuntu then +# the built-in video player will work too. + +import sensor, image, time, mjpeg, pyb + +RED_LED_PIN = 1 +BLUE_LED_PIN = 3 + +sensor.reset() # Initialize the camera sensor. +sensor.set_pixformat(sensor.RGB565) # or sensor.GRAYSCALE +sensor.set_framesize(sensor.QVGA) # or sensor.QQVGA (or others) +sensor.skip_frames(time = 2000) # Let new settings take affect. +clock = time.clock() # Tracks FPS. + +pyb.LED(RED_LED_PIN).on() +sensor.skip_frames(time = 2000) # Give the user time to get ready. + +pyb.LED(RED_LED_PIN).off() +pyb.LED(BLUE_LED_PIN).on() + +m = mjpeg.Mjpeg("example.mjpeg") + +print("You're on camera!") +for i in range(200): + clock.tick() + m.add_frame(sensor.snapshot()) + print(clock.fps()) + +m.close(clock.fps()) +pyb.LED(BLUE_LED_PIN).off() +print("Done! Reset the camera to see the saved recording.") diff --git a/scripts/examples/OpenMV/06-Video-Recording/mjpeg_on_face_detection.py b/scripts/examples/OpenMV/06-Video-Recording/mjpeg_on_face_detection.py new file mode 100644 index 000000000..fd567a4f6 --- /dev/null +++ b/scripts/examples/OpenMV/06-Video-Recording/mjpeg_on_face_detection.py @@ -0,0 +1,65 @@ +# MJPEG Video Recording on Face Detection Example +# +# Note: You will need an SD card to run this example. +# +# You can use your OpenMV Cam to record mjpeg files. You can either feed the +# recorder object JPEG frames or RGB565/Grayscale frames. Once you've finished +# recording a Mjpeg file you can use VLC to play it. If you are on Ubuntu then +# the built-in video player will work too. +# +# This example demonstrates using face tracking on your OpenMV Cam to take a +# mjpeg. + +import sensor, image, time, mjpeg, pyb + +RED_LED_PIN = 1 +BLUE_LED_PIN = 3 + +sensor.reset() # Initialize the camera sensor. +sensor.set_pixformat(sensor.GRAYSCALE) # or sensor. +sensor.set_framesize(sensor.QQVGA) # or sensor.HQVGA (or others) +sensor.skip_frames(time = 2000) # Let new settings take affect. + +# Load up a face detection HaarCascade. This is object that your OpenMV Cam +# can use to detect faces using the find_features() method below. Your OpenMV +# Cam has fontalface HaarCascade built-in. By default, all the stages of the +# HaarCascade are loaded. 
However, You can adjust the number of stages to speed +# up processing at the expense of accuracy. The frontalface HaarCascade has 25 +# stages. +face_cascade = image.HaarCascade("frontalface", stages=25) + +while(True): + + pyb.LED(RED_LED_PIN).on() + print("About to start detecting faces...") + sensor.skip_frames(time = 2000) # Give the user time to get ready. + + pyb.LED(RED_LED_PIN).off() + print("Now detecting faces!") + pyb.LED(BLUE_LED_PIN).on() + + diff = 10 # We'll say we detected a face after 10 frames. + while(diff): + img = sensor.snapshot() + # Threshold can be between 0.0 and 1.0. A higher threshold results in a + # higher detection rate with more false positives. The scale value + # controls the matching scale allowing you to detect smaller faces. + faces = img.find_features(face_cascade, threshold=0.5, scale_factor=1.5) + + if faces: + diff -= 1 + for r in faces: + img.draw_rectangle(r) + + m = mjpeg.Mjpeg("example-%d.mjpeg" % pyb.rng()) + + clock = time.clock() # Tracks FPS. + print("You're on camera!") + for i in range(200): + clock.tick() + m.add_frame(sensor.snapshot()) + print(clock.fps()) + + m.close(clock.fps()) + pyb.LED(BLUE_LED_PIN).off() + print("Restarting...") diff --git a/scripts/examples/OpenMV/06-Video-Recording/mjpeg_on_movement.py b/scripts/examples/OpenMV/06-Video-Recording/mjpeg_on_movement.py new file mode 100644 index 000000000..5f13363f2 --- /dev/null +++ b/scripts/examples/OpenMV/06-Video-Recording/mjpeg_on_movement.py @@ -0,0 +1,59 @@ +# MJPEG Video Recording on Movement Example +# +# Note: You will need an SD card to run this example. +# +# You can use your OpenMV Cam to record mjpeg files. You can either feed the +# recorder object JPEG frames or RGB565/Grayscale frames. Once you've finished +# recording a Mjpeg file you can use VLC to play it. If you are on Ubuntu then +# the built-in video player will work too. +# +# This example demonstrates using frame differencing with your OpenMV Cam to do +# motion detection. After motion is detected your OpenMV Cam will take video. + +import sensor, image, time, mjpeg, pyb, os + +RED_LED_PIN = 1 +BLUE_LED_PIN = 3 + +sensor.reset() # Initialize the camera sensor. +sensor.set_pixformat(sensor.RGB565) # or sensor.GRAYSCALE +sensor.set_framesize(sensor.QVGA) # or sensor.QQVGA (or others) +sensor.skip_frames(time = 2000) # Let new settings take affect. +sensor.set_auto_whitebal(False) # Turn off white balance. + +if not "temp" in os.listdir(): os.mkdir("temp") # Make a temp directory + +while(True): + + pyb.LED(RED_LED_PIN).on() + print("About to save background image...") + sensor.skip_frames(time = 2000) # Give the user time to get ready. + + pyb.LED(RED_LED_PIN).off() + sensor.snapshot().save("temp/bg.bmp") + print("Saved background image - Now detecting motion!") + pyb.LED(BLUE_LED_PIN).on() + + diff = 10 # We'll say we detected motion after 10 frames of motion. + while(diff): + img = sensor.snapshot() + img.difference("temp/bg.bmp") + stats = img.statistics() + # Stats 5 is the max of the lighting color channel. The below code + # triggers when the lighting max for the whole image goes above 20. + # The lighting difference maximum should be zero normally. + if (stats[5] > 20): + diff -= 1 + + m = mjpeg.Mjpeg("example-%d.mjpeg" % pyb.rng()) + + clock = time.clock() # Tracks FPS. 
+ print("You're on camera!") + for i in range(200): + clock.tick() + m.add_frame(sensor.snapshot()) + print(clock.fps()) + + m.close(clock.fps()) + pyb.LED(BLUE_LED_PIN).off() + print("Restarting...") diff --git a/scripts/examples/OpenMV/07-Face-Detection/face_detection.py b/scripts/examples/OpenMV/07-Face-Detection/face_detection.py new file mode 100644 index 000000000..aca735d33 --- /dev/null +++ b/scripts/examples/OpenMV/07-Face-Detection/face_detection.py @@ -0,0 +1,51 @@ +# Face Detection Example +# +# This example shows off the built-in face detection feature of the OpenMV Cam. +# +# Face detection works by using the Haar Cascade feature detector on an image. A +# Haar Cascade is a series of simple area contrasts checks. For the built-in +# frontalface detector there are 25 stages of checks with each stage having +# hundreds of checks a piece. Haar Cascades run fast because later stages are +# only evaluated if previous stages pass. Additionally, your OpenMV Cam uses +# a data structure called the integral image to quickly execute each area +# contrast check in constant time (the reason for feature detection being +# grayscale only is because of the space requirment for the integral image). + +import sensor, time, image + +# Reset sensor +sensor.reset() + +# Sensor settings +sensor.set_contrast(3) +sensor.set_gainceiling(16) +# HQVGA and GRAYSCALE are the best for face tracking. +sensor.set_framesize(sensor.HQVGA) +sensor.set_pixformat(sensor.GRAYSCALE) + +# Load Haar Cascade +# By default this will use all stages, lower satges is faster but less accurate. +face_cascade = image.HaarCascade("frontalface", stages=25) +print(face_cascade) + +# FPS clock +clock = time.clock() + +while (True): + clock.tick() + + # Capture snapshot + img = sensor.snapshot() + + # Find objects. + # Note: Lower scale factor scales-down the image more and detects smaller objects. + # Higher threshold results in a higher detection rate, with more false positives. + objects = img.find_features(face_cascade, threshold=0.75, scale_factor=1.25) + + # Draw objects + for r in objects: + img.draw_rectangle(r) + + # Print FPS. + # Note: Actual FPS is higher, streaming the FB makes it slower. + print(clock.fps()) diff --git a/scripts/examples/OpenMV/07-Face-Detection/face_recognition.py b/scripts/examples/OpenMV/07-Face-Detection/face_recognition.py new file mode 100644 index 000000000..8a514664a --- /dev/null +++ b/scripts/examples/OpenMV/07-Face-Detection/face_recognition.py @@ -0,0 +1,27 @@ +# Face recognition with LBP descriptors. +# See Timo Ahonen's "Face Recognition with Local Binary Patterns". +# +# Before running the example: +# 1) Download the AT&T faces database http://www.cl.cam.ac.uk/Research/DTG/attarchive/pub/data/att_faces.zip +# 2) Exract and copy the orl_faces directory to the SD card root. +# +# NOTE: This is just a PoC implementation of the paper mentioned above, it does Not work well in real life conditions. 
+ +import sensor, time, image + +SUB = "s2" +NUM_SUBJECTS = 5 +NUM_SUBJECTS_IMGS = 10 + +img = image.Image("orl_faces/%s/1.pgm"%(SUB)).mask_ellipse() +d0 = img.find_lbp((0, 0, img.width(), img.height())) +img = None + +print("") +for s in range(1, NUM_SUBJECTS+1): + dist = 0 + for i in range(2, NUM_SUBJECTS_IMGS+1): + img = image.Image("orl_faces/s%d/%d.pgm"%(s, i)).mask_ellipse() + d1 = img.find_lbp((0, 0, img.width(), img.height())) + dist += image.match_descriptor(d0, d1) + print("Average dist for subject %d: %d"%(s, dist/(NUM_SUBJECTS_IMGS-1))) # images 2-10 give NUM_SUBJECTS_IMGS-1 comparisons diff --git a/scripts/examples/OpenMV/07-Face-Detection/face_tracking.py b/scripts/examples/OpenMV/07-Face-Detection/face_tracking.py new file mode 100644 index 000000000..28dcdc801 --- /dev/null +++ b/scripts/examples/OpenMV/07-Face-Detection/face_tracking.py @@ -0,0 +1,68 @@ +# Face Tracking Example +# +# This example shows off using the keypoints feature of your OpenMV Cam to track +# a face after it has been detected by a Haar Cascade. The first part of this +# script finds a face in the image using the frontalface Haar Cascade. +# After that, the script uses the keypoints feature to automatically learn your +# face and track it. Keypoints can be used to automatically track anything. +import sensor, time, image + +# Reset sensor +sensor.reset() +sensor.set_contrast(3) +sensor.set_gainceiling(16) +sensor.set_framesize(sensor.VGA) +sensor.set_windowing((320, 240)) +sensor.set_pixformat(sensor.GRAYSCALE) + +# Skip a few frames to allow the sensor to settle down +sensor.skip_frames(time = 2000) + +# Load Haar Cascade +# By default this will use all stages; using fewer stages is faster but less accurate. +face_cascade = image.HaarCascade("frontalface", stages=25) +print(face_cascade) + +# First set of keypoints +kpts1 = None + +# Find a face! +while (kpts1 == None): + img = sensor.snapshot() + img.draw_string(0, 0, "Looking for a face...") + # Find faces + objects = img.find_features(face_cascade, threshold=0.5, scale_factor=1.25) + if objects: + # Expand the ROI by 31 pixels in every direction + face = (objects[0][0]-31, objects[0][1]-31, objects[0][2]+31*2, objects[0][3]+31*2) + # Extract keypoints using the detected face size as the ROI + kpts1 = img.find_keypoints(threshold=10, scale_factor=1.1, max_keypoints=100, roi=face) + # Draw a rectangle around the first face + img.draw_rectangle(objects[0]) + +# Draw keypoints +print(kpts1) +img.draw_keypoints(kpts1, size=24) +img = sensor.snapshot() +time.sleep(2000) + +# FPS clock +clock = time.clock() + +while (True): + clock.tick() + img = sensor.snapshot() + # Extract keypoints from the whole frame + kpts2 = img.find_keypoints(threshold=10, scale_factor=1.1, max_keypoints=100, normalized=True) + + if (kpts2): + # Match the first set of keypoints with the second one + c = image.match_descriptor(kpts1, kpts2, threshold=85) + match = c[6] # c[6] contains the number of matches. + if (match>5): + img.draw_rectangle(c[2:6]) + img.draw_cross(c[0], c[1], size=10) + print(kpts2, "matched:%d dt:%d"%(match, c[7])) + + # Draw FPS + img.draw_string(0, 0, "FPS:%.2f"%(clock.fps())) diff --git a/scripts/examples/OpenMV/08-Eye-Tracking/face_eye_detection.py b/scripts/examples/OpenMV/08-Eye-Tracking/face_eye_detection.py new file mode 100644 index 000000000..13e5ab454 --- /dev/null +++ b/scripts/examples/OpenMV/08-Eye-Tracking/face_eye_detection.py @@ -0,0 +1,49 @@ +# Face Eye Detection Example +# +# This script uses the built-in frontalface detector to find a face and then +# the eyes within the face.
If you want to determine the eye gaze please see the +# iris_detection script for an example of how to do that. + +import sensor, time, image + +# Reset sensor +sensor.reset() + +# Sensor settings +sensor.set_contrast(1) +sensor.set_gainceiling(16) +sensor.set_framesize(sensor.HQVGA) +sensor.set_pixformat(sensor.GRAYSCALE) + +# Load Haar Cascade +# By default this will use all stages; using fewer stages is faster but less accurate. +face_cascade = image.HaarCascade("frontalface", stages=25) +eyes_cascade = image.HaarCascade("eye", stages=24) +print(face_cascade, eyes_cascade) + +# FPS clock +clock = time.clock() + +while (True): + clock.tick() + + # Capture snapshot + img = sensor.snapshot() + + # Find a face! + # Note: Lower scale factor scales-down the image more and detects smaller objects. + # Higher threshold results in a higher detection rate, with more false positives. + objects = img.find_features(face_cascade, threshold=0.5, scale_factor=1.5) + + # Draw faces + for face in objects: + img.draw_rectangle(face) + # Now find eyes within each face. + # Note: Use a higher threshold here (more detections) and lower scale (to find small objects) + eyes = img.find_features(eyes_cascade, threshold=0.5, scale_factor=1.2, roi=face) + for e in eyes: + img.draw_rectangle(e) + + # Print FPS. + # Note: Actual FPS is higher, streaming the FB makes it slower. + print(clock.fps()) diff --git a/scripts/examples/OpenMV/08-Eye-Tracking/iris_detection.py b/scripts/examples/OpenMV/08-Eye-Tracking/iris_detection.py new file mode 100644 index 000000000..9d51498c3 --- /dev/null +++ b/scripts/examples/OpenMV/08-Eye-Tracking/iris_detection.py @@ -0,0 +1,52 @@ +# Iris Detection 2 Example +# +# This example shows how to find the eye gaze (pupil detection) after finding +# the eyes in an image. This script uses the find_eye() function, which determines +# the center point of an ROI that should contain a pupil. It does this by +# finding the center of the darkest area in the eye ROI, which is the pupil center. +# +# Note: This script does not detect a face first; use it with the telephoto lens. + +import sensor, time, image + +# Reset sensor +sensor.reset() + +# Sensor settings +sensor.set_contrast(3) +sensor.set_gainceiling(16) + +# Set resolution to VGA. +sensor.set_framesize(sensor.VGA) + +# Bin/Crop image to 200x100, which gives more detail with less data to process +sensor.set_windowing((220, 190, 200, 100)) + +sensor.set_pixformat(sensor.GRAYSCALE) + +# Load Haar Cascade +# By default this will use all stages; using fewer stages is faster but less accurate. +eyes_cascade = image.HaarCascade("eye", stages=24) +print(eyes_cascade) + +# FPS clock +clock = time.clock() + +while (True): + clock.tick() + # Capture snapshot + img = sensor.snapshot() + # Find eyes! + # Note: Lower scale factor scales-down the image more and detects smaller objects. + # Higher threshold results in a higher detection rate, with more false positives. + eyes = img.find_features(eyes_cascade, threshold=0.5, scale_factor=1.5) + + # Find iris + for e in eyes: + iris = img.find_eye(e) + img.draw_rectangle(e) + img.draw_cross(iris[0], iris[1]) + + # Print FPS. + # Note: Actual FPS is higher, streaming the FB makes it slower.
+ print(clock.fps()) diff --git a/scripts/examples/OpenMV/09-Feature-Detection/edges.py b/scripts/examples/OpenMV/09-Feature-Detection/edges.py new file mode 100644 index 000000000..166820b72 --- /dev/null +++ b/scripts/examples/OpenMV/09-Feature-Detection/edges.py @@ -0,0 +1,20 @@ +# Edge detection with Canny: +# +# This example demonstrates the Canny edge detector. +import sensor, image, time + +sensor.reset() # Initialize the camera sensor. +sensor.set_pixformat(sensor.GRAYSCALE) # or sensor.RGB565 +sensor.set_framesize(sensor.QQVGA) # or sensor.QVGA (or others) +sensor.skip_frames(time = 2000) # Let new settings take effect. +sensor.set_gainceiling(8) + +clock = time.clock() # Tracks FPS. +while(True): + clock.tick() # Track elapsed milliseconds between snapshots(). + img = sensor.snapshot() # Take a picture and return the image. + # Use Canny edge detector + img.find_edges(image.EDGE_CANNY, threshold=(50, 80)) + # Faster simpler edge detection + #img.find_edges(image.EDGE_SIMPLE, threshold=(100, 255)) + print(clock.fps()) # Note: Your OpenMV Cam runs about half as fast while + # connected to your computer. The FPS should increase once disconnected. diff --git a/scripts/examples/OpenMV/09-Feature-Detection/find_circles.py b/scripts/examples/OpenMV/09-Feature-Detection/find_circles.py new file mode 100644 index 000000000..a208b4a5b --- /dev/null +++ b/scripts/examples/OpenMV/09-Feature-Detection/find_circles.py @@ -0,0 +1,39 @@ +# Find Circles Example +# +# This example shows off how to find circles in the image using the Hough +# Transform. https://en.wikipedia.org/wiki/Circle_Hough_Transform +# +# Note that the find_circles() method will only find circles which are completely +# inside of the image. Circles which go outside of the image/roi are ignored... + +import sensor, image, time + +sensor.reset() +sensor.set_pixformat(sensor.RGB565) # grayscale is faster +sensor.set_framesize(sensor.QQVGA) +sensor.skip_frames(time = 2000) +clock = time.clock() + +while(True): + clock.tick() + img = sensor.snapshot().lens_corr(1.8) + + # Circle objects have four values: x, y, r (radius), and magnitude. The + # magnitude is the strength of the detection of the circle. Higher is + # better... + + # `threshold` controls how many circles are found. Increase its value + # to decrease the number of circles detected... + + # `x_margin`, `y_margin`, and `r_margin` control the merging of similar + # circles in the x, y, and r (radius) directions. + + # r_min, r_max, and r_step control what radii of circles are tested. + # Shrinking the number of tested circle radii yields a big performance boost. + + for c in img.find_circles(threshold = 2000, x_margin = 10, y_margin = 10, r_margin = 10, + r_min = 2, r_max = 100, r_step = 2): + img.draw_circle(c.x(), c.y(), c.r(), color = (255, 0, 0)) + print(c) + + print("FPS %f" % clock.fps()) diff --git a/scripts/examples/OpenMV/09-Feature-Detection/find_line_segments.py b/scripts/examples/OpenMV/09-Feature-Detection/find_line_segments.py new file mode 100644 index 000000000..4aa42cf17 --- /dev/null +++ b/scripts/examples/OpenMV/09-Feature-Detection/find_line_segments.py @@ -0,0 +1,39 @@ +# Find Line Segments Example +# +# This example shows off how to find line segments in the image. For each line segment +# found in the image, a line object is returned which includes the line's rotation. + +# find_line_segments() finds finite length lines (but is slow). +# Use find_lines() instead if you only need infinite lines (it is fast). + +enable_lens_corr = False # turn on for straighter lines...
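+# Added note (not in the original script): lens correction un-distorts the +# barrel distortion that bends straight edges. With enable_lens_corr = True the +# loop below calls img.lens_corr(1.8), a strength suited to the standard 2.8mm lens.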
+ +import sensor, image, time + +sensor.reset() +sensor.set_pixformat(sensor.RGB565) # grayscale is faster +sensor.set_framesize(sensor.QQVGA) +sensor.skip_frames(time = 2000) +clock = time.clock() + +# All lines also have `x1()`, `y1()`, `x2()`, and `y2()` methods to get their end-points +# and a `line()` method to get all the above as one 4 value tuple for `draw_line()`. + +while(True): + clock.tick() + img = sensor.snapshot() + if enable_lens_corr: img.lens_corr(1.8) # for 2.8mm lens... + + # `merge_distance` controls the merging of nearby lines. At 0 (the default), no + # merging is done. At 1, any line 1 pixel away from another is merged... and so + # on as you increase this value. You may wish to merge lines as line segment + # detection produces a lot of line segment results. + + # `max_theta_diff` controls the maximum amount of rotation difference between + # any two lines about to be merged. The default setting allows for 15 degrees. + + for l in img.find_line_segments(merge_distance = 0, max_theta_diff = 5): + img.draw_line(l.line(), color = (255, 0, 0)) + # print(l) + + print("FPS %f" % clock.fps()) diff --git a/scripts/examples/OpenMV/09-Feature-Detection/find_lines.py b/scripts/examples/OpenMV/09-Feature-Detection/find_lines.py new file mode 100644 index 000000000..6c45fcfbc --- /dev/null +++ b/scripts/examples/OpenMV/09-Feature-Detection/find_lines.py @@ -0,0 +1,57 @@ +# Find Lines Example +# +# This example shows off how to find lines in the image. For each line found +# in the image, a line object is returned which includes the line's rotation. + +# Note: Line detection is done by using the Hough Transform: +# http://en.wikipedia.org/wiki/Hough_transform +# Please read about it above for more information on what `theta` and `rho` are. + +# find_lines() finds infinite length lines. Use find_line_segments() to find non-infinite lines. + +enable_lens_corr = False # turn on for straighter lines... + +import sensor, image, time + +sensor.reset() +sensor.set_pixformat(sensor.RGB565) # grayscale is faster +sensor.set_framesize(sensor.QQVGA) +sensor.skip_frames(time = 2000) +clock = time.clock() + +# All line objects have a `theta()` method to get their rotation angle in degrees. +# You can filter lines based on their rotation angle. + +min_degree = 0 +max_degree = 179 + +# All lines also have `x1()`, `y1()`, `x2()`, and `y2()` methods to get their end-points +# and a `line()` method to get all the above as one 4 value tuple for `draw_line()`. + +while(True): + clock.tick() + img = sensor.snapshot() + if enable_lens_corr: img.lens_corr(1.8) # for 2.8mm lens... + + # `threshold` controls how many lines in the image are found. Only lines with + # edge difference magnitude sums greater than `threshold` are detected... + + # More about `threshold` - each pixel in the image contributes a magnitude value + # to a line. The sum of all contributions is the magnitude for that line. Then + # when lines are merged their magnitudes are added together. Note that `threshold` + # filters out lines with low magnitudes before merging. To see the magnitude of + # un-merged lines set `theta_margin` and `rho_margin` to 0... + + # `theta_margin` and `rho_margin` control merging similar lines. If two lines' + # theta and rho value differences are less than the margins then they are merged.
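+ # Added worked example (not in the original script): with theta_margin = 25 + # and rho_margin = 25, two lines with thetas of 30 and 50 degrees and rhos of + # 10 and 20 would be merged, since both differences (20 and 10) are under the margins.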
+ + for l in img.find_lines(threshold = 1000, theta_margin = 25, rho_margin = 25): + if (min_degree <= l.theta()) and (l.theta() <= max_degree): + img.draw_line(l.line(), color = (255, 0, 0)) + # print(l) + + print("FPS %f" % clock.fps()) + +# About negative rho values: +# +# A [theta+0:-rho] tuple is the same as [theta+180:+rho]. diff --git a/scripts/examples/OpenMV/09-Feature-Detection/find_rects.py b/scripts/examples/OpenMV/09-Feature-Detection/find_rects.py new file mode 100644 index 000000000..5fafba626 --- /dev/null +++ b/scripts/examples/OpenMV/09-Feature-Detection/find_rects.py @@ -0,0 +1,31 @@ +# Find Rects Example +# +# This example shows off how to find rectangles in the image using the quad threshold +# detection code from our AprilTag code. The quad threshold detection algorithm +# detects rectangles in an extremely robust way and is much better than Hough +# Transform based methods. For example, it can still detect rectangles even when lens +# distortion causes those rectangles to look bent. Rounded rectangles are no problem! +# (But, because of this, the code will also detect small-radius circles too)... + +import sensor, image, time + +sensor.reset() +sensor.set_pixformat(sensor.RGB565) # grayscale is faster (160x120 max on OpenMV-M7) +sensor.set_framesize(sensor.QQVGA) +sensor.skip_frames(time = 2000) +clock = time.clock() + +while(True): + clock.tick() + img = sensor.snapshot() + + # `threshold` below should be set to a high enough value to filter out noise + # rectangles detected in the image which have low edge magnitudes. Rectangles + # have larger edge magnitudes the larger and more contrasty they are... + + for r in img.find_rects(threshold = 10000): + img.draw_rectangle(r.rect(), color = (255, 0, 0)) + for p in r.corners(): img.draw_circle(p[0], p[1], 5, color = (0, 255, 0)) + print(r) + + print("FPS %f" % clock.fps()) diff --git a/scripts/examples/OpenMV/09-Feature-Detection/hog.py b/scripts/examples/OpenMV/09-Feature-Detection/hog.py new file mode 100644 index 000000000..7bc17446b --- /dev/null +++ b/scripts/examples/OpenMV/09-Feature-Detection/hog.py @@ -0,0 +1,28 @@ +# Histogram of Oriented Gradients (HoG) Example +# +# This example demonstrates HoG visualization. +# +# Note: Due to JPEG artifacts, the HoG visualization looks blurry. To see the +# image without JPEG artifacts, uncomment the lines that save the image to uSD. + +import sensor, image, time + +sensor.reset() +# Set sensor settings +sensor.set_contrast(1) +sensor.set_gainceiling(8) +sensor.set_framesize(sensor.QVGA) +sensor.set_pixformat(sensor.GRAYSCALE) +sensor.skip_frames(time = 2000) + +clock = time.clock() # Tracks FPS. +while (True): + clock.tick() + img = sensor.snapshot() + img.find_hog() + + # Uncomment to save raw FB to file and exit the loop + #img.save("/hog.pgm") + #break + + print(clock.fps()) diff --git a/scripts/examples/OpenMV/09-Feature-Detection/keypoints.py b/scripts/examples/OpenMV/09-Feature-Detection/keypoints.py new file mode 100644 index 000000000..8d5cf242d --- /dev/null +++ b/scripts/examples/OpenMV/09-Feature-Detection/keypoints.py @@ -0,0 +1,58 @@ +# Object tracking with keypoints example. +# Show the camera an object and then run the script. A set of keypoints will be extracted +# once and then tracked in the following frames. If you want a new set of keypoints, re-run +# the script. NOTE: see the docs for arguments to tune find_keypoints and match_keypoints.
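+# Added note (not in the original script): image.match_descriptor() returns a +# match object; match.count() is the number of matched keypoints, match.theta() +# is the estimated rotation, and match.rect()/match.cx()/match.cy() locate the +# matched object, as used in the loop below.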
+import sensor, time, image + +# Reset sensor +sensor.reset() + +# Sensor settings +sensor.set_contrast(3) +sensor.set_gainceiling(16) +sensor.set_framesize(sensor.VGA) +sensor.set_windowing((320, 240)) +sensor.set_pixformat(sensor.GRAYSCALE) + +sensor.skip_frames(time = 2000) +sensor.set_auto_gain(False, value=100) + +def draw_keypoints(img, kpts): + if kpts: + print(kpts) + img.draw_keypoints(kpts) + img = sensor.snapshot() + time.sleep(1000) + +kpts1 = None +# NOTE: uncomment to load a keypoints descriptor from file +#kpts1 = image.load_descriptor("/desc.orb") +#img = sensor.snapshot() +#draw_keypoints(img, kpts1) + +clock = time.clock() +while (True): + clock.tick() + img = sensor.snapshot() + if (kpts1 == None): + # NOTE: By default find_keypoints returns multi-scale keypoints extracted from an image pyramid. + kpts1 = img.find_keypoints(max_keypoints=150, threshold=10, scale_factor=1.2) + draw_keypoints(img, kpts1) + else: + # NOTE: When extracting keypoints to match the first descriptor, we use normalized=True to extract + # keypoints from the first scale only, which will match one of the scales in the first descriptor. + kpts2 = img.find_keypoints(max_keypoints=150, threshold=10, normalized=True) + if (kpts2): + match = image.match_descriptor(kpts1, kpts2, threshold=85) + if (match.count()>10): + # If we have at least n "good matches" + # Draw bounding rectangle and cross. + img.draw_rectangle(match.rect()) + img.draw_cross(match.cx(), match.cy(), size=10) + + print(kpts2, "matched:%d dt:%d"%(match.count(), match.theta())) + # NOTE: uncomment if you want to draw the keypoints + #img.draw_keypoints(kpts2, size=KEYPOINTS_SIZE, matched=True) + + # Draw FPS + img.draw_string(0, 0, "FPS:%.2f"%(clock.fps())) diff --git a/scripts/examples/OpenMV/09-Feature-Detection/keypoints_save.py b/scripts/examples/OpenMV/09-Feature-Detection/keypoints_save.py new file mode 100644 index 000000000..ca3e51bcc --- /dev/null +++ b/scripts/examples/OpenMV/09-Feature-Detection/keypoints_save.py @@ -0,0 +1,37 @@ +# Keypoints descriptor example. +# This example shows how to save a keypoints descriptor to file. Show the camera an object +# and then run the script. The script will extract and save a keypoints descriptor and the image. +# You can use the keypoints_editor.py util to remove unwanted keypoints. +# +# NOTE: Please reset the camera after running this script to see the new file. +import sensor, time, image + +# Reset sensor +sensor.reset() + +# Sensor settings +sensor.set_contrast(3) +sensor.set_gainceiling(16) +sensor.set_framesize(sensor.VGA) +sensor.set_windowing((320, 240)) +sensor.set_pixformat(sensor.GRAYSCALE) + +sensor.skip_frames(time = 2000) +sensor.set_auto_gain(False, value=100) + +FILE_NAME = "desc" +img = sensor.snapshot() +# NOTE: See the docs for other arguments +# NOTE: By default find_keypoints returns multi-scale keypoints extracted from an image pyramid. +kpts = img.find_keypoints(max_keypoints=150, threshold=10, scale_factor=1.2) + +if (kpts == None): + raise(Exception("Couldn't find any keypoints!")) + +image.save_descriptor(kpts, "/%s.orb"%(FILE_NAME)) +img.save("/%s.pgm"%(FILE_NAME)) + +img.draw_keypoints(kpts) +sensor.snapshot() +time.sleep(1000) +raise(Exception("Done! 
Please reset the camera")) diff --git a/scripts/examples/OpenMV/09-Feature-Detection/lbp.py b/scripts/examples/OpenMV/09-Feature-Detection/lbp.py new file mode 100644 index 000000000..b54838313 --- /dev/null +++ b/scripts/examples/OpenMV/09-Feature-Detection/lbp.py @@ -0,0 +1,53 @@ +# Local Binary Patterns (LBP) Example +# +# This example shows off how to use the local binary pattern feature descriptor +# on your OpenMV Cam. LBP descriptors work like FREAK feature descriptors. +# +# WARNING: LBP support needs to be reworked! As of right now this feature needs +# a lot of work to be made into something useful. This script will remain to show +# that the functionality exists, but, in its current state, it is inadequate. + +import sensor, time, image + +# Reset sensor +sensor.reset() + +# Sensor settings +sensor.set_contrast(1) +sensor.set_gainceiling(16) +sensor.set_framesize(sensor.HQVGA) +sensor.set_pixformat(sensor.GRAYSCALE) + +# Load Haar Cascade +# By default this will use all stages; using fewer stages is faster but less accurate. +face_cascade = image.HaarCascade("frontalface", stages=25) +print(face_cascade) + +# Skip a few frames to allow the sensor to settle down +# Note: This takes more time when executed from the IDE. +for i in range(0, 30): + img = sensor.snapshot() + img.draw_string(0, 0, "Please wait...") + +d0 = None +#d0 = image.load_descriptor("/desc.lbp") +clock = time.clock() + +while (True): + clock.tick() + img = sensor.snapshot() + + objects = img.find_features(face_cascade, threshold=0.5, scale_factor=1.25) + if objects: + face = objects[0] + d1 = img.find_lbp(face) + if (d0 == None): + d0 = d1 + else: + dist = image.match_descriptor(d0, d1) + img.draw_string(0, 10, "Match %d%%"%(dist)) + + img.draw_rectangle(face) + # Draw FPS + img.draw_string(0, 0, "FPS:%.2f"%(clock.fps())) diff --git a/scripts/examples/OpenMV/09-Feature-Detection/linear_regression_fast.py b/scripts/examples/OpenMV/09-Feature-Detection/linear_regression_fast.py new file mode 100644 index 000000000..f200e4ace --- /dev/null +++ b/scripts/examples/OpenMV/09-Feature-Detection/linear_regression_fast.py @@ -0,0 +1,43 @@ +# Fast Linear Regression Example +# +# This example shows off how to use the get_regression() method on your OpenMV Cam +# to get the linear regression of an ROI. Using this method you can easily build +# a robot which can track lines which all point in the same general direction +# but are not actually connected. Use find_blobs() on lines that are nicely +# connected for better filtering options and control. +# +# This is called the fast linear regression because we use the least-squares +# method to fit the line. However, this method is NOT GOOD FOR ANY images that +# have a lot (or really any) outlier points which corrupt the line fit... + +THRESHOLD = (0, 100) # Grayscale threshold for dark things... +BINARY_VISIBLE = True # Binarize the image first so you can see what the linear regression + # is being run on... might lower FPS though. + +import sensor, image, time + +sensor.reset() +sensor.set_pixformat(sensor.GRAYSCALE) +sensor.set_framesize(sensor.QQVGA) +sensor.skip_frames(time = 2000) +clock = time.clock() + +while(True): + clock.tick() + img = sensor.snapshot().binary([THRESHOLD]) if BINARY_VISIBLE else sensor.snapshot() + + # Returns a line object similar to line objects returned by find_lines() and + # find_line_segments(). You have x1(), y1(), x2(), y2(), length(), + # theta() (rotation in degrees), rho(), and magnitude().
+ # + # magnitude() represents how well the linear regression worked. It goes from + # (0, INF] where 0 is returned for a circle. The more linear the + # scene is the higher the magnitude. + line = img.get_regression([(255,255) if BINARY_VISIBLE else THRESHOLD]) + + if (line): img.draw_line(line.line(), color = 127) + print("FPS %f, mag = %s" % (clock.fps(), str(line.magnitude()) if (line) else "N/A")) + +# About negative rho values: +# +# A [theta+0:-rho] tuple is the same as [theta+180:+rho]. diff --git a/scripts/examples/OpenMV/09-Feature-Detection/linear_regression_robust.py b/scripts/examples/OpenMV/09-Feature-Detection/linear_regression_robust.py new file mode 100644 index 000000000..9f24c618d --- /dev/null +++ b/scripts/examples/OpenMV/09-Feature-Detection/linear_regression_robust.py @@ -0,0 +1,45 @@ +# Robust Linear Regression Example +# +# This example shows off how to use the get_regression() method on your OpenMV Cam +# to get the linear regression of an ROI. Using this method you can easily build +# a robot which can track lines which all point in the same general direction +# but are not actually connected. Use find_blobs() on lines that are nicely +# connected for better filtering options and control. +# +# We're using the robust=True argument for get_regression() in this script which +# computes the linear regression using a much more robust algorithm... but potentially +# much slower. The robust algorithm runs in O(N^2) time on the image. So, YOU NEED +# TO LIMIT THE NUMBER OF PIXELS the robust algorithm works on or it can actually +# take seconds for the algorithm to give you a result... THRESHOLD VERY CAREFULLY! + +THRESHOLD = (0, 100) # Grayscale threshold for dark things... +BINARY_VISIBLE = True # Binarize the image first so you can see what the linear regression + # is being run on... might lower FPS though. + +import sensor, image, time + +sensor.reset() +sensor.set_pixformat(sensor.GRAYSCALE) +sensor.set_framesize(sensor.QQQVGA) # 80x60 (4,800 pixels) - O(N^2) max = 23,040,000. +sensor.skip_frames(time = 2000) # WARNING: If you use QQVGA it may take seconds +clock = time.clock() # to process a frame sometimes. + +while(True): + clock.tick() + img = sensor.snapshot().binary([THRESHOLD]) if BINARY_VISIBLE else sensor.snapshot() + + # Returns a line object similar to line objects returned by find_lines() and + # find_line_segments(). You have x1(), y1(), x2(), y2(), length(), + # theta() (rotation in degrees), rho(), and magnitude(). + # + # magnitude() represents how well the linear regression worked. It means something + # different for the robust linear regression. In general, the larger the value the + # better... + line = img.get_regression([(255,255) if BINARY_VISIBLE else THRESHOLD], robust = True) + + if (line): img.draw_line(line.line(), color = 127) + print("FPS %f, mag = %s" % (clock.fps(), str(line.magnitude()) if (line) else "N/A")) + +# About negative rho values: +# +# A [theta+0:-rho] tuple is the same as [theta+180:+rho]. diff --git a/scripts/examples/OpenMV/09-Feature-Detection/selective_search.py b/scripts/examples/OpenMV/09-Feature-Detection/selective_search.py new file mode 100644 index 000000000..c44c931e9 --- /dev/null +++ b/scripts/examples/OpenMV/09-Feature-Detection/selective_search.py @@ -0,0 +1,22 @@ +# Selective Search Example + +import sensor, image, time +from random import randint + +sensor.reset() # Reset and initialize the sensor.
+sensor.set_pixformat(sensor.RGB565) # Set pixel format to RGB565 (or GRAYSCALE) +sensor.set_framesize(sensor.QVGA) # Set frame size to QVGA (320x240) +sensor.skip_frames(time = 2000) # Wait for settings to take effect. +sensor.set_auto_gain(False) +sensor.set_auto_exposure(False, exposure_us=10000) +clock = time.clock() # Create a clock object to track the FPS. + + +while(True): + clock.tick() # Update the FPS clock. + img = sensor.snapshot() # Take a picture and return the image. + rois = img.selective_search(threshold = 200, size = 20, a1=0.5, a2=1.0, a3=1.0) + for r in rois: + img.draw_rectangle(r, color=(255, 0, 0)) + #img.draw_rectangle(r, color=(randint(100, 255), randint(100, 255), randint(100, 255))) + print(clock.fps()) diff --git a/scripts/examples/OpenMV/09-Feature-Detection/template_matching.py b/scripts/examples/OpenMV/09-Feature-Detection/template_matching.py new file mode 100644 index 000000000..79d9c66a7 --- /dev/null +++ b/scripts/examples/OpenMV/09-Feature-Detection/template_matching.py @@ -0,0 +1,48 @@ +# Template Matching Example - Normalized Cross Correlation (NCC) +# +# This example shows off how to use the NCC feature of your OpenMV Cam to match +# image patches to parts of an image... except in extremely controlled environments +# NCC is not all that useful. +# +# WARNING: NCC support needs to be reworked! As of right now this feature needs +# a lot of work to be made into something useful. This script will remain to show +# that the functionality exists, but, in its current state, it is inadequate. + +import time, sensor, image +from image import SEARCH_EX, SEARCH_DS + +# Reset sensor
sensor.reset() + +# Set sensor settings +sensor.set_contrast(1) +sensor.set_gainceiling(16) +# Max resolution for template matching with SEARCH_EX is QQVGA +sensor.set_framesize(sensor.QQVGA) +# You can set windowing to reduce the search image. +#sensor.set_windowing(((640-80)//2, (480-60)//2, 80, 60)) +sensor.set_pixformat(sensor.GRAYSCALE) + +# Load template. +# Template should be a small (e.g. 32x32 pixels) grayscale image. +template = image.Image("/template.pgm") + +clock = time.clock() + +# Run template matching +while (True): + clock.tick() + img = sensor.snapshot() + + # find_template(template, threshold, [roi, step, search]) + # ROI: The region of interest tuple (x, y, w, h). + # Step: The loop step used (y+=step, x+=step); use a bigger step to make it faster. + # Search is either image.SEARCH_EX for exhaustive search or image.SEARCH_DS for diamond search. + # + # Note1: ROI has to be smaller than the image and bigger than the template. + # Note2: In diamond search, step and ROI are both ignored. + r = img.find_template(template, 0.70, step=4, search=SEARCH_EX) #, roi=(10, 0, 60, 60)) + if r: + img.draw_rectangle(r) + + print(clock.fps()) diff --git a/scripts/examples/OpenMV/10-Color-Tracking/automatic_grayscale_color_tracking.py b/scripts/examples/OpenMV/10-Color-Tracking/automatic_grayscale_color_tracking.py new file mode 100644 index 000000000..51f8a0a91 --- /dev/null +++ b/scripts/examples/OpenMV/10-Color-Tracking/automatic_grayscale_color_tracking.py @@ -0,0 +1,49 @@ +# Automatic Grayscale Color Tracking Example +# +# This example shows off single color automatic grayscale color tracking using the OpenMV Cam. + +import sensor, image, time +print("Letting auto algorithms run.
Don't put anything in front of the camera!") + +sensor.reset() +sensor.set_pixformat(sensor.GRAYSCALE) +sensor.set_framesize(sensor.QVGA) +sensor.skip_frames(time = 2000) +sensor.set_auto_gain(False) # must be turned off for color tracking +sensor.set_auto_whitebal(False) # must be turned off for color tracking +clock = time.clock() + +# Capture the color thresholds for whatever was in the center of the image. +r = [(320//2)-(50//2), (240//2)-(50//2), 50, 50] # 50x50 center of QVGA. + +print("Auto algorithms done. Hold the object you want to track in front of the camera in the box.") +print("MAKE SURE THE COLOR OF THE OBJECT YOU WANT TO TRACK IS FULLY ENCLOSED BY THE BOX!") +for i in range(60): + img = sensor.snapshot() + img.draw_rectangle(r) + +print("Learning thresholds...") +threshold = [128, 128] # Middle grayscale values. +for i in range(60): + img = sensor.snapshot() + hist = img.get_histogram(roi=r) + lo = hist.get_percentile(0.01) # Get the CDF of the histogram at the 1% range (ADJUST AS NECESSARY)! + hi = hist.get_percentile(0.99) # Get the CDF of the histogram at the 99% range (ADJUST AS NECESSARY)! + # Average in percentile values. + threshold[0] = (threshold[0] + lo.value()) // 2 + threshold[1] = (threshold[1] + hi.value()) // 2 + for blob in img.find_blobs([threshold], pixels_threshold=100, area_threshold=100, merge=True, margin=10): + img.draw_rectangle(blob.rect()) + img.draw_cross(blob.cx(), blob.cy()) + img.draw_rectangle(r) + +print("Thresholds learned...") +print("Tracking colors...") + +while(True): + clock.tick() + img = sensor.snapshot() + for blob in img.find_blobs([threshold], pixels_threshold=100, area_threshold=100, merge=True, margin=10): + img.draw_rectangle(blob.rect()) + img.draw_cross(blob.cx(), blob.cy()) + print(clock.fps()) diff --git a/scripts/examples/OpenMV/10-Color-Tracking/automatic_rgb565_color_tracking.py b/scripts/examples/OpenMV/10-Color-Tracking/automatic_rgb565_color_tracking.py new file mode 100644 index 000000000..50a3f4e6e --- /dev/null +++ b/scripts/examples/OpenMV/10-Color-Tracking/automatic_rgb565_color_tracking.py @@ -0,0 +1,53 @@ +# Automatic RGB565 Color Tracking Example +# +# This example shows off single color automatic RGB565 color tracking using the OpenMV Cam. + +import sensor, image, time +print("Letting auto algorithms run. Don't put anything in front of the camera!") + +sensor.reset() +sensor.set_pixformat(sensor.RGB565) +sensor.set_framesize(sensor.QVGA) +sensor.skip_frames(time = 2000) +sensor.set_auto_gain(False) # must be turned off for color tracking +sensor.set_auto_whitebal(False) # must be turned off for color tracking +clock = time.clock() + +# Capture the color thresholds for whatever was in the center of the image. +r = [(320//2)-(50//2), (240//2)-(50//2), 50, 50] # 50x50 center of QVGA. + +print("Auto algorithms done. Hold the object you want to track in front of the camera in the box.") +print("MAKE SURE THE COLOR OF THE OBJECT YOU WANT TO TRACK IS FULLY ENCLOSED BY THE BOX!") +for i in range(60): + img = sensor.snapshot() + img.draw_rectangle(r) + +print("Learning thresholds...") +threshold = [50, 50, 0, 0, 0, 0] # Middle L, A, B values. +for i in range(60): + img = sensor.snapshot() + hist = img.get_histogram(roi=r) + lo = hist.get_percentile(0.01) # Get the CDF of the histogram at the 1% range (ADJUST AS NECESSARY)! + hi = hist.get_percentile(0.99) # Get the CDF of the histogram at the 99% range (ADJUST AS NECESSARY)! + # Average in percentile values. 
+ threshold[0] = (threshold[0] + lo.l_value()) // 2 + threshold[1] = (threshold[1] + hi.l_value()) // 2 + threshold[2] = (threshold[2] + lo.a_value()) // 2 + threshold[3] = (threshold[3] + hi.a_value()) // 2 + threshold[4] = (threshold[4] + lo.b_value()) // 2 + threshold[5] = (threshold[5] + hi.b_value()) // 2 + for blob in img.find_blobs([threshold], pixels_threshold=100, area_threshold=100, merge=True, margin=10): + img.draw_rectangle(blob.rect()) + img.draw_cross(blob.cx(), blob.cy()) + img.draw_rectangle(r) + +print("Thresholds learned...") +print("Tracking colors...") + +while(True): + clock.tick() + img = sensor.snapshot() + for blob in img.find_blobs([threshold], pixels_threshold=100, area_threshold=100, merge=True, margin=10): + img.draw_rectangle(blob.rect()) + img.draw_cross(blob.cx(), blob.cy()) + print(clock.fps()) diff --git a/scripts/examples/OpenMV/10-Color-Tracking/black_grayscale_line_following.py b/scripts/examples/OpenMV/10-Color-Tracking/black_grayscale_line_following.py new file mode 100644 index 000000000..6e0674b63 --- /dev/null +++ b/scripts/examples/OpenMV/10-Color-Tracking/black_grayscale_line_following.py @@ -0,0 +1,85 @@ +# Black Grayscale Line Following Example +# +# Making a line following robot requires a lot of effort. This example script +# shows how to do the machine vision part of the line following robot. You +# can use the output from this script to drive a differential drive robot to +# follow a line. This script just generates a single turn value that tells +# your robot to go left or right. +# +# For this script to work properly you should point the camera at a line at a +# 45 or so degree angle. Please make sure that only the line is within the +# camera's field of view. + +import sensor, image, time, math + +# Tracks a black line. Use [(128, 255)] for tracking a white line. +GRAYSCALE_THRESHOLD = [(0, 64)] + +# Each roi is (x, y, w, h). The line detection algorithm will try to find the +# centroid of the largest blob in each roi. The x position of the centroids +# will then be averaged with different weights where the most weight is assigned +# to the roi near the bottom of the image and less to the next roi and so on. +ROIS = [ # [ROI, weight] + (0, 100, 160, 20, 0.7), # You'll need to tweak the weights for your app + (0, 50, 160, 20, 0.3), # depending on how your robot is set up. + (0, 0, 160, 20, 0.1) + ] + +# Compute the weight divisor (we're computing this so you don't have to make weights add to 1). +weight_sum = 0 +for r in ROIS: weight_sum += r[4] # r[4] is the roi weight. + +# Camera setup... +sensor.reset() # Initialize the camera sensor. +sensor.set_pixformat(sensor.GRAYSCALE) # use grayscale. +sensor.set_framesize(sensor.QQVGA) # use QQVGA for speed. +sensor.skip_frames(time = 2000) # Let new settings take effect. +sensor.set_auto_gain(False) # must be turned off for color tracking +sensor.set_auto_whitebal(False) # must be turned off for color tracking +clock = time.clock() # Tracks FPS. + +while(True): + clock.tick() # Track elapsed milliseconds between snapshots(). + img = sensor.snapshot() # Take a picture and return the image. + + centroid_sum = 0 + + for r in ROIS: + blobs = img.find_blobs(GRAYSCALE_THRESHOLD, roi=r[0:4], merge=True) # r[0:4] is roi tuple. + + if blobs: + # Find the blob with the most pixels. + largest_blob = max(blobs, key=lambda b: b.pixels()) + + # Draw a rect around the blob.
+ img.draw_rectangle(largest_blob.rect()) + img.draw_cross(largest_blob.cx(), + largest_blob.cy()) + + centroid_sum += largest_blob.cx() * r[4] # r[4] is the roi weight. + + center_pos = (centroid_sum / weight_sum) # Determine center of line. + + # Convert the center_pos to a deflection angle. We're using a non-linear + # operation so that the response gets stronger the farther off the line we + # are. Non-linear operations are good to use on the output of algorithms + # like this to cause a response "trigger". + deflection_angle = 0 + + # The 80 is from half the X res, the 60 is from half the Y res. The + # equation below is just computing the angle of a triangle where the + # opposite side of the triangle is the deviation of the center position + # from the center and the adjacent side is half the Y res. This limits + # the angle output to around -45 to 45. (It's not quite -45 and 45). + deflection_angle = -math.atan((center_pos-80)/60) + + # Convert angle in radians to degrees. + deflection_angle = math.degrees(deflection_angle) + + # Now you have an angle telling you how much to turn the robot by, which + # incorporates the part of the line nearest to the robot and parts of + # the line farther away from the robot for a better prediction. + print("Turn Angle: %f" % deflection_angle) + + print(clock.fps()) # Note: Your OpenMV Cam runs about half as fast while + # connected to your computer. The FPS should increase once disconnected. diff --git a/scripts/examples/OpenMV/10-Color-Tracking/image_histogram_info.py b/scripts/examples/OpenMV/10-Color-Tracking/image_histogram_info.py new file mode 100644 index 000000000..04b386b3e --- /dev/null +++ b/scripts/examples/OpenMV/10-Color-Tracking/image_histogram_info.py @@ -0,0 +1,25 @@ +# Image Histogram Info Example +# +# This script computes the histogram of the image and prints it out. + +import sensor, image, time + +sensor.reset() +sensor.set_pixformat(sensor.GRAYSCALE) # or RGB565. +sensor.set_framesize(sensor.QVGA) +sensor.skip_frames(time = 2000) +sensor.set_auto_gain(False) # must be turned off for color tracking +sensor.set_auto_whitebal(False) # must be turned off for color tracking +clock = time.clock() + +while(True): + clock.tick() + img = sensor.snapshot() + # Gets the grayscale histogram for the image into 8 bins. + # Bins defaults to 256 and may be between 2 and 256. + print(img.get_histogram(bins=8)) + print(clock.fps()) + +# You can also pass get_histogram() an "roi=" to get just the histogram of that area. +# get_histogram() allows you to quickly determine the color channel information of +# any area in the image. diff --git a/scripts/examples/OpenMV/10-Color-Tracking/image_statistics_info.py b/scripts/examples/OpenMV/10-Color-Tracking/image_statistics_info.py new file mode 100644 index 000000000..04f306109 --- /dev/null +++ b/scripts/examples/OpenMV/10-Color-Tracking/image_statistics_info.py @@ -0,0 +1,23 @@ +# Image Statistics Info Example +# +# This script computes the statistics of the image and prints it out. + +import sensor, image, time + +sensor.reset() +sensor.set_pixformat(sensor.GRAYSCALE) # or RGB565.
+sensor.set_framesize(sensor.QVGA) +sensor.skip_frames(time = 2000) +sensor.set_auto_gain(False) # must be turned off for color tracking +sensor.set_auto_whitebal(False) # must be turned off for color tracking +clock = time.clock() + +while(True): + clock.tick() + img = sensor.snapshot() + print(img.get_statistics()) + print(clock.fps()) + +# You can also pass get_statistics() an "roi=" to get just the statistics of that area. +# get_statistics() allows you to quickly determine the color channel information of +# any area in the image. diff --git a/scripts/examples/OpenMV/10-Color-Tracking/ir_beacon_grayscale_tracking.py b/scripts/examples/OpenMV/10-Color-Tracking/ir_beacon_grayscale_tracking.py new file mode 100644 index 000000000..0a4d9e0b7 --- /dev/null +++ b/scripts/examples/OpenMV/10-Color-Tracking/ir_beacon_grayscale_tracking.py @@ -0,0 +1,30 @@ +# IR Beacon Grayscale Tracking Example +# +# This example shows off IR beacon Grayscale tracking using the OpenMV Cam. + +import sensor, image, time + +thresholds = (255, 255) # thresholds for bright white light from IR. + +sensor.reset() +sensor.set_pixformat(sensor.GRAYSCALE) +sensor.set_framesize(sensor.VGA) +sensor.set_windowing((240, 240)) # 240x240 center pixels of VGA +sensor.skip_frames(time = 2000) +sensor.set_auto_gain(False) # must be turned off for color tracking +sensor.set_auto_whitebal(False) # must be turned off for color tracking +clock = time.clock() + +# Only blobs with more pixels than "pixels_threshold" and more area than "area_threshold" are +# returned by "find_blobs" below. Change "pixels_threshold" and "area_threshold" if you change the +# camera resolution. "merge=True" merges all overlapping blobs in the image. + +while(True): + clock.tick() + img = sensor.snapshot() + for blob in img.find_blobs([thresholds], pixels_threshold=200, area_threshold=200, merge=True): + ratio = blob.w() / blob.h() + if (ratio >= 0.5) and (ratio <= 1.5): # filter out non-squarish blobs + img.draw_rectangle(blob.rect()) + img.draw_cross(blob.cx(), blob.cy()) + print(clock.fps()) diff --git a/scripts/examples/OpenMV/10-Color-Tracking/ir_beacon_rgb565_tracking.py b/scripts/examples/OpenMV/10-Color-Tracking/ir_beacon_rgb565_tracking.py new file mode 100644 index 000000000..033908131 --- /dev/null +++ b/scripts/examples/OpenMV/10-Color-Tracking/ir_beacon_rgb565_tracking.py @@ -0,0 +1,30 @@ +# IR Beacon RGB565 Tracking Example +# +# This example shows off IR beacon RGB565 tracking using the OpenMV Cam. + +import sensor, image, time + +thresholds = (100, 100, 0, 0, 0, 0) # thresholds for bright white light from IR. + +sensor.reset() +sensor.set_pixformat(sensor.RGB565) +sensor.set_framesize(sensor.VGA) +sensor.set_windowing((240, 240)) # 240x240 center pixels of VGA +sensor.skip_frames(time = 2000) +sensor.set_auto_gain(False) # must be turned off for color tracking +sensor.set_auto_whitebal(False) # must be turned off for color tracking +clock = time.clock() + +# Only blobs with more pixels than "pixels_threshold" and more area than "area_threshold" are +# returned by "find_blobs" below. Change "pixels_threshold" and "area_threshold" if you change the +# camera resolution. "merge=True" merges all overlapping blobs in the image.
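+ +# Added worked example (not in the original script): the ratio filter below +# keeps only roughly square blobs. A 40x30 blob has ratio 40/30 = 1.33 and is +# kept; an 80x20 blob has ratio 4.0 and is filtered out as non-squarish.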
+ +while(True): + clock.tick() + img = sensor.snapshot() + for blob in img.find_blobs([thresholds], pixels_threshold=200, area_threshold=200, merge=True): + ratio = blob.w() / blob.h() + if (ratio >= 0.5) and (ratio <= 1.5): # filter out non-squarish blobs + img.draw_rectangle(blob.rect()) + img.draw_cross(blob.cx(), blob.cy()) + print(clock.fps()) diff --git a/scripts/examples/OpenMV/10-Color-Tracking/multi_color_blob_tracking.py b/scripts/examples/OpenMV/10-Color-Tracking/multi_color_blob_tracking.py new file mode 100644 index 000000000..bc94d257d --- /dev/null +++ b/scripts/examples/OpenMV/10-Color-Tracking/multi_color_blob_tracking.py @@ -0,0 +1,41 @@ +# Multi Color Blob Tracking Example +# +# This example shows off multi color blob tracking using the OpenMV Cam. + +import sensor, image, time, math + +# Color Tracking Thresholds (L Min, L Max, A Min, A Max, B Min, B Max) +# The below thresholds track in general red/green things. You may wish to tune them... +thresholds = [(30, 100, 15, 127, 15, 127), # generic_red_thresholds + (30, 100, -64, -8, -32, 32), # generic_green_thresholds + (0, 15, 0, 40, -80, -20)] # generic_blue_thresholds +# You may pass up to 16 thresholds above. However, it's not really possible to segment any +# scene with 16 thresholds before color thresholds start to overlap heavily. + +sensor.reset() +sensor.set_pixformat(sensor.RGB565) +sensor.set_framesize(sensor.QVGA) +sensor.skip_frames(time = 2000) +sensor.set_auto_gain(False) # must be turned off for color tracking +sensor.set_auto_whitebal(False) # must be turned off for color tracking +clock = time.clock() + +# Only blobs with more pixels than "pixels_threshold" and more area than "area_threshold" are +# returned by "find_blobs" below. Change "pixels_threshold" and "area_threshold" if you change the +# camera resolution. Don't set "merge=True" because that will merge blobs which we don't want here. + +while(True): + clock.tick() + img = sensor.snapshot() + for blob in img.find_blobs(thresholds, pixels_threshold=200, area_threshold=200): + # These values depend on the blob not being circular - otherwise they will be shaky. + if blob.elongation() > 0.5: + img.draw_edges(blob.min_corners(), color=(255,0,0)) + img.draw_line(blob.major_axis_line(), color=(0,255,0)) + img.draw_line(blob.minor_axis_line(), color=(0,0,255)) + # These values are stable all the time. + img.draw_rectangle(blob.rect()) + img.draw_cross(blob.cx(), blob.cy()) + # Note - the blob rotation is unique to 0-180 only. + img.draw_keypoints([(blob.cx(), blob.cy(), int(math.degrees(blob.rotation())))], size=20) + print(clock.fps()) diff --git a/scripts/examples/OpenMV/10-Color-Tracking/multi_color_code_tracking.py b/scripts/examples/OpenMV/10-Color-Tracking/multi_color_code_tracking.py new file mode 100644 index 000000000..917fb5057 --- /dev/null +++ b/scripts/examples/OpenMV/10-Color-Tracking/multi_color_code_tracking.py @@ -0,0 +1,49 @@ +# Multi Color Code Tracking Example +# +# This example shows off multi color code tracking using the OpenMV Cam. +# +# A color code is a blob composed of two or more colors. The example below will +# only track colored objects which have two or more of the colors below in them. + +import sensor, image, time + +# Color Tracking Thresholds (L Min, L Max, A Min, A Max, B Min, B Max) +# The below thresholds track in general red/green things. You may wish to tune them...
+thresholds = [(30, 100, 15, 127, 15, 127), # generic_red_thresholds -> index is 0 so code == (1 << 0) + (30, 100, -64, -8, -32, 32), # generic_green_thresholds -> index is 1 so code == (1 << 1) + (0, 15, 0, 40, -80, -20)] # generic_blue_thresholds -> index is 2 so code == (1 << 2) +# Codes are or'ed together when "merge=True" for "find_blobs". + +sensor.reset() +sensor.set_pixformat(sensor.RGB565) +sensor.set_framesize(sensor.QVGA) +sensor.skip_frames(time = 2000) +sensor.set_auto_gain(False) # must be turned off for color tracking +sensor.set_auto_whitebal(False) # must be turned off for color tracking +clock = time.clock() + +# Only blobs with more pixels than "pixels_threshold" and more area than "area_threshold" are +# returned by "find_blobs" below. Change "pixels_threshold" and "area_threshold" if you change the +# camera resolution. "merge=True" must be set to merge overlapping color blobs for color codes. + +while(True): + clock.tick() + img = sensor.snapshot() + for blob in img.find_blobs(thresholds, pixels_threshold=100, area_threshold=100, merge=True): + if blob.code() == 3: # r/g code + img.draw_rectangle(blob.rect()) + img.draw_cross(blob.cx(), blob.cy()) + img.draw_string(blob.x() + 2, blob.y() + 2, "r/g") + if blob.code() == 5: # r/b code + img.draw_rectangle(blob.rect()) + img.draw_cross(blob.cx(), blob.cy()) + img.draw_string(blob.x() + 2, blob.y() + 2, "r/b") + if blob.code() == 6: # g/b code + img.draw_rectangle(blob.rect()) + img.draw_cross(blob.cx(), blob.cy()) + img.draw_string(blob.x() + 2, blob.y() + 2, "g/b") + if blob.code() == 7: # r/g/b code + img.draw_rectangle(blob.rect()) + img.draw_cross(blob.cx(), blob.cy()) + img.draw_string(blob.x() + 2, blob.y() + 2, "r/g/b") + print(clock.fps()) diff --git a/scripts/examples/OpenMV/10-Color-Tracking/single_color_code_tracking.py b/scripts/examples/OpenMV/10-Color-Tracking/single_color_code_tracking.py new file mode 100644 index 000000000..ed8fa651e --- /dev/null +++ b/scripts/examples/OpenMV/10-Color-Tracking/single_color_code_tracking.py @@ -0,0 +1,43 @@ +# Single Color Code Tracking Example +# +# This example shows off single color code tracking using the OpenMV Cam. +# +# A color code is a blob composed of two or more colors. The example below will +# only track colored objects which have both of the colors below in them. + +import sensor, image, time, math + +# Color Tracking Thresholds (L Min, L Max, A Min, A Max, B Min, B Max) +# The below thresholds track in general red/green things. You may wish to tune them... +thresholds = [(30, 100, 15, 127, 15, 127), # generic_red_thresholds -> index is 0 so code == (1 << 0) + (30, 100, -64, -8, -32, 32)] # generic_green_thresholds -> index is 1 so code == (1 << 1) +# Codes are or'ed together when "merge=True" for "find_blobs". + +sensor.reset() +sensor.set_pixformat(sensor.RGB565) +sensor.set_framesize(sensor.QVGA) +sensor.skip_frames(time = 2000) +sensor.set_auto_gain(False) # must be turned off for color tracking +sensor.set_auto_whitebal(False) # must be turned off for color tracking +clock = time.clock() + +# Only blobs with more pixels than "pixels_threshold" and more area than "area_threshold" are +# returned by "find_blobs" below. Change "pixels_threshold" and "area_threshold" if you change the +# camera resolution. "merge=True" must be set to merge overlapping color blobs for color codes.
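+ +# Added worked example (not in the original script): a merged blob containing +# both colors above has blob.code() == (1 << 0) | (1 << 1) == 3, which is +# exactly what the r/g check below tests for.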
+ +while(True): + clock.tick() + img = sensor.snapshot() + for blob in img.find_blobs(thresholds, pixels_threshold=100, area_threshold=100, merge=True): + if blob.code() == 3: # r/g code == (1 << 1) | (1 << 0) + # These values depend on the blob not being circular - otherwise they will be shaky. + if blob.elongation() > 0.5: + img.draw_edges(blob.min_corners(), color=(255,0,0)) + img.draw_line(blob.major_axis_line(), color=(0,255,0)) + img.draw_line(blob.minor_axis_line(), color=(0,0,255)) + # These values are stable all the time. + img.draw_rectangle(blob.rect()) + img.draw_cross(blob.cx(), blob.cy()) + # Note - the blob rotation is unique to 0-180 only. + img.draw_keypoints([(blob.cx(), blob.cy(), int(math.degrees(blob.rotation())))], size=20) + print(clock.fps()) diff --git a/scripts/examples/OpenMV/10-Color-Tracking/single_color_grayscale_blob_tracking.py b/scripts/examples/OpenMV/10-Color-Tracking/single_color_grayscale_blob_tracking.py new file mode 100644 index 000000000..8f8b44d3b --- /dev/null +++ b/scripts/examples/OpenMV/10-Color-Tracking/single_color_grayscale_blob_tracking.py @@ -0,0 +1,37 @@ +# Single Color Grayscale Blob Tracking Example +# +# This example shows off single color grayscale tracking using the OpenMV Cam. + +import sensor, image, time, math + +# Color Tracking Thresholds (Grayscale Min, Grayscale Max) +# The below grayscale threshold is set to only find extremely bright white areas. +thresholds = (245, 255) + +sensor.reset() +sensor.set_pixformat(sensor.GRAYSCALE) +sensor.set_framesize(sensor.VGA) +sensor.skip_frames(time = 2000) +sensor.set_auto_gain(False) # must be turned off for color tracking +sensor.set_auto_whitebal(False) # must be turned off for color tracking +clock = time.clock() + +# Only blobs with more pixels than "pixels_threshold" and more area than "area_threshold" are +# returned by "find_blobs" below. Change "pixels_threshold" and "area_threshold" if you change the +# camera resolution. "merge=True" merges all overlapping blobs in the image. + +while(True): + clock.tick() + img = sensor.snapshot() + for blob in img.find_blobs([thresholds], pixels_threshold=100, area_threshold=100, merge=True): + # These values depend on the blob not being circular - otherwise they will be shaky. + if blob.elongation() > 0.5: + img.draw_edges(blob.min_corners(), color=0) + img.draw_line(blob.major_axis_line(), color=0) + img.draw_line(blob.minor_axis_line(), color=0) + # These values are stable all the time. + img.draw_rectangle(blob.rect(), color=127) + img.draw_cross(blob.cx(), blob.cy(), color=127) + # Note - the blob rotation is unique to 0-180 only. + img.draw_keypoints([(blob.cx(), blob.cy(), int(math.degrees(blob.rotation())))], size=40, color=127) + print(clock.fps()) diff --git a/scripts/examples/OpenMV/10-Color-Tracking/single_color_rgb565_blob_tracking.py b/scripts/examples/OpenMV/10-Color-Tracking/single_color_rgb565_blob_tracking.py new file mode 100644 index 000000000..e7dc5bec1 --- /dev/null +++ b/scripts/examples/OpenMV/10-Color-Tracking/single_color_rgb565_blob_tracking.py @@ -0,0 +1,41 @@ +# Single Color RGB565 Blob Tracking Example +# +# This example shows off single color RGB565 tracking using the OpenMV Cam. + +import sensor, image, time, math + +threshold_index = 0 # 0 for red, 1 for green, 2 for blue + +# Color Tracking Thresholds (L Min, L Max, A Min, A Max, B Min, B Max) +# The below thresholds track in general red/green/blue things. You may wish to tune them...
+thresholds = [(30, 100, 15, 127, 15, 127), # generic_red_thresholds + (30, 100, -64, -8, -32, 32), # generic_green_thresholds + (0, 30, 0, 64, -128, 0)] # generic_blue_thresholds + +sensor.reset() +sensor.set_pixformat(sensor.RGB565) +sensor.set_framesize(sensor.QVGA) +sensor.skip_frames(time = 2000) +sensor.set_auto_gain(False) # must be turned off for color tracking +sensor.set_auto_whitebal(False) # must be turned off for color tracking +clock = time.clock() + +# Only blobs with more pixels than "pixels_threshold" and more area than "area_threshold" are +# returned by "find_blobs" below. Change "pixels_threshold" and "area_threshold" if you change the +# camera resolution. "merge=True" merges all overlapping blobs in the image. + +while(True): + clock.tick() + img = sensor.snapshot() + for blob in img.find_blobs([thresholds[threshold_index]], pixels_threshold=200, area_threshold=200, merge=True): + # These values depend on the blob not being circular - otherwise they will be shaky. + if blob.elongation() > 0.5: + img.draw_edges(blob.min_corners(), color=(255,0,0)) + img.draw_line(blob.major_axis_line(), color=(0,255,0)) + img.draw_line(blob.minor_axis_line(), color=(0,0,255)) + # These values are stable all the time. + img.draw_rectangle(blob.rect()) + img.draw_cross(blob.cx(), blob.cy()) + # Note - the blob rotation is unique to 0-180 only. + img.draw_keypoints([(blob.cx(), blob.cy(), int(math.degrees(blob.rotation())))], size=20) + print(clock.fps()) diff --git a/scripts/examples/11-LCD-Shield/lcd.py b/scripts/examples/OpenMV/11-LCD-Shield/lcd.py similarity index 100% rename from scripts/examples/11-LCD-Shield/lcd.py rename to scripts/examples/OpenMV/11-LCD-Shield/lcd.py diff --git a/scripts/examples/12-Thermopile-Shield/AMG8833_camera.py b/scripts/examples/OpenMV/12-Thermopile-Shield/AMG8833_camera.py similarity index 100% rename from scripts/examples/12-Thermopile-Shield/AMG8833_camera.py rename to scripts/examples/OpenMV/12-Thermopile-Shield/AMG8833_camera.py diff --git a/scripts/examples/12-Thermopile-Shield/AMG8833_camera_lcd.py b/scripts/examples/OpenMV/12-Thermopile-Shield/AMG8833_camera_lcd.py similarity index 100% rename from scripts/examples/12-Thermopile-Shield/AMG8833_camera_lcd.py rename to scripts/examples/OpenMV/12-Thermopile-Shield/AMG8833_camera_lcd.py diff --git a/scripts/examples/12-Thermopile-Shield/AMG8833_overlay.py b/scripts/examples/OpenMV/12-Thermopile-Shield/AMG8833_overlay.py similarity index 100% rename from scripts/examples/12-Thermopile-Shield/AMG8833_overlay.py rename to scripts/examples/OpenMV/12-Thermopile-Shield/AMG8833_overlay.py diff --git a/scripts/examples/12-Thermopile-Shield/AMG8833_overlay_lcd.py b/scripts/examples/OpenMV/12-Thermopile-Shield/AMG8833_overlay_lcd.py similarity index 100% rename from scripts/examples/12-Thermopile-Shield/AMG8833_overlay_lcd.py rename to scripts/examples/OpenMV/12-Thermopile-Shield/AMG8833_overlay_lcd.py diff --git a/scripts/examples/12-Thermopile-Shield/MLX90621_camera.py b/scripts/examples/OpenMV/12-Thermopile-Shield/MLX90621_camera.py similarity index 100% rename from scripts/examples/12-Thermopile-Shield/MLX90621_camera.py rename to scripts/examples/OpenMV/12-Thermopile-Shield/MLX90621_camera.py diff --git a/scripts/examples/12-Thermopile-Shield/MLX90621_camera_lcd.py b/scripts/examples/OpenMV/12-Thermopile-Shield/MLX90621_camera_lcd.py similarity index 100% rename from scripts/examples/12-Thermopile-Shield/MLX90621_camera_lcd.py rename to
scripts/examples/OpenMV/12-Thermopile-Shield/MLX90621_camera_lcd.py diff --git a/scripts/examples/12-Thermopile-Shield/MLX90621_overlay.py b/scripts/examples/OpenMV/12-Thermopile-Shield/MLX90621_overlay.py similarity index 100% rename from scripts/examples/12-Thermopile-Shield/MLX90621_overlay.py rename to scripts/examples/OpenMV/12-Thermopile-Shield/MLX90621_overlay.py diff --git a/scripts/examples/12-Thermopile-Shield/MLX90621_overlay_lcd.py b/scripts/examples/OpenMV/12-Thermopile-Shield/MLX90621_overlay_lcd.py similarity index 100% rename from scripts/examples/12-Thermopile-Shield/MLX90621_overlay_lcd.py rename to scripts/examples/OpenMV/12-Thermopile-Shield/MLX90621_overlay_lcd.py diff --git a/scripts/examples/12-Thermopile-Shield/MLX90640_camera.py b/scripts/examples/OpenMV/12-Thermopile-Shield/MLX90640_camera.py similarity index 100% rename from scripts/examples/12-Thermopile-Shield/MLX90640_camera.py rename to scripts/examples/OpenMV/12-Thermopile-Shield/MLX90640_camera.py diff --git a/scripts/examples/12-Thermopile-Shield/MLX90640_camera_lcd.py b/scripts/examples/OpenMV/12-Thermopile-Shield/MLX90640_camera_lcd.py similarity index 100% rename from scripts/examples/12-Thermopile-Shield/MLX90640_camera_lcd.py rename to scripts/examples/OpenMV/12-Thermopile-Shield/MLX90640_camera_lcd.py diff --git a/scripts/examples/12-Thermopile-Shield/MLX90640_overlay.py b/scripts/examples/OpenMV/12-Thermopile-Shield/MLX90640_overlay.py similarity index 100% rename from scripts/examples/12-Thermopile-Shield/MLX90640_overlay.py rename to scripts/examples/OpenMV/12-Thermopile-Shield/MLX90640_overlay.py diff --git a/scripts/examples/12-Thermopile-Shield/MLX90640_overlay_lcd.py b/scripts/examples/OpenMV/12-Thermopile-Shield/MLX90640_overlay_lcd.py similarity index 100% rename from scripts/examples/12-Thermopile-Shield/MLX90640_overlay_lcd.py rename to scripts/examples/OpenMV/12-Thermopile-Shield/MLX90640_overlay_lcd.py diff --git a/scripts/examples/12-Thermopile-Shield/MLX90640_overlay_smoothed.py b/scripts/examples/OpenMV/12-Thermopile-Shield/MLX90640_overlay_smoothed.py similarity index 100% rename from scripts/examples/12-Thermopile-Shield/MLX90640_overlay_smoothed.py rename to scripts/examples/OpenMV/12-Thermopile-Shield/MLX90640_overlay_smoothed.py diff --git a/scripts/examples/13-BLE-Shield/ble.py b/scripts/examples/OpenMV/13-BLE-Shield/ble.py similarity index 100% rename from scripts/examples/13-BLE-Shield/ble.py rename to scripts/examples/OpenMV/13-BLE-Shield/ble.py diff --git a/scripts/examples/14-WiFi-Shield/connect.py b/scripts/examples/OpenMV/14-WiFi-Shield/connect.py similarity index 100% rename from scripts/examples/14-WiFi-Shield/connect.py rename to scripts/examples/OpenMV/14-WiFi-Shield/connect.py diff --git a/scripts/examples/14-WiFi-Shield/dns.py b/scripts/examples/OpenMV/14-WiFi-Shield/dns.py similarity index 100% rename from scripts/examples/14-WiFi-Shield/dns.py rename to scripts/examples/OpenMV/14-WiFi-Shield/dns.py diff --git a/scripts/examples/14-WiFi-Shield/fw_update.py b/scripts/examples/OpenMV/14-WiFi-Shield/fw_update.py similarity index 100% rename from scripts/examples/14-WiFi-Shield/fw_update.py rename to scripts/examples/OpenMV/14-WiFi-Shield/fw_update.py diff --git a/scripts/examples/14-WiFi-Shield/http_client.py b/scripts/examples/OpenMV/14-WiFi-Shield/http_client.py similarity index 100% rename from scripts/examples/14-WiFi-Shield/http_client.py rename to scripts/examples/OpenMV/14-WiFi-Shield/http_client.py diff --git 
a/scripts/examples/14-WiFi-Shield/http_client_ssl.py b/scripts/examples/OpenMV/14-WiFi-Shield/http_client_ssl.py similarity index 100% rename from scripts/examples/14-WiFi-Shield/http_client_ssl.py rename to scripts/examples/OpenMV/14-WiFi-Shield/http_client_ssl.py diff --git a/scripts/examples/14-WiFi-Shield/mjpeg_streamer.py b/scripts/examples/OpenMV/14-WiFi-Shield/mjpeg_streamer.py similarity index 100% rename from scripts/examples/14-WiFi-Shield/mjpeg_streamer.py rename to scripts/examples/OpenMV/14-WiFi-Shield/mjpeg_streamer.py diff --git a/scripts/examples/14-WiFi-Shield/mjpeg_streamer_ap.py b/scripts/examples/OpenMV/14-WiFi-Shield/mjpeg_streamer_ap.py similarity index 100% rename from scripts/examples/14-WiFi-Shield/mjpeg_streamer_ap.py rename to scripts/examples/OpenMV/14-WiFi-Shield/mjpeg_streamer_ap.py diff --git a/scripts/examples/14-WiFi-Shield/mjpeg_streamer_fir.py b/scripts/examples/OpenMV/14-WiFi-Shield/mjpeg_streamer_fir.py similarity index 100% rename from scripts/examples/14-WiFi-Shield/mjpeg_streamer_fir.py rename to scripts/examples/OpenMV/14-WiFi-Shield/mjpeg_streamer_fir.py diff --git a/scripts/examples/14-WiFi-Shield/mqtt_pub.py b/scripts/examples/OpenMV/14-WiFi-Shield/mqtt_pub.py similarity index 100% rename from scripts/examples/14-WiFi-Shield/mqtt_pub.py rename to scripts/examples/OpenMV/14-WiFi-Shield/mqtt_pub.py diff --git a/scripts/examples/14-WiFi-Shield/mqtt_sub.py b/scripts/examples/OpenMV/14-WiFi-Shield/mqtt_sub.py similarity index 100% rename from scripts/examples/14-WiFi-Shield/mqtt_sub.py rename to scripts/examples/OpenMV/14-WiFi-Shield/mqtt_sub.py diff --git a/scripts/examples/14-WiFi-Shield/ntp.py b/scripts/examples/OpenMV/14-WiFi-Shield/ntp.py similarity index 100% rename from scripts/examples/14-WiFi-Shield/ntp.py rename to scripts/examples/OpenMV/14-WiFi-Shield/ntp.py diff --git a/scripts/examples/14-WiFi-Shield/scan.py b/scripts/examples/OpenMV/14-WiFi-Shield/scan.py similarity index 100% rename from scripts/examples/14-WiFi-Shield/scan.py rename to scripts/examples/OpenMV/14-WiFi-Shield/scan.py diff --git a/scripts/examples/14-WiFi-Shield/static_ip.py b/scripts/examples/OpenMV/14-WiFi-Shield/static_ip.py similarity index 100% rename from scripts/examples/14-WiFi-Shield/static_ip.py rename to scripts/examples/OpenMV/14-WiFi-Shield/static_ip.py diff --git a/scripts/examples/15-Servo-Shield/main.py b/scripts/examples/OpenMV/15-Servo-Shield/main.py similarity index 100% rename from scripts/examples/15-Servo-Shield/main.py rename to scripts/examples/OpenMV/15-Servo-Shield/main.py diff --git a/scripts/examples/15-Servo-Shield/pca9685.py b/scripts/examples/OpenMV/15-Servo-Shield/pca9685.py similarity index 100% rename from scripts/examples/15-Servo-Shield/pca9685.py rename to scripts/examples/OpenMV/15-Servo-Shield/pca9685.py diff --git a/scripts/examples/15-Servo-Shield/servo.py b/scripts/examples/OpenMV/15-Servo-Shield/servo.py similarity index 100% rename from scripts/examples/15-Servo-Shield/servo.py rename to scripts/examples/OpenMV/15-Servo-Shield/servo.py diff --git a/scripts/examples/OpenMV/16-Codes/find_barcodes.py b/scripts/examples/OpenMV/16-Codes/find_barcodes.py new file mode 100644 index 000000000..07dee507c --- /dev/null +++ b/scripts/examples/OpenMV/16-Codes/find_barcodes.py @@ -0,0 +1,65 @@ +# Barcode Example +# +# This example shows off how easy it is to detect bar codes using the +# OpenMV Cam M7. Barcode detection does not work on the M4 Camera. 
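Editor's note: the find_barcodes.py script below maps code.type() to a readable name with a long chain of if statements. A dict lookup is a more compact equivalent; this is a sketch, assuming the same image.* type constants and the same script context (image already imported), not a change to the committed file:

BARCODE_NAMES = {
    image.EAN2: "EAN2", image.EAN5: "EAN5", image.EAN8: "EAN8",
    image.UPCE: "UPCE", image.ISBN10: "ISBN10", image.UPCA: "UPCA",
    image.EAN13: "EAN13", image.ISBN13: "ISBN13", image.I25: "I25",
    image.DATABAR: "DATABAR", image.DATABAR_EXP: "DATABAR_EXP",
    image.CODABAR: "CODABAR", image.CODE39: "CODE39", image.PDF417: "PDF417",
    image.CODE93: "CODE93", image.CODE128: "CODE128",
}

def barcode_name(code):
    # Fall back to a placeholder for any type constant not in the table.
    return BARCODE_NAMES.get(code.type(), "UNKNOWN")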
+ +import sensor, image, time, math + +sensor.reset() +sensor.set_pixformat(sensor.GRAYSCALE) +sensor.set_framesize(sensor.VGA) # High Res! +sensor.set_windowing((640, 80)) # V Res of 80 == less work (40 for 2X the speed). +sensor.skip_frames(time = 2000) +sensor.set_auto_gain(False) # must turn this off to prevent image washout... +sensor.set_auto_whitebal(False) # must turn this off to prevent image washout... +clock = time.clock() + +# Barcode detection can run at the full 640x480 resolution of your OpenMV Cam's +# OV7725 camera module. Barcode detection will also work in RGB565 mode but at +# a lower resolution. That said, barcode detection requires a higher resolution +# to work well so it should always be run at 640x480 in grayscale... + +def barcode_name(code): + if(code.type() == image.EAN2): + return "EAN2" + if(code.type() == image.EAN5): + return "EAN5" + if(code.type() == image.EAN8): + return "EAN8" + if(code.type() == image.UPCE): + return "UPCE" + if(code.type() == image.ISBN10): + return "ISBN10" + if(code.type() == image.UPCA): + return "UPCA" + if(code.type() == image.EAN13): + return "EAN13" + if(code.type() == image.ISBN13): + return "ISBN13" + if(code.type() == image.I25): + return "I25" + if(code.type() == image.DATABAR): + return "DATABAR" + if(code.type() == image.DATABAR_EXP): + return "DATABAR_EXP" + if(code.type() == image.CODABAR): + return "CODABAR" + if(code.type() == image.CODE39): + return "CODE39" + if(code.type() == image.PDF417): + return "PDF417" + if(code.type() == image.CODE93): + return "CODE93" + if(code.type() == image.CODE128): + return "CODE128" + +while(True): + clock.tick() + img = sensor.snapshot() + codes = img.find_barcodes() + for code in codes: + img.draw_rectangle(code.rect()) + print_args = (barcode_name(code), code.payload(), (180 * code.rotation()) / math.pi, code.quality(), clock.fps()) + print("Barcode %s, Payload \"%s\", rotation %f (degrees), quality %d, FPS %f" % print_args) + if not codes: + print("FPS %f" % clock.fps()) diff --git a/scripts/examples/OpenMV/16-Codes/find_datamatrices.py b/scripts/examples/OpenMV/16-Codes/find_datamatrices.py new file mode 100644 index 000000000..95d0fe38a --- /dev/null +++ b/scripts/examples/OpenMV/16-Codes/find_datamatrices.py @@ -0,0 +1,27 @@ +# Find Data Matrices Example +# +# This example shows off how easy it is to detect data matrices using the +# OpenMV Cam M7. Data matrices detection does not work on the M4 Camera. + +import sensor, image, time, math + +sensor.reset() +sensor.set_pixformat(sensor.RGB565) +sensor.set_framesize(sensor.QVGA) +sensor.skip_frames(time = 2000) +sensor.set_auto_gain(False) # must turn this off to prevent image washout... +sensor.set_auto_whitebal(False) # must turn this off to prevent image washout... +clock = time.clock() + +while(True): + clock.tick() + img = sensor.snapshot() + img.lens_corr(1.8) # strength of 1.8 is good for the 2.8mm lens. 
+ + matrices = img.find_datamatrices() + for matrix in matrices: + img.draw_rectangle(matrix.rect(), color = (255, 0, 0)) + print_args = (matrix.rows(), matrix.columns(), matrix.payload(), (180 * matrix.rotation()) / math.pi, clock.fps()) + print("Matrix [%d:%d], Payload \"%s\", rotation %f (degrees), FPS %f" % print_args) + if not matrices: + print("FPS %f" % clock.fps()) diff --git a/scripts/examples/OpenMV/16-Codes/find_datamatrices_w_lens_zoom.py b/scripts/examples/OpenMV/16-Codes/find_datamatrices_w_lens_zoom.py new file mode 100644 index 000000000..889326df1 --- /dev/null +++ b/scripts/examples/OpenMV/16-Codes/find_datamatrices_w_lens_zoom.py @@ -0,0 +1,27 @@ +# Find Data Matrices w/ Lens Zoom Example +# +# This example shows off how easy it is to detect data matrices using the +# OpenMV Cam M7. Data Matrix detection does not work on the M4 Camera. + +import sensor, image, time, math + +sensor.reset() +sensor.set_pixformat(sensor.RGB565) +sensor.set_framesize(sensor.VGA) +sensor.set_windowing((320, 240)) # 2x Zoom +sensor.skip_frames(time = 2000) +sensor.set_auto_gain(False) # must turn this off to prevent image washout... +sensor.set_auto_whitebal(False) # must turn this off to prevent image washout... +clock = time.clock() + +while(True): + clock.tick() + img = sensor.snapshot() + + matrices = img.find_datamatrices() + for matrix in matrices: + img.draw_rectangle(matrix.rect(), color = (255, 0, 0)) + print_args = (matrix.rows(), matrix.columns(), matrix.payload(), (180 * matrix.rotation()) / math.pi, clock.fps()) + print("Matrix [%d:%d], Payload \"%s\", rotation %f (degrees), FPS %f" % print_args) + if not matrices: + print("FPS %f" % clock.fps()) diff --git a/scripts/examples/OpenMV/16-Codes/qrcodes_with_lens_corr.py b/scripts/examples/OpenMV/16-Codes/qrcodes_with_lens_corr.py new file mode 100644 index 000000000..06b839f49 --- /dev/null +++ b/scripts/examples/OpenMV/16-Codes/qrcodes_with_lens_corr.py @@ -0,0 +1,22 @@ +# QRCode Example +# +# This example shows the power of the OpenMV Cam to detect QR Codes +# using lens correction (see the qrcodes_with_lens_zoom.py script for higher performance). + +import sensor, image, time + +sensor.reset() +sensor.set_pixformat(sensor.RGB565) +sensor.set_framesize(sensor.QVGA) +sensor.skip_frames(time = 2000) +sensor.set_auto_gain(False) # must turn this off to prevent image washout... +clock = time.clock() + +while(True): + clock.tick() + img = sensor.snapshot() + img.lens_corr(1.8) # strength of 1.8 is good for the 2.8mm lens. + for code in img.find_qrcodes(): + img.draw_rectangle(code.rect(), color = (255, 0, 0)) + print(code) + print(clock.fps()) diff --git a/scripts/examples/OpenMV/16-Codes/qrcodes_with_lens_zoom.py b/scripts/examples/OpenMV/16-Codes/qrcodes_with_lens_zoom.py new file mode 100644 index 000000000..c9798a4bf --- /dev/null +++ b/scripts/examples/OpenMV/16-Codes/qrcodes_with_lens_zoom.py @@ -0,0 +1,22 @@ +# QRCode Example +# +# This example shows the power of the OpenMV Cam to detect QR Codes +# without needing lens correction. + +import sensor, image, time + +sensor.reset() +sensor.set_pixformat(sensor.GRAYSCALE) +sensor.set_framesize(sensor.VGA) +sensor.set_windowing((240, 240)) # look at center 240x240 pixels of the VGA resolution. +sensor.skip_frames(time = 2000) +sensor.set_auto_gain(False) # must turn this off to prevent image washout...
+clock = time.clock() + +while(True): + clock.tick() + img = sensor.snapshot() + for code in img.find_qrcodes(): + img.draw_rectangle(code.rect(), color = 127) + print(code) + print(clock.fps()) diff --git a/scripts/examples/OpenMV/17-Pixy-Emulation/apriltags_pixy_i2c_emulation.py b/scripts/examples/OpenMV/17-Pixy-Emulation/apriltags_pixy_i2c_emulation.py new file mode 100644 index 000000000..18e394fb5 --- /dev/null +++ b/scripts/examples/OpenMV/17-Pixy-Emulation/apriltags_pixy_i2c_emulation.py @@ -0,0 +1,245 @@ +# AprilTags Pixy I2C Emulation Script +# +# This script allows your OpenMV Cam to transmit AprilTag detection data like +# a Pixy (CMUcam5) tracking colors in I2C mode. This script allows you to +# easily replace a Pixy (CMUcam5) color tracking sensor with an OpenMV Cam +# AprilTag tracking sensor. Note that this only runs on the OpenMV Cam M7. +# +# P4 = SCL +# P5 = SDA +# +# P7 = Servo 1 +# P8 = Servo 2 + +# Note: The tag family is TAG36H11. Additionally, in order to for the +# signature value of a tag detection to be compatible with pixy +# interface libraries all tag ids have 8 added to them in order +# to move them in the color code signature range. Finally, tags +# are all reported as color code blocks... + +# Pixy Parameters ############################################################ + +max_blocks = 1000 +max_blocks_per_id = 1000 + +i2c_address = 0x54 + +# Pan Servo +s0_lower_limit = 1000 # Servo pulse width lower limit in microseconds. +s0_upper_limit = 2000 # Servo pulse width upper limit in microseconds. + +# Tilt Servo +s1_lower_limit = 1000 # Servo pulse width lower limit in microseconds. +s1_upper_limit = 2000 # Servo pulse width upper limit in microseconds. + +analog_out_enable = False # P6 -> Analog Out (0v - 3.3v). +analog_out_mode = 0 # 0 == x position of largest tag - 1 == y position of largest tag + +############################################################################## + +import image, math, pyb, sensor, struct, time + +# Camera Setup + +sensor.reset() +sensor.set_pixformat(sensor.GRAYSCALE) +sensor.set_framesize(sensor.QQVGA) +sensor.skip_frames(time = 2000) + +# LED Setup + +red_led = pyb.LED(1) +green_led = pyb.LED(2) +blue_led = pyb.LED(3) + +red_led.off() +green_led.off() +blue_led.off() + +# DAC Setup + +dac = pyb.DAC("P6") if analog_out_enable else None + +if dac: + dac.write(0) + +# Servo Setup + +min_s0_limit = min(s0_lower_limit, s0_upper_limit) +max_s0_limit = max(s0_lower_limit, s0_upper_limit) +min_s1_limit = min(s1_lower_limit, s1_upper_limit) +max_s1_limit = max(s1_lower_limit, s1_upper_limit) + +s0_pan = pyb.Servo(1) # P7 +s1_tilt = pyb.Servo(2) # P8 + +s0_pan.pulse_width(int((max_s0_limit - min_s0_limit) // 2)) # center +s1_tilt.pulse_width(int((max_s1_limit - min_s1_limit) // 2)) # center + +s0_pan_conversion_factor = (max_s0_limit - min_s0_limit) / 1000 +s1_tilt_conversion_factor = (max_s1_limit - min_s1_limit) / 1000 + +def s0_pan_position(value): + s0_pan.pulse_width(round(s0_lower_limit + (max(min(value, 1000), 0) * s0_pan_conversion_factor))) + +def s1_tilt_position(value): + s1_tilt.pulse_width(round(s1_lower_limit + (max(min(value, 1000), 0) * s1_tilt_conversion_factor))) + +# Link Setup + +bus = pyb.I2C(2, pyb.I2C.SLAVE, addr = i2c_address) + +def write(data): + # Prepare the data to transmit first so we can do it quickly. + out_data = [] + for i in range(0, len(data), 2): + out_data.append(data[i:i+2]) + # Disable interrupts so we can send all packets without gaps. 
+ state = pyb.disable_irq() + for i in range(len(out_data)): + max_exceptions = 10 + loop = True + while(loop): + try: + bus.send(out_data[i], timeout = 1) + loop = False + except OSError as error: + if(max_exceptions <= 0): + pyb.enable_irq(state) + return + max_exceptions -= 1 + pyb.enable_irq(state) + +def available(): + return 0 # Not implemented as there is no way for the us to be ready to receive the data. + +def read_byte(): + return 0 # Not implemented as there is no way for the us to be ready to receive the data. + +# Helper Stuff + +def checksum(data): + checksum = 0 + for i in range(0, len(data), 2): + checksum += ((data[i+1] & 0xFF) << 8) | ((data[i+0] & 0xFF) << 0) + return checksum & 0xFFFF + +def to_object_block_format(tag): + angle = int((tag.rotation() * 180) // math.pi) + temp = struct.pack(" 0) and (max_blocks_per_id > 0): # new frame + dat_buf = struct.pack(" Analog Out (0v - 3.3v). +analog_out_mode = 0 # 0 == x position of largest tag - 1 == y position of largest tag + +############################################################################## + +import image, math, pyb, sensor, struct, time + +# Camera Setup + +sensor.reset() +sensor.set_pixformat(sensor.GRAYSCALE) +sensor.set_framesize(sensor.QQVGA) +sensor.skip_frames(time = 2000) + +# LED Setup + +red_led = pyb.LED(1) +green_led = pyb.LED(2) +blue_led = pyb.LED(3) + +red_led.off() +green_led.off() +blue_led.off() + +# DAC Setup + +dac = pyb.DAC("P6") if analog_out_enable else None + +if dac: + dac.write(0) + +# Servo Setup + +min_s0_limit = min(s0_lower_limit, s0_upper_limit) +max_s0_limit = max(s0_lower_limit, s0_upper_limit) +min_s1_limit = min(s1_lower_limit, s1_upper_limit) +max_s1_limit = max(s1_lower_limit, s1_upper_limit) + +s0_pan = pyb.Servo(1) # P7 +s1_tilt = pyb.Servo(2) # P8 + +s0_pan.pulse_width(int((max_s0_limit - min_s0_limit) // 2)) # center +s1_tilt.pulse_width(int((max_s1_limit - min_s1_limit) // 2)) # center + +s0_pan_conversion_factor = (max_s0_limit - min_s0_limit) / 1000 +s1_tilt_conversion_factor = (max_s1_limit - min_s1_limit) / 1000 + +def s0_pan_position(value): + s0_pan.pulse_width(round(s0_lower_limit + (max(min(value, 1000), 0) * s0_pan_conversion_factor))) + +def s1_tilt_position(value): + s1_tilt.pulse_width(round(s1_lower_limit + (max(min(value, 1000), 0) * s1_tilt_conversion_factor))) + +# Link Setup + +bus = pyb.SPI(2, pyb.SPI.SLAVE, polarity = 0, phase = 0, bits = 16) +while(True): + try: + sync_bytes = bus.recv(2, timeout = 10) + if((sync_bytes[0] == 0x00) and (sync_bytes[1] == 0x5A)): + break + except OSError as error: + pass + + bus.deinit() + bus.init(pyb.SPI.SLAVE, polarity = 0, phase = 0, bits = 16) + +def write(data): + + max_exceptions = 10 + loop = True + while(loop): + try: + bus.send(data, timeout = 10) + loop = False + except OSError as error: + if(max_exceptions <= 0): + return + max_exceptions -= 1 + +def available(): + return 0 # Not implemented as there is no way for the us to be ready to receive the data. + +def read_byte(): + return 0 # Not implemented as there is no way for the us to be ready to receive the data. + +# Helper Stuff + +def checksum(data): + checksum = 0 + for i in range(0, len(data), 2): + checksum += ((data[i+1] & 0xFF) << 8) | ((data[i+0] & 0xFF) << 0) + return checksum & 0xFFFF + +def to_object_block_format(tag): + angle = int((tag.rotation() * 180) // math.pi) + temp = struct.pack(" 0) and (max_blocks_per_id > 0): # new frame + dat_buf = struct.pack(" Analog Out (0v - 3.3v). 
+analog_out_mode = 0 # 0 == x position of largest tag - 1 == y position of largest tag + +############################################################################## + +import image, math, pyb, sensor, struct, time + +# Camera Setup + +sensor.reset() +sensor.set_pixformat(sensor.GRAYSCALE) +sensor.set_framesize(sensor.QQVGA) +sensor.skip_frames(time = 2000) + +# LED Setup + +red_led = pyb.LED(1) +green_led = pyb.LED(2) +blue_led = pyb.LED(3) + +red_led.off() +green_led.off() +blue_led.off() + +# DAC Setup + +dac = pyb.DAC("P6") if analog_out_enable else None + +if dac: + dac.write(0) + +# Servo Setup + +min_s0_limit = min(s0_lower_limit, s0_upper_limit) +max_s0_limit = max(s0_lower_limit, s0_upper_limit) +min_s1_limit = min(s1_lower_limit, s1_upper_limit) +max_s1_limit = max(s1_lower_limit, s1_upper_limit) + +s0_pan = pyb.Servo(1) # P7 +s1_tilt = pyb.Servo(2) # P8 + +s0_pan.pulse_width(int((max_s0_limit - min_s0_limit) // 2)) # center +s1_tilt.pulse_width(int((max_s1_limit - min_s1_limit) // 2)) # center + +s0_pan_conversion_factor = (max_s0_limit - min_s0_limit) / 1000 +s1_tilt_conversion_factor = (max_s1_limit - min_s1_limit) / 1000 + +def s0_pan_position(value): + s0_pan.pulse_width(round(s0_lower_limit + (max(min(value, 1000), 0) * s0_pan_conversion_factor))) + +def s1_tilt_position(value): + s1_tilt.pulse_width(round(s1_lower_limit + (max(min(value, 1000), 0) * s1_tilt_conversion_factor))) + +# Link Setup + +uart = pyb.UART(3, uart_baudrate, timeout_char = 1000) + +def write(data): + uart.write(data) + +def available(): + return uart.any() + +def read_byte(): + return uart.readchar() + +# Helper Stuff + +def checksum(data): + checksum = 0 + for i in range(0, len(data), 2): + checksum += ((data[i+1] & 0xFF) << 8) | ((data[i+0] & 0xFF) << 0) + return checksum & 0xFFFF + +def to_object_block_format(tag): + angle = int((tag.rotation() * 180) // math.pi) + temp = struct.pack(" 0) and (max_blocks_per_id > 0): # new frame + dat_buf = struct.pack(" Analog Out (0v - 3.3v). +analog_out_mode = 0 # 0 == x position of largest blob - 1 == y position of largest blob + +# Parameter 0 - L Min. +# Parameter 1 - L Max. +# Parameter 2 - A Min. +# Parameter 3 - A Max. +# Parameter 4 - B Min. +# Parameter 5 - B Max. +# Parameter 6 - Is Color Code Threshold? (True/False). +# Parameter 7 - Enable Threshold? (True/False). 
+lab_color_thresholds = [(0, 100, 40, 127, -128, 127, True, True), # Generic Red Threshold + (0, 100, -128, -10, -128, 127, True, True), # Generic Green Threshold + (0, 0, 0, 0, 0, 0, False, False), + (0, 0, 0, 0, 0, 0, False, False), + (0, 0, 0, 0, 0, 0, False, False), + (0, 0, 0, 0, 0, 0, False, False), + (0, 0, 0, 0, 0, 0, False, False)] + +fb_pixels_threshold = 500 # minimum number of pixels that must be in a blob +fb_merge_margin = 5 # how close pixel wise blobs can be before merging + +############################################################################## + +e_lab_color_thresholds = [] # enabled thresholds +e_lab_color_code = [] # enabled color code +e_lab_color_signatures = [] # original enabled threshold indexes +for i in range(len(lab_color_thresholds)): + if lab_color_thresholds[i][7]: + e_lab_color_thresholds.append(lab_color_thresholds[i][0:6]) + e_lab_color_code.append(lab_color_thresholds[i][6]) + e_lab_color_signatures.append(i + 1) + +import image, math, pyb, sensor, struct, time + +# Camera Setup + +sensor.reset() +sensor.set_pixformat(sensor.RGB565) +sensor.set_framesize(sensor.QVGA) +sensor.skip_frames(time = 2000) +sensor.set_auto_gain(False) +sensor.set_auto_whitebal(False) + +# LED Setup + +red_led = pyb.LED(1) +green_led = pyb.LED(2) +blue_led = pyb.LED(3) + +red_led.off() +green_led.off() +blue_led.off() + +# DAC Setup + +dac = pyb.DAC("P6") if analog_out_enable else None + +if dac: + dac.write(0) + +# Servo Setup + +min_s0_limit = min(s0_lower_limit, s0_upper_limit) +max_s0_limit = max(s0_lower_limit, s0_upper_limit) +min_s1_limit = min(s1_lower_limit, s1_upper_limit) +max_s1_limit = max(s1_lower_limit, s1_upper_limit) + +s0_pan = pyb.Servo(1) # P7 +s1_tilt = pyb.Servo(2) # P8 + +s0_pan.pulse_width(int((max_s0_limit - min_s0_limit) // 2)) # center +s1_tilt.pulse_width(int((max_s1_limit - min_s1_limit) // 2)) # center + +s0_pan_conversion_factor = (max_s0_limit - min_s0_limit) / 1000 +s1_tilt_conversion_factor = (max_s1_limit - min_s1_limit) / 1000 + +def s0_pan_position(value): + s0_pan.pulse_width(round(s0_lower_limit + (max(min(value, 1000), 0) * s0_pan_conversion_factor))) + +def s1_tilt_position(value): + s1_tilt.pulse_width(round(s1_lower_limit + (max(min(value, 1000), 0) * s1_tilt_conversion_factor))) + +# Link Setup + +bus = pyb.I2C(2, pyb.I2C.SLAVE, addr = i2c_address) + +def write(data): + # Prepare the data to transmit first so we can do it quickly. + out_data = [] + for i in range(0, len(data), 2): + out_data.append(data[i:i+2]) + # Disable interrupts so we can send all packets without gaps. + state = pyb.disable_irq() + for i in range(len(out_data)): + max_exceptions = 10 + loop = True + while(loop): + try: + bus.send(out_data[i], timeout = 1) + loop = False + except OSError as error: + if(max_exceptions <= 0): + pyb.enable_irq(state) + return + max_exceptions -= 1 + pyb.enable_irq(state) + +def available(): + return 0 # Not implemented as there is no way for the us to be ready to receive the data. + +def read_byte(): + return 0 # Not implemented as there is no way for the us to be ready to receive the data. 
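Editor's note: a quick trace of the threshold-enable loop above, using the default lab_color_thresholds table (only the first two rows have Parameter 7 set to True). This is a sketch of the expected result, not extra script code:

# e_lab_color_thresholds == [(0, 100, 40, 127, -128, 127),   # red row, sliced to its six L/A/B values
#                            (0, 100, -128, -10, -128, 127)] # green row
# e_lab_color_code       == [True, True]  # both enabled rows are color-code thresholds
# e_lab_color_signatures == [1, 2]        # Pixy signatures are the 1-indexed row numbers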
+ +# Helper Stuff + +def checksum(data): + checksum = 0 + for i in range(0, len(data), 2): + checksum += ((data[i+1] & 0xFF) << 8) | ((data[i+0] & 0xFF) << 0) + return checksum & 0xFFFF + +def get_normal_signature(code): + for i in range(len(e_lab_color_signatures)): + if code & (1 << i): + return e_lab_color_signatures[i] + return 0 + +def to_normal_object_block_format(blob): + temp = struct.pack(" 1) or (not color_code(blob.code())) + elif(pri_color_code_mode == 2): # only color codes with two or more colors + return (bits_set(blob.code()) > 1) + elif(pri_color_code_mode == 3): + return True + +clock = time.clock() +while(True): + clock.tick() + img = sensor.snapshot() + blobs = list(filter(blob_filter, img.find_blobs(e_lab_color_thresholds, area_threshold = min_block_area, pixels_threshold = fb_pixels_threshold, merge = True, margin = fb_merge_margin, merge_cb = fb_merge_cb))) + + # Transmit Blobs # + + if blobs and (max_blocks > 0) and (max_blocks_per_signature > 0): # new frame + dat_buf = struct.pack(" Analog Out (0v - 3.3v). +analog_out_mode = 0 # 0 == x position of largest blob - 1 == y position of largest blob + +# Parameter 0 - L Min. +# Parameter 1 - L Max. +# Parameter 2 - A Min. +# Parameter 3 - A Max. +# Parameter 4 - B Min. +# Parameter 5 - B Max. +# Parameter 6 - Is Color Code Threshold? (True/False). +# Parameter 7 - Enable Threshold? (True/False). +lab_color_thresholds = [(0, 100, 40, 127, -128, 127, True, True), # Generic Red Threshold + (0, 100, -128, -10, -128, 127, True, True), # Generic Green Threshold + (0, 0, 0, 0, 0, 0, False, False), + (0, 0, 0, 0, 0, 0, False, False), + (0, 0, 0, 0, 0, 0, False, False), + (0, 0, 0, 0, 0, 0, False, False), + (0, 0, 0, 0, 0, 0, False, False)] + +fb_pixels_threshold = 500 # minimum number of pixels that must be in a blob +fb_merge_margin = 5 # how close pixel wise blobs can be before merging + +############################################################################## + +e_lab_color_thresholds = [] # enabled thresholds +e_lab_color_code = [] # enabled color code +e_lab_color_signatures = [] # original enabled threshold indexes +for i in range(len(lab_color_thresholds)): + if lab_color_thresholds[i][7]: + e_lab_color_thresholds.append(lab_color_thresholds[i][0:6]) + e_lab_color_code.append(lab_color_thresholds[i][6]) + e_lab_color_signatures.append(i + 1) + +import image, math, pyb, sensor, struct, time + +# Camera Setup + +sensor.reset() +sensor.set_pixformat(sensor.RGB565) +sensor.set_framesize(sensor.QVGA) +sensor.skip_frames(time = 2000) +sensor.set_auto_gain(False) +sensor.set_auto_whitebal(False) + +# LED Setup + +red_led = pyb.LED(1) +green_led = pyb.LED(2) +blue_led = pyb.LED(3) + +red_led.off() +green_led.off() +blue_led.off() + +# DAC Setup + +dac = pyb.DAC("P6") if analog_out_enable else None + +if dac: + dac.write(0) + +# Servo Setup + +min_s0_limit = min(s0_lower_limit, s0_upper_limit) +max_s0_limit = max(s0_lower_limit, s0_upper_limit) +min_s1_limit = min(s1_lower_limit, s1_upper_limit) +max_s1_limit = max(s1_lower_limit, s1_upper_limit) + +s0_pan = pyb.Servo(1) # P7 +s1_tilt = pyb.Servo(2) # P8 + +s0_pan.pulse_width(int((max_s0_limit - min_s0_limit) // 2)) # center +s1_tilt.pulse_width(int((max_s1_limit - min_s1_limit) // 2)) # center + +s0_pan_conversion_factor = (max_s0_limit - min_s0_limit) / 1000 +s1_tilt_conversion_factor = (max_s1_limit - min_s1_limit) / 1000 + +def s0_pan_position(value): + s0_pan.pulse_width(round(s0_lower_limit + (max(min(value, 1000), 0) * s0_pan_conversion_factor))) + +def 
s1_tilt_position(value): + s1_tilt.pulse_width(round(s1_lower_limit + (max(min(value, 1000), 0) * s1_tilt_conversion_factor))) + +# Link Setup + +bus = pyb.SPI(2, pyb.SPI.SLAVE, polarity = 0, phase = 0, bits = 16) +while(True): + try: + sync_bytes = bus.recv(2, timeout = 10) + if((sync_bytes[0] == 0x00) and (sync_bytes[1] == 0x5A)): + break + except OSError as error: + pass + + bus.deinit() + bus.init(pyb.SPI.SLAVE, polarity = 0, phase = 0, bits = 16) + +def write(data): + + max_exceptions = 10 + loop = True + while(loop): + try: + bus.send(data, timeout = 10) + loop = False + except OSError as error: + if(max_exceptions <= 0): + return + max_exceptions -= 1 + +def available(): + return 0 # Not implemented as there is no way for the us to be ready to receive the data. + +def read_byte(): + return 0 # Not implemented as there is no way for the us to be ready to receive the data. + +# Helper Stuff + +def checksum(data): + checksum = 0 + for i in range(0, len(data), 2): + checksum += ((data[i+1] & 0xFF) << 8) | ((data[i+0] & 0xFF) << 0) + return checksum & 0xFFFF + +def get_normal_signature(code): + for i in range(len(e_lab_color_signatures)): + if code & (1 << i): + return e_lab_color_signatures[i] + return 0 + +def to_normal_object_block_format(blob): + temp = struct.pack(" 1) or (not color_code(blob.code())) + elif(pri_color_code_mode == 2): # only color codes with two or more colors + return (bits_set(blob.code()) > 1) + elif(pri_color_code_mode == 3): + return True + +clock = time.clock() +while(True): + clock.tick() + img = sensor.snapshot() + blobs = list(filter(blob_filter, img.find_blobs(e_lab_color_thresholds, area_threshold = min_block_area, pixels_threshold = fb_pixels_threshold, merge = True, margin = fb_merge_margin, merge_cb = fb_merge_cb))) + + # Transmit Blobs # + + if blobs and (max_blocks > 0) and (max_blocks_per_signature > 0): # new frame + dat_buf = struct.pack(" Analog Out (0v - 3.3v). +analog_out_mode = 0 # 0 == x position of largest blob - 1 == y position of largest blob + +# Parameter 0 - L Min. +# Parameter 1 - L Max. +# Parameter 2 - A Min. +# Parameter 3 - A Max. +# Parameter 4 - B Min. +# Parameter 5 - B Max. +# Parameter 6 - Is Color Code Threshold? (True/False). +# Parameter 7 - Enable Threshold? (True/False). 
+lab_color_thresholds = [(0, 100, 40, 127, -128, 127, True, True), # Generic Red Threshold + (0, 100, -128, -10, -128, 127, True, True), # Generic Green Threshold + (0, 0, 0, 0, 0, 0, False, False), + (0, 0, 0, 0, 0, 0, False, False), + (0, 0, 0, 0, 0, 0, False, False), + (0, 0, 0, 0, 0, 0, False, False), + (0, 0, 0, 0, 0, 0, False, False)] + +fb_pixels_threshold = 500 # minimum number of pixels that must be in a blob +fb_merge_margin = 5 # how close pixel wise blobs can be before merging + +############################################################################## + +e_lab_color_thresholds = [] # enabled thresholds +e_lab_color_code = [] # enabled color code +e_lab_color_signatures = [] # original enabled threshold indexes +for i in range(len(lab_color_thresholds)): + if lab_color_thresholds[i][7]: + e_lab_color_thresholds.append(lab_color_thresholds[i][0:6]) + e_lab_color_code.append(lab_color_thresholds[i][6]) + e_lab_color_signatures.append(i + 1) + +import image, math, pyb, sensor, struct, time + +# Camera Setup + +sensor.reset() +sensor.set_pixformat(sensor.RGB565) +sensor.set_framesize(sensor.QVGA) +sensor.skip_frames(time = 2000) +sensor.set_auto_gain(False) +sensor.set_auto_whitebal(False) + +# LED Setup + +red_led = pyb.LED(1) +green_led = pyb.LED(2) +blue_led = pyb.LED(3) + +red_led.off() +green_led.off() +blue_led.off() + +# DAC Setup + +dac = pyb.DAC("P6") if analog_out_enable else None + +if dac: + dac.write(0) + +# Servo Setup + +min_s0_limit = min(s0_lower_limit, s0_upper_limit) +max_s0_limit = max(s0_lower_limit, s0_upper_limit) +min_s1_limit = min(s1_lower_limit, s1_upper_limit) +max_s1_limit = max(s1_lower_limit, s1_upper_limit) + +s0_pan = pyb.Servo(1) # P7 +s1_tilt = pyb.Servo(2) # P8 + +s0_pan.pulse_width(int((max_s0_limit - min_s0_limit) // 2)) # center +s1_tilt.pulse_width(int((max_s1_limit - min_s1_limit) // 2)) # center + +s0_pan_conversion_factor = (max_s0_limit - min_s0_limit) / 1000 +s1_tilt_conversion_factor = (max_s1_limit - min_s1_limit) / 1000 + +def s0_pan_position(value): + s0_pan.pulse_width(round(s0_lower_limit + (max(min(value, 1000), 0) * s0_pan_conversion_factor))) + +def s1_tilt_position(value): + s1_tilt.pulse_width(round(s1_lower_limit + (max(min(value, 1000), 0) * s1_tilt_conversion_factor))) + +# Link Setup + +uart = pyb.UART(3, uart_baudrate, timeout_char = 1000) + +def write(data): + uart.write(data) + +def available(): + return uart.any() + +def read_byte(): + return uart.readchar() + +# Helper Stuff + +def checksum(data): + checksum = 0 + for i in range(0, len(data), 2): + checksum += ((data[i+1] & 0xFF) << 8) | ((data[i+0] & 0xFF) << 0) + return checksum & 0xFFFF + +def get_normal_signature(code): + for i in range(len(e_lab_color_signatures)): + if code & (1 << i): + return e_lab_color_signatures[i] + return 0 + +def to_normal_object_block_format(blob): + temp = struct.pack(" 1) or (not color_code(blob.code())) + elif(pri_color_code_mode == 2): # only color codes with two or more colors + return (bits_set(blob.code()) > 1) + elif(pri_color_code_mode == 3): + return True + +clock = time.clock() +while(True): + clock.tick() + img = sensor.snapshot() + blobs = list(filter(blob_filter, img.find_blobs(e_lab_color_thresholds, area_threshold = min_block_area, pixels_threshold = fb_pixels_threshold, merge = True, margin = fb_merge_margin, merge_cb = fb_merge_cb))) + + # Transmit Blobs # + + if blobs and (max_blocks > 0) and (max_blocks_per_signature > 0): # new frame + dat_buf = struct.pack("> 8) ^ (tmp << 8) ^ (tmp << 3) ^ (tmp >> 4)) & 0xFFFF 
+ tmp = extra ^ (output & 0xFF) + tmp = (tmp ^ (tmp << 4)) & 0xFF + output = ((output >> 8) ^ (tmp << 8) ^ (tmp << 3) ^ (tmp >> 4)) & 0xFFFF + return output + +MAV_DISTANCE_SENSOR_message_id = 132 +MAV_DISTANCE_SENSOR_min_distance = 1 # in cm +MAV_DISTANCE_SENSOR_max_distance = 10000 # in cm +MAV_DISTANCE_SENSOR_type = 0 # MAV_DISTANCE_SENSOR_LASER +MAV_DISTANCE_SENSOR_id = 0 # unused +MAV_DISTANCE_SENSOR_orientation = 25 # MAV_SENSOR_ROTATION_PITCH_270 +MAV_DISTANCE_SENSOR_covariance = 0 # unused +MAV_DISTANCE_SENSOR_extra_crc = 85 + +# http://mavlink.org/messages/common#DISTANCE_SENSOR +# https://github.com/mavlink/c_library_v1/blob/master/common/mavlink_msg_distance_sensor.h +def send_distance_sensor_packet(tag, tag_size): + global packet_sequence + temp = struct.pack("= 20: + led.off() + led_state = 0 + +# Link Setup + +uart = pyb.UART(3, uart_baudrate, timeout_char = 1000) + +# Helper Stuff + +packet_sequence = 0 + +def checksum(data, extra): # https://github.com/mavlink/c_library_v1/blob/master/checksum.h + output = 0xFFFF + for i in range(len(data)): + tmp = data[i] ^ (output & 0xFF) + tmp = (tmp ^ (tmp << 4)) & 0xFF + output = ((output >> 8) ^ (tmp << 8) ^ (tmp << 3) ^ (tmp >> 4)) & 0xFFFF + tmp = extra ^ (output & 0xFF) + tmp = (tmp ^ (tmp << 4)) & 0xFF + output = ((output >> 8) ^ (tmp << 8) ^ (tmp << 3) ^ (tmp >> 4)) & 0xFFFF + return output + +MAV_OPTICAL_FLOW_message_id = 100 +MAV_OPTICAL_FLOW_id = 0 # unused +MAV_OPTICAL_FLOW_extra_crc = 175 + +# http://mavlink.org/messages/common#OPTICAL_FLOW +# https://github.com/mavlink/c_library_v1/blob/master/common/mavlink_msg_optical_flow.h +def send_optical_flow_packet(x, y, c): + global packet_sequence + temp = struct.pack(" BG_UPDATE_FRAMES): + frame_count = 0 + # Blend in new frame. We're doing 256-alpha here because we want to + # blend the new frame into the backgound. Not the background into the + # new frame which would be just alpha. Blend replaces each pixel by + # ((NEW*(alpha))+(OLD*(256-alpha)))/256. So, a low alpha results in + # low blending of the new image while a high alpha results in high + # blending of the new image. We need to reverse that for this update. + img.blend(extra_fb, alpha=(256-BG_UPDATE_BLEND)) + extra_fb.replace(img) + + # Replace the image with the "abs(NEW-OLD)" frame difference. + img.difference(extra_fb) + + hist = img.get_histogram() + # This code below works by comparing the 99th percentile value (e.g. the + # non-outlier max value against the 90th percentile value (e.g. a non-max + # value. The difference between the two values will grow as the difference + # image seems more pixels change. + diff = hist.get_percentile(0.99).l_value() - hist.get_percentile(0.90).l_value() + triggered = diff > TRIGGER_THRESHOLD + + print(clock.fps(), triggered) # Note: Your OpenMV Cam runs about half as fast while + # connected to your computer. The FPS should increase once disconnected. diff --git a/scripts/examples/OpenMV/20-Frame-Differencing/in_memory_basic_frame_differencing.py b/scripts/examples/OpenMV/20-Frame-Differencing/in_memory_basic_frame_differencing.py new file mode 100644 index 000000000..cbe0daed2 --- /dev/null +++ b/scripts/examples/OpenMV/20-Frame-Differencing/in_memory_basic_frame_differencing.py @@ -0,0 +1,47 @@ +# In Memory Basic Frame Differencing Example +# +# This example demonstrates using frame differencing with your OpenMV Cam. It's +# called basic frame differencing because there's no background image update. +# So, as time passes the background image may change resulting in issues. 
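Editor's note: a numeric check of the background-update blend used by the advanced frame differencing scripts above. Blend replaces each pixel by ((NEW * alpha) + (OLD * (256 - alpha))) / 256, and the scripts pass alpha = 256 - BG_UPDATE_BLEND; with BG_UPDATE_BLEND = 128 that is an even 50/50 mix. A standalone sketch with illustrative pixel values:

new_px, old_px = 200, 100  # hypothetical new-frame and background pixel values
alpha = 256 - 128          # 256 - BG_UPDATE_BLEND
blended = ((new_px * alpha) + (old_px * (256 - alpha))) // 256
assert blended == 150      # halfway between the old background and the new frame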
+ +import sensor, image, pyb, os, time + +TRIGGER_THRESHOLD = 5 + +sensor.reset() # Initialize the camera sensor. +sensor.set_pixformat(sensor.RGB565) # or sensor.GRAYSCALE +sensor.set_framesize(sensor.QVGA) # or sensor.QQVGA (or others) +sensor.skip_frames(time = 2000) # Let new settings take effect. +sensor.set_auto_whitebal(False) # Turn off white balance. +clock = time.clock() # Tracks FPS. + +# Take from the main frame buffer's RAM to allocate a second frame buffer. +# There's a lot more RAM in the frame buffer than in the MicroPython heap. +# However, after doing this you have a lot less RAM for some algorithms... +# So, be aware that it's a lot easier to get out of RAM issues now. However, +# frame differencing doesn't use a lot of the extra space in the frame buffer. +# But, things like AprilTags do and won't work if you do this... +extra_fb = sensor.alloc_extra_fb(sensor.width(), sensor.height(), sensor.RGB565) + +print("About to save background image...") +sensor.skip_frames(time = 2000) # Give the user time to get ready. +extra_fb.replace(sensor.snapshot()) +print("Saved background image - Now frame differencing!") + +while(True): + clock.tick() # Track elapsed milliseconds between snapshots(). + img = sensor.snapshot() # Take a picture and return the image. + + # Replace the image with the "abs(NEW-OLD)" frame difference. + img.difference(extra_fb) + + hist = img.get_histogram() + # The code below works by comparing the 99th percentile value (i.e. the + # non-outlier max value) against the 90th percentile value (i.e. a non-max + # value). The difference between the two values grows as more pixels in the + # difference image change. + diff = hist.get_percentile(0.99).l_value() - hist.get_percentile(0.90).l_value() + triggered = diff > TRIGGER_THRESHOLD + + print(clock.fps(), triggered) # Note: Your OpenMV Cam runs about half as fast while + # connected to your computer. The FPS should increase once disconnected. diff --git a/scripts/examples/OpenMV/20-Frame-Differencing/in_memory_shadow_removal.py b/scripts/examples/OpenMV/20-Frame-Differencing/in_memory_shadow_removal.py new file mode 100644 index 000000000..b1f65895f --- /dev/null +++ b/scripts/examples/OpenMV/20-Frame-Differencing/in_memory_shadow_removal.py @@ -0,0 +1,53 @@ +# In Memory Shadow Removal w/ Frame Differencing Example +# +# This example demonstrates using frame differencing with your OpenMV Cam using +# shadow removal to help reduce the effects of cast shadows in your scene. + +import sensor, image, pyb, os, time + +TRIGGER_THRESHOLD = 5 + +sensor.reset() # Initialize the camera sensor. +sensor.set_pixformat(sensor.RGB565) # or sensor.GRAYSCALE +sensor.set_framesize(sensor.QQVGA) # or sensor.QVGA (or others) +if sensor.get_id() == sensor.OV7725: # Reduce sensor PLL from 6x to 4x. + sensor.__write_reg(0x0D, (sensor.__read_reg(0x0D) & 0x3F) | 0x40) +sensor.skip_frames(time = 2000) # Let new settings take effect. +sensor.set_auto_whitebal(False) # Turn off white balance. +sensor.set_auto_gain(False) # Turn this off too. +clock = time.clock() # Tracks FPS. + +# Take from the main frame buffer's RAM to allocate a second frame buffer. +# There's a lot more RAM in the frame buffer than in the MicroPython heap. +# However, after doing this you have a lot less RAM for some algorithms... +# So, be aware that it's a lot easier to get out of RAM issues now. However, +# frame differencing doesn't use a lot of the extra space in the frame buffer. +# But, things like AprilTags do and won't work if you do this...
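Editor's note: the trigger metric in these frame differencing scripts is the spread between the 99th and 90th percentile brightness of the difference image. A self-contained sketch of why that works, emulated on plain lists rather than the img.get_histogram() API (all values are made up for illustration):

def percentile(values, p):
    # Simplified percentile: index into the sorted values.
    values = sorted(values)
    return values[min(int(p * len(values)), len(values) - 1)]

static_scene = [0] * 199 + [5]        # difference image: only a little noise
moving_scene = [0] * 190 + [40] * 10  # difference image: ~5% of pixels changed a lot
for scene in (static_scene, moving_scene):
    spread = percentile(scene, 0.99) - percentile(scene, 0.90)
    print(spread > 5)  # False for the static scene, True for the moving one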
+extra_fb = sensor.alloc_extra_fb(sensor.width(), sensor.height(), sensor.RGB565) + +print("About to save background image...") +sensor.skip_frames(time = 2000) # Give the user time to get ready. +extra_fb.replace(sensor.snapshot()) +print("Saved background image - Now frame differencing!") + +while(True): + clock.tick() # Track elapsed milliseconds between snapshots(). + img = sensor.snapshot() # Take a picture and return the image. + + # Note that for shadow removal to work the background image must be + # shadow free and have the same lighting as the latest image. Unlike max(), + # shadow removal won't remove all dark objects unless they were shadows... + + # Replace the image with the "abs(NEW-OLD)" frame difference. + img.remove_shadows(extra_fb).difference(extra_fb) + + hist = img.get_histogram() + # The code below works by comparing the 99th percentile value (i.e. the + # non-outlier max value) against the 90th percentile value (i.e. a non-max + # value). The difference between the two values grows as more pixels in the + # difference image change. + diff = hist.get_percentile(0.99).l_value() - hist.get_percentile(0.90).l_value() + triggered = diff > TRIGGER_THRESHOLD + + print(clock.fps(), triggered) # Note: Your OpenMV Cam runs about half as fast while + # connected to your computer. The FPS should increase once disconnected. diff --git a/scripts/examples/OpenMV/20-Frame-Differencing/in_memory_structural_similarity.py b/scripts/examples/OpenMV/20-Frame-Differencing/in_memory_structural_similarity.py new file mode 100644 index 000000000..18762ff81 --- /dev/null +++ b/scripts/examples/OpenMV/20-Frame-Differencing/in_memory_structural_similarity.py @@ -0,0 +1,39 @@ +# Structural Similarity (SSIM) Example +# +# This example shows off how to use the SSIM algorithm on your OpenMV Cam +# to detect differences between two images. The SSIM algorithm compares +# 8x8 blocks of pixels between two images to determine a similarity score. + +import sensor, image, pyb, os, time + +# The image has likely changed if the sim.min() is lower than this. +MIN_TRIGGER_THRESHOLD = -0.4 + +sensor.reset() # Initialize the camera sensor. +sensor.set_pixformat(sensor.RGB565) # or sensor.GRAYSCALE +sensor.set_framesize(sensor.QVGA) # or sensor.QQVGA (or others) +sensor.skip_frames(time = 2000) # Let new settings take effect. +sensor.set_auto_whitebal(False) # Turn off white balance. +clock = time.clock() # Tracks FPS. + +# Take from the main frame buffer's RAM to allocate a second frame buffer. +# There's a lot more RAM in the frame buffer than in the MicroPython heap. +# However, after doing this you have a lot less RAM for some algorithms... +# So, be aware that it's a lot easier to get out of RAM issues now. However, +# frame differencing doesn't use a lot of the extra space in the frame buffer. +# But, things like AprilTags do and won't work if you do this... +extra_fb = sensor.alloc_extra_fb(sensor.width(), sensor.height(), sensor.RGB565) + +print("About to save background image...") +sensor.skip_frames(time = 2000) # Give the user time to get ready. +extra_fb.replace(sensor.snapshot()) +print("Saved background image!") + +while(True): + clock.tick() # Track elapsed milliseconds between snapshots(). + img = sensor.snapshot() # Take a picture and return the image.
+ sim = img.get_similarity(extra_fb) + change = "- Change -" if sim.min() < MIN_TRIGGER_THRESHOLD else "- No Change -" + + print(clock.fps(), change, sim) diff --git a/scripts/examples/OpenMV/20-Frame-Differencing/on_disk_advanced_frame_differencing.py b/scripts/examples/OpenMV/20-Frame-Differencing/on_disk_advanced_frame_differencing.py new file mode 100644 index 000000000..503643d13 --- /dev/null +++ b/scripts/examples/OpenMV/20-Frame-Differencing/on_disk_advanced_frame_differencing.py @@ -0,0 +1,61 @@ +# Advanced Frame Differencing Example +# +# Note: You will need an SD card to run this example. +# +# This example demonstrates using frame differencing with your OpenMV Cam. This +# example is advanced because it performs a background update to deal with the +# background image changing over time. + +import sensor, image, pyb, os, time + +TRIGGER_THRESHOLD = 5 + +BG_UPDATE_FRAMES = 50 # How many frames before blending. +BG_UPDATE_BLEND = 128 # How much to blend by... ([0-256]==[0.0-1.0]). + +sensor.reset() # Initialize the camera sensor. +sensor.set_pixformat(sensor.RGB565) # or sensor.GRAYSCALE +sensor.set_framesize(sensor.QVGA) # or sensor.QQVGA (or others) +sensor.skip_frames(time = 2000) # Let new settings take effect. +sensor.set_auto_whitebal(False) # Turn off white balance. +clock = time.clock() # Tracks FPS. + +if not "temp" in os.listdir(): os.mkdir("temp") # Make a temp directory + +print("About to save background image...") +sensor.skip_frames(time = 2000) # Give the user time to get ready. +sensor.snapshot().save("temp/bg.bmp") +print("Saved background image - Now frame differencing!") + +triggered = False + +frame_count = 0 +while(True): + clock.tick() # Track elapsed milliseconds between snapshots(). + img = sensor.snapshot() # Take a picture and return the image. + + frame_count += 1 + if (frame_count > BG_UPDATE_FRAMES): + frame_count = 0 + # Blend in new frame. We're doing 256-alpha here because we want to + # blend the new frame into the background. Not the background into the + # new frame which would be just alpha. Blend replaces each pixel by + # ((NEW*(alpha))+(OLD*(256-alpha)))/256. So, a low alpha results in + # low blending of the new image while a high alpha results in high + # blending of the new image. We need to reverse that for this update. + img.blend("temp/bg.bmp", alpha=(256-BG_UPDATE_BLEND)) + img.save("temp/bg.bmp") + + # Replace the image with the "abs(NEW-OLD)" frame difference. + img.difference("temp/bg.bmp") + + hist = img.get_histogram() + # The code below works by comparing the 99th percentile value (i.e. the + # non-outlier max value) against the 90th percentile value (i.e. a non-max + # value). The difference between the two values grows as more pixels in the + # difference image change. + diff = hist.get_percentile(0.99).l_value() - hist.get_percentile(0.90).l_value() + triggered = diff > TRIGGER_THRESHOLD + + print(clock.fps(), triggered) # Note: Your OpenMV Cam runs about half as fast while + # connected to your computer. The FPS should increase once disconnected. diff --git a/scripts/examples/OpenMV/20-Frame-Differencing/on_disk_basic_frame_differencing.py b/scripts/examples/OpenMV/20-Frame-Differencing/on_disk_basic_frame_differencing.py new file mode 100644 index 000000000..2b0b775e9 --- /dev/null +++ b/scripts/examples/OpenMV/20-Frame-Differencing/on_disk_basic_frame_differencing.py @@ -0,0 +1,43 @@ +# Basic Frame Differencing Example +# +# Note: You will need an SD card to run this example.
+# +# This example demonstrates using frame differencing with your OpenMV Cam. It's +# called basic frame differencing because there's no background image update. +# So, as time passes the background image may change, resulting in issues. + +import sensor, image, pyb, os, time + +TRIGGER_THRESHOLD = 5 + +sensor.reset() # Initialize the camera sensor. +sensor.set_pixformat(sensor.RGB565) # or sensor.GRAYSCALE +sensor.set_framesize(sensor.QVGA) # or sensor.QQVGA (or others) +sensor.skip_frames(time = 2000) # Let new settings take effect. +sensor.set_auto_whitebal(False) # Turn off white balance. +clock = time.clock() # Tracks FPS. + +if not "temp" in os.listdir(): os.mkdir("temp") # Make a temp directory + +print("About to save background image...") +sensor.skip_frames(time = 2000) # Give the user time to get ready. +sensor.snapshot().save("temp/bg.bmp") +print("Saved background image - Now frame differencing!") + +while(True): + clock.tick() # Track elapsed milliseconds between snapshots(). + img = sensor.snapshot() # Take a picture and return the image. + + # Replace the image with the "abs(NEW-OLD)" frame difference. + img.difference("temp/bg.bmp") + + hist = img.get_histogram() + # The code below works by comparing the 99th percentile value (i.e. the + # non-outlier max value) against the 90th percentile value (i.e. a non-max + # value). The difference between the two values grows as more pixels in the + # difference image change. + diff = hist.get_percentile(0.99).l_value() - hist.get_percentile(0.90).l_value() + triggered = diff > TRIGGER_THRESHOLD + + print(clock.fps(), triggered) # Note: Your OpenMV Cam runs about half as fast while + # connected to your computer. The FPS should increase once disconnected. diff --git a/scripts/examples/OpenMV/20-Frame-Differencing/on_disk_shadow_removal.py b/scripts/examples/OpenMV/20-Frame-Differencing/on_disk_shadow_removal.py new file mode 100644 index 000000000..a48677ae8 --- /dev/null +++ b/scripts/examples/OpenMV/20-Frame-Differencing/on_disk_shadow_removal.py @@ -0,0 +1,49 @@ +# On Disk Shadow Removal w/ Frame Differencing Example +# +# Note: You will need an SD card to run this example. +# +# This example demonstrates using frame differencing with your OpenMV Cam using +# shadow removal to help reduce the effects of cast shadows in your scene. + +import sensor, image, pyb, os, time + +TRIGGER_THRESHOLD = 5 + +sensor.reset() # Initialize the camera sensor. +sensor.set_pixformat(sensor.RGB565) # or sensor.GRAYSCALE +sensor.set_framesize(sensor.QQVGA) # or sensor.QVGA (or others) +if sensor.get_id() == sensor.OV7725: # Reduce sensor PLL from 6x to 4x. + sensor.__write_reg(0x0D, (sensor.__read_reg(0x0D) & 0x3F) | 0x40) +sensor.skip_frames(time = 2000) # Let new settings take effect. +sensor.set_auto_whitebal(False) # Turn off white balance. +sensor.set_auto_gain(False) # Turn this off too. +clock = time.clock() # Tracks FPS. + +if not "temp" in os.listdir(): os.mkdir("temp") # Make a temp directory + +print("About to save background image...") +sensor.skip_frames(time = 2000) # Give the user time to get ready. +sensor.snapshot().save("temp/bg.bmp") +print("Saved background image - Now frame differencing!") + +while(True): + clock.tick() # Track elapsed milliseconds between snapshots(). + img = sensor.snapshot() # Take a picture and return the image. + + # Note that for shadow removal to work the background image must be + # shadow free and have the same lighting as the latest image.
Unlike max(), + # shadow removal won't remove all dark objects unless they were shadows... + + # Replace the image with the "abs(NEW-OLD)" frame difference. + img.remove_shadows("temp/bg.bmp").difference("temp/bg.bmp") + + hist = img.get_histogram() + # The code below works by comparing the 99th percentile value (i.e. the + # non-outlier max value) against the 90th percentile value (i.e. a non-max + # value). The difference between the two values grows as more pixels in the + # difference image change. + diff = hist.get_percentile(0.99).l_value() - hist.get_percentile(0.90).l_value() + triggered = diff > TRIGGER_THRESHOLD + + print(clock.fps(), triggered) # Note: Your OpenMV Cam runs about half as fast while + # connected to your computer. The FPS should increase once disconnected. diff --git a/scripts/examples/OpenMV/20-Frame-Differencing/on_disk_structural_similarity.py b/scripts/examples/OpenMV/20-Frame-Differencing/on_disk_structural_similarity.py new file mode 100644 index 000000000..4a1e73706 --- /dev/null +++ b/scripts/examples/OpenMV/20-Frame-Differencing/on_disk_structural_similarity.py @@ -0,0 +1,35 @@ +# Structural Similarity (SSIM) Example +# +# Note: You will need an SD card to run this example. +# +# This example shows off how to use the SSIM algorithm on your OpenMV Cam +# to detect differences between two images. The SSIM algorithm compares +# 8x8 blocks of pixels between two images to determine a similarity score. + +import sensor, image, pyb, os, time + +# The image has likely changed if the sim.min() is lower than this. +MIN_TRIGGER_THRESHOLD = -0.4 + +sensor.reset() # Initialize the camera sensor. +sensor.set_pixformat(sensor.RGB565) # or sensor.GRAYSCALE +sensor.set_framesize(sensor.QVGA) # or sensor.QQVGA (or others) +sensor.skip_frames(time = 2000) # Let new settings take effect. +sensor.set_auto_whitebal(False) # Turn off white balance. +clock = time.clock() # Tracks FPS. + +if not "temp" in os.listdir(): os.mkdir("temp") # Make a temp directory + +print("About to save background image...") +sensor.skip_frames(time = 2000) # Give the user time to get ready. +sensor.snapshot().save("temp/bg.bmp") +print("Saved background image!") + +while(True): + clock.tick() # Track elapsed milliseconds between snapshots(). + img = sensor.snapshot() # Take a picture and return the image. + sim = img.get_similarity("temp/bg.bmp") + change = "- Change -" if sim.min() < MIN_TRIGGER_THRESHOLD else "- No Change -" + + print(clock.fps(), change, sim) diff --git a/scripts/examples/OpenMV/21-Sensor-Control/sensor_auto_gain_control.py b/scripts/examples/OpenMV/21-Sensor-Control/sensor_auto_gain_control.py new file mode 100644 index 000000000..3ecdc89d7 --- /dev/null +++ b/scripts/examples/OpenMV/21-Sensor-Control/sensor_auto_gain_control.py @@ -0,0 +1,45 @@ +# Sensor Auto Gain Control +# +# This example shows off how to control the sensor's gain +# using the automatic gain control algorithm. + +# What's the difference between gain and exposure control? +# +# Well, by increasing the exposure time for the image you're getting more +# light on the camera. This gives you the best signal to noise ratio. You +# in general always want to increase the exposure time... except, when you +# increase the exposure time you decrease the maximum possible frame rate +# and if anything moves in the image it will start to blur more with a +# higher exposure time. Gain control allows you to increase the output per +# pixel using analog and digital multipliers...
+# however, it also amplifies noise. So, it's best to let the exposure
+# increase as much as possible and then use gain control to make up any
+# remaining ground.
+
+# We can achieve the above by setting a gain ceiling on the automatic
+# gain control algorithm. Once this is set the algorithm will have to
+# increase the exposure time to meet any gain needs versus using gain
+# to do so. However, this comes at the price of the exposure time varying
+# more when the lighting changes versus the exposure being constant and
+# the gain changing.
+
+import sensor, image, time
+
+sensor.reset() # Reset and initialize the sensor.
+sensor.set_pixformat(sensor.RGB565) # Set pixel format to RGB565 (or GRAYSCALE)
+sensor.set_framesize(sensor.QVGA) # Set frame size to QVGA (320x240)
+
+# The gain dB ceiling maxes out at about 24 dB for the OV7725 sensor.
+sensor.set_auto_gain(True, gain_db_ceiling = 16.0) # Default gain.
+
+# Note! If you set the gain ceiling too low without adjusting the exposure control
+# target value then you'll just get a lot of oscillation from the exposure
+# control if it's on.
+
+sensor.skip_frames(time = 2000) # Wait for settings to take effect.
+clock = time.clock() # Create a clock object to track the FPS.
+
+while(True):
+    clock.tick() # Update the FPS clock.
+    img = sensor.snapshot() # Take a picture and return the image.
+    print("FPS %f, Gain %f dB, Exposure %d us" % \
+        (clock.fps(), sensor.get_gain_db(), sensor.get_exposure_us()))
diff --git a/scripts/examples/OpenMV/21-Sensor-Control/sensor_exposure_control.py b/scripts/examples/OpenMV/21-Sensor-Control/sensor_exposure_control.py
new file mode 100644
index 000000000..edc994c44
--- /dev/null
+++ b/scripts/examples/OpenMV/21-Sensor-Control/sensor_exposure_control.py
@@ -0,0 +1,67 @@
+# Sensor Exposure Control
+#
+# This example shows off how to control the camera sensor's
+# exposure manually versus letting auto exposure control run.
+
+# What's the difference between gain and exposure control?
+#
+# Well, by increasing the exposure time for the image you're getting more
+# light on the camera. This gives you the best signal to noise ratio. In
+# general you always want to increase the exposure time... except, when you
+# increase the exposure time you decrease the maximum possible frame rate
+# and if anything moves in the image it will start to blur more with a
+# higher exposure time. Gain control allows you to increase the output per
+# pixel using analog and digital multipliers... however, it also amplifies
+# noise. So, it's best to let the exposure increase as much as possible
+# and then use gain control to make up any remaining ground.
+
+import sensor, image, time
+
+# Change this value to adjust the exposure. Try 10.0/0.1/etc.
+EXPOSURE_TIME_SCALE = 1.0
+
+sensor.reset() # Reset and initialize the sensor.
+sensor.set_pixformat(sensor.RGB565) # Set pixel format to RGB565 (or GRAYSCALE)
+sensor.set_framesize(sensor.QVGA) # Set frame size to QVGA (320x240)
+
+# Print out the initial exposure time for comparison.
+print("Initial exposure == %d" % sensor.get_exposure_us())
+
+sensor.skip_frames(time = 2000) # Wait for settings to take effect.
+clock = time.clock() # Create a clock object to track the FPS.
+
+# You have to turn automatic gain control and automatic white balance off
+# otherwise they will change the image gains to undo any exposure settings
+# that you put in place...
+sensor.set_auto_gain(False)
+sensor.set_auto_whitebal(False)
+# Need to let the above settings get in...
+sensor.skip_frames(time = 500)
+
+current_exposure_time_in_microseconds = sensor.get_exposure_us()
+print("Current Exposure == %d" % current_exposure_time_in_microseconds)
+
+# Auto exposure control (AEC) is enabled by default. Calling the below function
+# disables sensor auto exposure control. The additional "exposure_us"
+# argument then overrides the auto exposure value after AEC is disabled.
+sensor.set_auto_exposure(False, \
+    exposure_us = int(current_exposure_time_in_microseconds * EXPOSURE_TIME_SCALE))
+
+print("New exposure == %d" % sensor.get_exposure_us())
+# sensor.get_exposure_us() returns the exact camera sensor exposure time
+# in microseconds. However, this may be a different number than what was
+# commanded because the sensor code converts the exposure time in microseconds
+# to a row/pixel/clock time which doesn't perfectly match with microseconds...
+
+# If you want to turn auto exposure back on do: sensor.set_auto_exposure(True)
+# Note that the camera sensor will then change the exposure time as it likes.
+
+# Doing: sensor.set_auto_exposure(False)
+# just disables the exposure value update but does not change the exposure
+# value the camera sensor determined was good.
+
+while(True):
+    clock.tick() # Update the FPS clock.
+    img = sensor.snapshot() # Take a picture and return the image.
+    print(clock.fps()) # Note: OpenMV Cam runs about half as fast when connected
+    # to the IDE. The FPS should increase once disconnected.
diff --git a/scripts/examples/OpenMV/21-Sensor-Control/sensor_horizontal_mirror.py b/scripts/examples/OpenMV/21-Sensor-Control/sensor_horizontal_mirror.py
new file mode 100644
index 000000000..467b46286
--- /dev/null
+++ b/scripts/examples/OpenMV/21-Sensor-Control/sensor_horizontal_mirror.py
@@ -0,0 +1,21 @@
+# Sensor Horizontal Mirror Example
+#
+# This example shows off horizontally mirroring the image in hardware
+# from the camera sensor.
+
+import sensor, image, time
+
+sensor.reset() # Reset and initialize the sensor.
+sensor.set_pixformat(sensor.RGB565) # Set pixel format to RGB565 (or GRAYSCALE)
+sensor.set_framesize(sensor.QVGA) # Set frame size to QVGA (320x240)
+sensor.skip_frames(time = 2000) # Wait for settings to take effect.
+clock = time.clock() # Create a clock object to track the FPS.
+
+# Change this to False to undo the mirror.
+sensor.set_hmirror(True)
+
+while(True):
+    clock.tick() # Update the FPS clock.
+    img = sensor.snapshot() # Take a picture and return the image.
+    print(clock.fps()) # Note: OpenMV Cam runs about half as fast when connected
+    # to the IDE. The FPS should increase once disconnected.
diff --git a/scripts/examples/OpenMV/21-Sensor-Control/sensor_manual_whitebal_control.py b/scripts/examples/OpenMV/21-Sensor-Control/sensor_manual_whitebal_control.py
new file mode 100644
index 000000000..d9420ada3
--- /dev/null
+++ b/scripts/examples/OpenMV/21-Sensor-Control/sensor_manual_whitebal_control.py
@@ -0,0 +1,39 @@
+# Sensor Manual Whitebal Control
+#
+# This example shows off how to control the camera sensor's
+# white balance gain manually versus letting the AWB control run.
+
+# White balance is achieved by adjusting R/G/B gain values
+# such that the average color of the image is gray. The
+# automatic white balance (AWB) algorithm does this for
+# you but usually ends up with a different result each
+# time you turn the camera on, making it hard to get
+# color tracking settings right.
+# By manually recording the gain values you like and then forcing them to
+# the sensor on startup you can control the colors the camera sees.
+
+import sensor, image, time
+
+sensor.reset() # Reset and initialize the sensor.
+sensor.set_pixformat(sensor.RGB565) # Set pixel format to RGB565 (or GRAYSCALE)
+sensor.set_framesize(sensor.QVGA) # Set frame size to QVGA (320x240)
+sensor.skip_frames(time = 2000) # Wait for settings to take effect.
+clock = time.clock() # Create a clock object to track the FPS.
+
+# You can control the white balance gains here. The first value is the
+# R gain in dB, then the G gain in dB, followed by the B gain in dB.
+#
+# Uncomment the below line with gain values you like (get them from the print out).
+#
+# sensor.set_auto_whitebal(False, rgb_gain_db = (0.0, 0.0, 0.0))
+
+# Note: Putting (0.0, 0.0, 0.0) for the gain results in something close to zero
+# coming out. Do not expect the exact value going in to be equal to the value
+# coming out.
+
+while(True):
+    clock.tick() # Update the FPS clock.
+    img = sensor.snapshot() # Take a picture and return the image.
+    print(clock.fps(), \
+        sensor.get_rgb_gain_db()) # Prints the AWB current RGB gains.
diff --git a/scripts/examples/OpenMV/21-Sensor-Control/sensor_vertical_flip.py b/scripts/examples/OpenMV/21-Sensor-Control/sensor_vertical_flip.py
new file mode 100644
index 000000000..0470f27bf
--- /dev/null
+++ b/scripts/examples/OpenMV/21-Sensor-Control/sensor_vertical_flip.py
@@ -0,0 +1,21 @@
+# Sensor Vertical Flip Example
+#
+# This example shows off vertically flipping the image in hardware
+# from the camera sensor.
+
+import sensor, image, time
+
+sensor.reset() # Reset and initialize the sensor.
+sensor.set_pixformat(sensor.RGB565) # Set pixel format to RGB565 (or GRAYSCALE)
+sensor.set_framesize(sensor.QVGA) # Set frame size to QVGA (320x240)
+sensor.skip_frames(time = 2000) # Wait for settings to take effect.
+clock = time.clock() # Create a clock object to track the FPS.
+
+# Change this to False to undo the flip.
+sensor.set_vflip(True)
+
+while(True):
+    clock.tick() # Update the FPS clock.
+    img = sensor.snapshot() # Take a picture and return the image.
+    print(clock.fps()) # Note: OpenMV Cam runs about half as fast when connected
+    # to the IDE. The FPS should increase once disconnected.
diff --git a/scripts/examples/OpenMV/21-Sensor-Control/sesnor_manual_gain_control.py b/scripts/examples/OpenMV/21-Sensor-Control/sesnor_manual_gain_control.py
new file mode 100644
index 000000000..01d7d3232
--- /dev/null
+++ b/scripts/examples/OpenMV/21-Sensor-Control/sesnor_manual_gain_control.py
@@ -0,0 +1,67 @@
+# Sensor Manual Gain Control
+#
+# This example shows off how to control the camera sensor's
+# gain manually versus letting auto gain control run.
+
+# What's the difference between gain and exposure control?
+#
+# Well, by increasing the exposure time for the image you're getting more
+# light on the camera. This gives you the best signal to noise ratio. In
+# general you always want to increase the exposure time... except, when you
+# increase the exposure time you decrease the maximum possible frame rate
+# and if anything moves in the image it will start to blur more with a
+# higher exposure time. Gain control allows you to increase the output per
+# pixel using analog and digital multipliers... however, it also amplifies
+# noise. So, it's best to let the exposure increase as much as possible
+# and then use gain control to make up any remaining ground.
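+
+# A minimal sketch of that exposure-first strategy (assuming the same
+# "gain_db_ceiling" keyword shown in the auto gain control example):
+#
+# sensor.set_auto_gain(True, gain_db_ceiling = 8.0) # Cap AGC at 8 dB...
+# sensor.set_auto_exposure(True) # ...so AEC must raise exposure instead.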
+
+import sensor, image, time
+
+# Change this value to adjust the gain. Try 10.0/0/0.1/etc.
+GAIN_SCALE = 1.0
+
+sensor.reset() # Reset and initialize the sensor.
+sensor.set_pixformat(sensor.RGB565) # Set pixel format to RGB565 (or GRAYSCALE)
+sensor.set_framesize(sensor.QVGA) # Set frame size to QVGA (320x240)
+
+# Print out the initial gain for comparison.
+print("Initial gain == %f dB" % sensor.get_gain_db())
+
+sensor.skip_frames(time = 2000) # Wait for settings to take effect.
+clock = time.clock() # Create a clock object to track the FPS.
+
+# You have to turn automatic exposure control and automatic white balance off
+# otherwise they will change the image exposure to undo any gain settings
+# that you put in place...
+sensor.set_auto_exposure(False)
+sensor.set_auto_whitebal(False)
+# Need to let the above settings get in...
+sensor.skip_frames(time = 500)
+
+current_gain_in_decibels = sensor.get_gain_db()
+print("Current Gain == %f dB" % current_gain_in_decibels)
+
+# Auto gain control (AGC) is enabled by default. Calling the below function
+# disables sensor auto gain control. The additional "gain_db"
+# argument then overrides the auto gain value after AGC is disabled.
+sensor.set_auto_gain(False, \
+    gain_db = current_gain_in_decibels * GAIN_SCALE)
+
+print("New gain == %f dB" % sensor.get_gain_db())
+# sensor.get_gain_db() returns the exact camera sensor gain in decibels.
+# However, this may be a different number than what was commanded because
+# the sensor code converts the gain to a small and large gain value which
+# aren't able to accept all possible values...
+
+# If you want to turn auto gain back on do: sensor.set_auto_gain(True)
+# Note that the camera sensor will then change the gain as it likes.
+
+# Doing: sensor.set_auto_gain(False)
+# just disables the gain value update but does not change the gain
+# value the camera sensor determined was good.
+
+while(True):
+    clock.tick() # Update the FPS clock.
+    img = sensor.snapshot() # Take a picture and return the image.
+    print(clock.fps()) # Note: OpenMV Cam runs about half as fast when connected
+    # to the IDE. The FPS should increase once disconnected.
diff --git a/scripts/examples/OpenMV/22-Optical-Flow/absolute-rotation-scale.py b/scripts/examples/OpenMV/22-Optical-Flow/absolute-rotation-scale.py
new file mode 100644
index 000000000..1dafa06ee
--- /dev/null
+++ b/scripts/examples/OpenMV/22-Optical-Flow/absolute-rotation-scale.py
@@ -0,0 +1,67 @@
+# Absolute Optical Flow Rotation/Scale
+#
+# This example shows off using your OpenMV Cam to measure
+# rotation/scale by comparing the current and a previous
+# image against each other. Note that only rotation/scale is
+# handled - not X and Y translation in this mode.
+
+# To run this demo effectively please mount your OpenMV Cam on a steady
+# base and SLOWLY rotate the camera around the lens and move the camera
+# forward/backwards to see the numbers change.
+# I.e. Z direction changes only.
+
+import sensor, image, time, math
+
+# NOTE!!! You have to use a small power of 2 resolution when using
+# find_displacement(). This is because the algorithm is powered by
+# something called phase correlation which does the image comparison
+# using FFTs. A non-power of 2 resolution requires padding to a power
+# of 2 which reduces the usefulness of the algorithm results. Please
+# use a resolution like B64X64 or B64X32 (2x faster).
+
+# Your OpenMV Cam supports power of 2 resolutions of 64x32, 64x64,
+# 128x64, and 128x128.
+# If you want a resolution of 32x32 you can create
+# it by doing "img.pool(2, 2)" on a 64x64 image.
+
+sensor.reset() # Reset and initialize the sensor.
+sensor.set_pixformat(sensor.RGB565) # Set pixel format to RGB565 (or GRAYSCALE)
+sensor.set_framesize(sensor.B64X64) # Set frame size to 64x64... (or 64x32)...
+sensor.skip_frames(time = 2000) # Wait for settings to take effect.
+clock = time.clock() # Create a clock object to track the FPS.
+
+# Take from the main frame buffer's RAM to allocate a second frame buffer.
+# There's a lot more RAM in the frame buffer than in the MicroPython heap.
+# However, after doing this you have a lot less RAM for some algorithms...
+# So, be aware that it's a lot easier to run into RAM issues now.
+extra_fb = sensor.alloc_extra_fb(sensor.width(), sensor.height(), sensor.RGB565)
+extra_fb.replace(sensor.snapshot())
+
+while(True):
+    clock.tick() # Track elapsed milliseconds between snapshots().
+    img = sensor.snapshot() # Take a picture and return the image.
+
+    # This algorithm is hard to test without a perfect jig... So, here's a cheat to see that it works.
+    # Put in a z_rotation value below and you should see the r output be equal to that.
+    if(0):
+        expected_rotation = 20.0
+        img.rotation_corr(z_rotation=expected_rotation)
+
+    # This algorithm is hard to test without a perfect jig... So, here's a cheat to see that it works.
+    # Put in a zoom value below and you should see the z output be equal to that.
+    if(0):
+        expected_zoom = 0.8
+        img.rotation_corr(zoom=expected_zoom)
+
+    # For this example we never update the old image to measure absolute change.
+    displacement = extra_fb.find_displacement(img, logpolar=True)
+
+    # Offset results are noisy without filtering so we drop some accuracy.
+    rotation_change = int(math.degrees(displacement.rotation()) * 5) / 5.0
+    zoom_amount = displacement.scale()
+
+    if(displacement.response() > 0.1): # Below 0.1 or so (YMMV) and the results are just noise.
+        print("{0:+f}r {1:+f}z {2} {3} FPS".format(rotation_change, zoom_amount, \
+            displacement.response(),
+            clock.fps()))
+    else:
+        print(clock.fps())
diff --git a/scripts/examples/OpenMV/22-Optical-Flow/absolute-translation.py b/scripts/examples/OpenMV/22-Optical-Flow/absolute-translation.py
new file mode 100644
index 000000000..f4dd2e49f
--- /dev/null
+++ b/scripts/examples/OpenMV/22-Optical-Flow/absolute-translation.py
@@ -0,0 +1,55 @@
+# Absolute Optical Flow Translation
+#
+# This example shows off using your OpenMV Cam to measure translation
+# in the X and Y direction by comparing the current and a previous
+# image against each other. Note that only X and Y translation is
+# handled - not rotation/scale in this mode.
+
+# To run this demo effectively please mount your OpenMV Cam on a steady
+# base and SLOWLY translate it to the left, right, up, and down and
+# watch the numbers change. Note that you can see displacement numbers
+# up to +/- half of the horizontal and vertical resolution.
+
+import sensor, image, time
+
+# NOTE!!! You have to use a small power of 2 resolution when using
+# find_displacement(). This is because the algorithm is powered by
+# something called phase correlation which does the image comparison
+# using FFTs. A non-power of 2 resolution requires padding to a power
+# of 2 which reduces the usefulness of the algorithm results. Please
+# use a resolution like B64X64 or B64X32 (2x faster).
+
+# Your OpenMV Cam supports power of 2 resolutions of 64x32, 64x64,
+# 128x64, and 128x128.
+# If you want a resolution of 32x32 you can create
+# it by doing "img.pool(2, 2)" on a 64x64 image.
+
+sensor.reset() # Reset and initialize the sensor.
+sensor.set_pixformat(sensor.RGB565) # Set pixel format to RGB565 (or GRAYSCALE)
+sensor.set_framesize(sensor.B64X64) # Set frame size to 64x64... (or 64x32)...
+sensor.skip_frames(time = 2000) # Wait for settings to take effect.
+clock = time.clock() # Create a clock object to track the FPS.
+
+# Take from the main frame buffer's RAM to allocate a second frame buffer.
+# There's a lot more RAM in the frame buffer than in the MicroPython heap.
+# However, after doing this you have a lot less RAM for some algorithms...
+# So, be aware that it's a lot easier to run into RAM issues now.
+extra_fb = sensor.alloc_extra_fb(sensor.width(), sensor.height(), sensor.RGB565)
+extra_fb.replace(sensor.snapshot())
+
+while(True):
+    clock.tick() # Track elapsed milliseconds between snapshots().
+    img = sensor.snapshot() # Take a picture and return the image.
+
+    # For this example we never update the old image to measure absolute change.
+    displacement = extra_fb.find_displacement(img)
+
+    # Offset results are noisy without filtering so we drop some accuracy.
+    sub_pixel_x = int(displacement.x_translation() * 5) / 5.0
+    sub_pixel_y = int(displacement.y_translation() * 5) / 5.0
+
+    if(displacement.response() > 0.1): # Below 0.1 or so (YMMV) and the results are just noise.
+        print("{0:+f}x {1:+f}y {2} {3} FPS".format(sub_pixel_x, sub_pixel_y,
+            displacement.response(),
+            clock.fps()))
+    else:
+        print(clock.fps())
diff --git a/scripts/examples/OpenMV/22-Optical-Flow/differential-rotation-scale.py b/scripts/examples/OpenMV/22-Optical-Flow/differential-rotation-scale.py
new file mode 100644
index 000000000..8e1b54c64
--- /dev/null
+++ b/scripts/examples/OpenMV/22-Optical-Flow/differential-rotation-scale.py
@@ -0,0 +1,67 @@
+# Differential Optical Flow Rotation/Scale
+#
+# This example shows off using your OpenMV Cam to measure
+# rotation/scale by comparing the current and the previous
+# image against each other. Note that only rotation/scale is
+# handled - not X and Y translation in this mode.
+
+# To run this demo effectively please mount your OpenMV Cam on a steady
+# base and SLOWLY rotate the camera around the lens and move the camera
+# forward/backwards to see the numbers change.
+# I.e. Z direction changes only.
+
+import sensor, image, time, math
+
+# NOTE!!! You have to use a small power of 2 resolution when using
+# find_displacement(). This is because the algorithm is powered by
+# something called phase correlation which does the image comparison
+# using FFTs. A non-power of 2 resolution requires padding to a power
+# of 2 which reduces the usefulness of the algorithm results. Please
+# use a resolution like B64X64 or B64X32 (2x faster).
+
+# Your OpenMV Cam supports power of 2 resolutions of 64x32, 64x64,
+# 128x64, and 128x128. If you want a resolution of 32x32 you can create
+# it by doing "img.pool(2, 2)" on a 64x64 image.
+
+sensor.reset() # Reset and initialize the sensor.
+sensor.set_pixformat(sensor.RGB565) # Set pixel format to RGB565 (or GRAYSCALE)
+sensor.set_framesize(sensor.B64X64) # Set frame size to 64x64... (or 64x32)...
+sensor.skip_frames(time = 2000) # Wait for settings to take effect.
+clock = time.clock() # Create a clock object to track the FPS.
+
+# Take from the main frame buffer's RAM to allocate a second frame buffer.
+# There's a lot more RAM in the frame buffer than in the MicroPython heap.
+# However, after doing this you have a lot less RAM for some algorithms...
+# So, be aware that it's a lot easier to run into RAM issues now.
+extra_fb = sensor.alloc_extra_fb(sensor.width(), sensor.height(), sensor.RGB565)
+extra_fb.replace(sensor.snapshot())
+
+while(True):
+    clock.tick() # Track elapsed milliseconds between snapshots().
+    img = sensor.snapshot() # Take a picture and return the image.
+
+    # This algorithm is hard to test without a perfect jig... So, here's a cheat to see that it works.
+    # Put in a z_rotation value below and you should see the r output be equal to that.
+    if(0):
+        expected_rotation = 20.0
+        extra_fb.rotation_corr(z_rotation=(-expected_rotation))
+
+    # This algorithm is hard to test without a perfect jig... So, here's a cheat to see that it works.
+    # Put in a zoom value below and you should see the z output be equal to that.
+    if(0):
+        expected_zoom = 0.8
+        extra_fb.rotation_corr(zoom=(2.00-expected_zoom))
+
+    displacement = extra_fb.find_displacement(img, logpolar=True)
+    extra_fb.replace(img)
+
+    # Offset results are noisy without filtering so we drop some accuracy.
+    rotation_change = int(math.degrees(displacement.rotation()) * 5) / 5.0
+    zoom_amount = displacement.scale()
+
+    if(displacement.response() > 0.1): # Below 0.1 or so (YMMV) and the results are just noise.
+        print("{0:+f}r {1:+f}z {2} {3} FPS".format(rotation_change, zoom_amount, \
+            displacement.response(),
+            clock.fps()))
+    else:
+        print(clock.fps())
diff --git a/scripts/examples/OpenMV/22-Optical-Flow/differential-translation.py b/scripts/examples/OpenMV/22-Optical-Flow/differential-translation.py
new file mode 100644
index 000000000..04416cf09
--- /dev/null
+++ b/scripts/examples/OpenMV/22-Optical-Flow/differential-translation.py
@@ -0,0 +1,55 @@
+# Differential Optical Flow Translation
+#
+# This example shows off using your OpenMV Cam to measure translation
+# in the X and Y direction by comparing the current and the previous
+# image against each other. Note that only X and Y translation is
+# handled - not rotation/scale in this mode.
+
+# To run this demo effectively please mount your OpenMV Cam on a steady
+# base and QUICKLY translate it to the left, right, up, and down and
+# watch the numbers change. Note that you can see displacement numbers
+# up to +/- half of the horizontal and vertical resolution.
+
+import sensor, image, time
+
+# NOTE!!! You have to use a small power of 2 resolution when using
+# find_displacement(). This is because the algorithm is powered by
+# something called phase correlation which does the image comparison
+# using FFTs. A non-power of 2 resolution requires padding to a power
+# of 2 which reduces the usefulness of the algorithm results. Please
+# use a resolution like B64X64 or B64X32 (2x faster).
+
+# Your OpenMV Cam supports power of 2 resolutions of 64x32, 64x64,
+# 128x64, and 128x128. If you want a resolution of 32x32 you can create
+# it by doing "img.pool(2, 2)" on a 64x64 image.
+
+sensor.reset() # Reset and initialize the sensor.
+sensor.set_pixformat(sensor.RGB565) # Set pixel format to RGB565 (or GRAYSCALE)
+sensor.set_framesize(sensor.B64X64) # Set frame size to 64x64... (or 64x32)...
+sensor.skip_frames(time = 2000) # Wait for settings to take effect.
+clock = time.clock() # Create a clock object to track the FPS.
+
+# Take from the main frame buffer's RAM to allocate a second frame buffer.
+# There's a lot more RAM in the frame buffer than in the MicroPython heap.
+# However, after doing this you have a lot less RAM for some algorithms...
+# So, be aware that it's a lot easier to run into RAM issues now.
+extra_fb = sensor.alloc_extra_fb(sensor.width(), sensor.height(), sensor.RGB565)
+extra_fb.replace(sensor.snapshot())
+
+while(True):
+    clock.tick() # Track elapsed milliseconds between snapshots().
+    img = sensor.snapshot() # Take a picture and return the image.
+
+    displacement = extra_fb.find_displacement(img)
+    extra_fb.replace(img)
+
+    # Offset results are noisy without filtering so we drop some accuracy.
+    sub_pixel_x = int(displacement.x_translation() * 5) / 5.0
+    sub_pixel_y = int(displacement.y_translation() * 5) / 5.0
+
+    if(displacement.response() > 0.1): # Below 0.1 or so (YMMV) and the results are just noise.
+        print("{0:+f}x {1:+f}y {2} {3} FPS".format(sub_pixel_x, sub_pixel_y,
+            displacement.response(),
+            clock.fps()))
+    else:
+        print(clock.fps())
diff --git a/scripts/examples/OpenMV/22-Optical-Flow/image-patches-absolute-rotation-scale.py b/scripts/examples/OpenMV/22-Optical-Flow/image-patches-absolute-rotation-scale.py
new file mode 100644
index 000000000..414a105e6
--- /dev/null
+++ b/scripts/examples/OpenMV/22-Optical-Flow/image-patches-absolute-rotation-scale.py
@@ -0,0 +1,73 @@
+# Image Patches Absolute Optical Flow Rotation/Scale
+#
+# This example shows off using your OpenMV Cam to measure
+# rotation/scale by comparing the current and a previous
+# image against each other. Note that only rotation/scale is
+# handled - not X and Y translation in this mode.
+#
+# However, this example goes beyond doing optical flow on the whole
+# image at once. Instead it breaks up the process by working on groups
+# of pixels in the image. This gives you a "new" image of results.
+#
+# NOTE that surfaces need to have some type of "edge" on them for the
+# algorithm to work. A featureless surface produces crazy results.
+
+# NOTE: Unless you have a very nice test rig the usefulness of this example is hard to see...
+
+BLOCK_W = 16 # pow2
+BLOCK_H = 16 # pow2
+
+# To run this demo effectively please mount your OpenMV Cam on a steady
+# base and SLOWLY rotate the camera around the lens and move the camera
+# forward/backwards to see the numbers change.
+# I.e. Z direction changes only.
+
+import sensor, image, time, math
+
+# NOTE!!! You have to use a small power of 2 resolution when using
+# find_displacement(). This is because the algorithm is powered by
+# something called phase correlation which does the image comparison
+# using FFTs. A non-power of 2 resolution requires padding to a power
+# of 2 which reduces the usefulness of the algorithm results. Please
+# use a resolution like B128X128 or B128X64 (2x faster).
+
+# Your OpenMV Cam supports power of 2 resolutions of 64x32, 64x64,
+# 128x64, and 128x128. If you want a resolution of 32x32 you can create
+# it by doing "img.pool(2, 2)" on a 64x64 image.
+
+sensor.reset() # Reset and initialize the sensor.
+sensor.set_pixformat(sensor.GRAYSCALE) # Set pixel format to GRAYSCALE (or RGB565)
+sensor.set_framesize(sensor.B128X128) # Set frame size to 128x128... (or 128x64)...
+sensor.skip_frames(time = 2000) # Wait for settings to take effect.
+clock = time.clock() # Create a clock object to track the FPS.
+
+# Take from the main frame buffer's RAM to allocate a second frame buffer.
+# There's a lot more RAM in the frame buffer than in the MicroPython heap.
+# However, after doing this you have a lot less RAM for some algorithms...
+# So, be aware that it's a lot easier to run into RAM issues now.
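+# (If you need that RAM back later, sensor.dealloc_extra_fb() should free
+# this buffer again - a hint only, this example does not rely on it.)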
+extra_fb = sensor.alloc_extra_fb(sensor.width(), sensor.height(), sensor.GRAYSCALE)
+extra_fb.replace(sensor.snapshot())
+
+while(True):
+    clock.tick() # Track elapsed milliseconds between snapshots().
+    img = sensor.snapshot() # Take a picture and return the image.
+
+    for y in range(0, sensor.height(), BLOCK_H):
+        for x in range(0, sensor.width(), BLOCK_W):
+            # For this example we never update the old image to measure absolute change.
+            displacement = extra_fb.find_displacement(img, logpolar=True, \
+                roi = (x, y, BLOCK_W, BLOCK_H), template_roi = (x, y, BLOCK_W, BLOCK_H))
+
+            # Below 0.1 or so (YMMV) and the results are just noise.
+            if(displacement.response() > 0.1):
+                rotation_change = displacement.rotation()
+                zoom_amount = displacement.scale()
+                pixel_x = x + (BLOCK_W//2) + int(math.sin(rotation_change) * zoom_amount * (BLOCK_W//4))
+                pixel_y = y + (BLOCK_H//2) + int(math.cos(rotation_change) * zoom_amount * (BLOCK_H//4))
+                img.draw_line((x + BLOCK_W//2, y + BLOCK_H//2, pixel_x, pixel_y), \
+                    color = 255)
+            else:
+                img.draw_line((x + BLOCK_W//2, y + BLOCK_H//2, x + BLOCK_W//2, y + BLOCK_H//2), \
+                    color = 0)
+
+    print(clock.fps())
diff --git a/scripts/examples/OpenMV/22-Optical-Flow/image-patches-absolute-translation.py b/scripts/examples/OpenMV/22-Optical-Flow/image-patches-absolute-translation.py
new file mode 100644
index 000000000..0bfae8ca6
--- /dev/null
+++ b/scripts/examples/OpenMV/22-Optical-Flow/image-patches-absolute-translation.py
@@ -0,0 +1,69 @@
+# Image Patches Absolute Optical Flow Translation
+#
+# This example shows off using your OpenMV Cam to measure translation
+# in the X and Y direction by comparing the current and a previous
+# image against each other. Note that only X and Y translation is
+# handled - not rotation/scale in this mode.
+#
+# However, this example goes beyond doing optical flow on the whole
+# image at once. Instead it breaks up the process by working on groups
+# of pixels in the image. This gives you a "new" image of results.
+#
+# NOTE that surfaces need to have some type of "edge" on them for the
+# algorithm to work. A featureless surface produces crazy results.
+
+BLOCK_W = 16 # pow2
+BLOCK_H = 16 # pow2
+
+# To run this demo effectively please mount your OpenMV Cam on a steady
+# base and SLOWLY translate it to the left, right, up, and down and
+# watch the numbers change. Note that you can see displacement numbers
+# up to +/- half of the horizontal and vertical resolution.
+
+import sensor, image, time
+
+# NOTE!!! You have to use a small power of 2 resolution when using
+# find_displacement(). This is because the algorithm is powered by
+# something called phase correlation which does the image comparison
+# using FFTs. A non-power of 2 resolution requires padding to a power
+# of 2 which reduces the usefulness of the algorithm results. Please
+# use a resolution like B128X128 or B128X64 (2x faster).
+
+# Your OpenMV Cam supports power of 2 resolutions of 64x32, 64x64,
+# 128x64, and 128x128. If you want a resolution of 32x32 you can create
+# it by doing "img.pool(2, 2)" on a 64x64 image.
+
+sensor.reset() # Reset and initialize the sensor.
+sensor.set_pixformat(sensor.GRAYSCALE) # Set pixel format to GRAYSCALE (or RGB565)
+sensor.set_framesize(sensor.B128X128) # Set frame size to 128x128... (or 128x64)...
+sensor.skip_frames(time = 2000) # Wait for settings to take effect.
+clock = time.clock() # Create a clock object to track the FPS.
+
+# Take from the main frame buffer's RAM to allocate a second frame buffer.
+# There's a lot more RAM in the frame buffer than in the MicroPython heap.
+# However, after doing this you have a lot less RAM for some algorithms...
+# So, be aware that it's a lot easier to run into RAM issues now.
+extra_fb = sensor.alloc_extra_fb(sensor.width(), sensor.height(), sensor.GRAYSCALE)
+extra_fb.replace(sensor.snapshot())
+
+while(True):
+    clock.tick() # Track elapsed milliseconds between snapshots().
+    img = sensor.snapshot() # Take a picture and return the image.
+
+    for y in range(0, sensor.height(), BLOCK_H):
+        for x in range(0, sensor.width(), BLOCK_W):
+            # For this example we never update the old image to measure absolute change.
+            displacement = extra_fb.find_displacement(img, \
+                roi = (x, y, BLOCK_W, BLOCK_H), template_roi = (x, y, BLOCK_W, BLOCK_H))
+
+            # Below 0.1 or so (YMMV) and the results are just noise.
+            if(displacement.response() > 0.1):
+                pixel_x = x + (BLOCK_W//2) + int(displacement.x_translation())
+                pixel_y = y + (BLOCK_H//2) + int(displacement.y_translation())
+                img.draw_line((x + BLOCK_W//2, y + BLOCK_H//2, pixel_x, pixel_y), \
+                    color = 255)
+            else:
+                img.draw_line((x + BLOCK_W//2, y + BLOCK_H//2, x + BLOCK_W//2, y + BLOCK_H//2), \
+                    color = 0)
+
+    print(clock.fps())
diff --git a/scripts/examples/OpenMV/22-Optical-Flow/image-patches-differential-rotation-scale.py b/scripts/examples/OpenMV/22-Optical-Flow/image-patches-differential-rotation-scale.py
new file mode 100644
index 000000000..bb1bc2eea
--- /dev/null
+++ b/scripts/examples/OpenMV/22-Optical-Flow/image-patches-differential-rotation-scale.py
@@ -0,0 +1,73 @@
+# Image Patches Differential Optical Flow Rotation/Scale
+#
+# This example shows off using your OpenMV Cam to measure
+# rotation/scale by comparing the current and the previous
+# image against each other. Note that only rotation/scale is
+# handled - not X and Y translation in this mode.
+#
+# However, this example goes beyond doing optical flow on the whole
+# image at once. Instead it breaks up the process by working on groups
+# of pixels in the image. This gives you a "new" image of results.
+#
+# NOTE that surfaces need to have some type of "edge" on them for the
+# algorithm to work. A featureless surface produces crazy results.
+
+# NOTE: Unless you have a very nice test rig the usefulness of this example is hard to see...
+
+BLOCK_W = 16 # pow2
+BLOCK_H = 16 # pow2
+
+# To run this demo effectively please mount your OpenMV Cam on a steady
+# base and SLOWLY rotate the camera around the lens and move the camera
+# forward/backwards to see the numbers change.
+# I.e. Z direction changes only.
+
+import sensor, image, time, math
+
+# NOTE!!! You have to use a small power of 2 resolution when using
+# find_displacement(). This is because the algorithm is powered by
+# something called phase correlation which does the image comparison
+# using FFTs. A non-power of 2 resolution requires padding to a power
+# of 2 which reduces the usefulness of the algorithm results. Please
+# use a resolution like B128X128 or B128X64 (2x faster).
+
+# Your OpenMV Cam supports power of 2 resolutions of 64x32, 64x64,
+# 128x64, and 128x128. If you want a resolution of 32x32 you can create
+# it by doing "img.pool(2, 2)" on a 64x64 image.
+
+sensor.reset() # Reset and initialize the sensor.
+sensor.set_pixformat(sensor.GRAYSCALE) # Set pixel format to GRAYSCALE (or RGB565)
+sensor.set_framesize(sensor.B128X128) # Set frame size to 128x128... (or 128x64)...
+sensor.skip_frames(time = 2000) # Wait for settings to take effect.
+clock = time.clock() # Create a clock object to track the FPS.
+
+# Take from the main frame buffer's RAM to allocate a second frame buffer.
+# There's a lot more RAM in the frame buffer than in the MicroPython heap.
+# However, after doing this you have a lot less RAM for some algorithms...
+# So, be aware that it's a lot easier to run into RAM issues now.
+extra_fb = sensor.alloc_extra_fb(sensor.width(), sensor.height(), sensor.GRAYSCALE)
+extra_fb.replace(sensor.snapshot())
+
+while(True):
+    clock.tick() # Track elapsed milliseconds between snapshots().
+    img = sensor.snapshot() # Take a picture and return the image.
+
+    for y in range(0, sensor.height(), BLOCK_H):
+        for x in range(0, sensor.width(), BLOCK_W):
+            displacement = extra_fb.find_displacement(img, logpolar=True, \
+                roi = (x, y, BLOCK_W, BLOCK_H), template_roi = (x, y, BLOCK_W, BLOCK_H))
+
+            # Below 0.1 or so (YMMV) and the results are just noise.
+            if(displacement.response() > 0.1):
+                rotation_change = displacement.rotation()
+                zoom_amount = 1.0 + displacement.scale()
+                pixel_x = x + (BLOCK_W//2) + int(math.sin(rotation_change) * zoom_amount * (BLOCK_W//4))
+                pixel_y = y + (BLOCK_H//2) + int(math.cos(rotation_change) * zoom_amount * (BLOCK_H//4))
+                img.draw_line((x + BLOCK_W//2, y + BLOCK_H//2, pixel_x, pixel_y), \
+                    color = 255)
+            else:
+                img.draw_line((x + BLOCK_W//2, y + BLOCK_H//2, x + BLOCK_W//2, y + BLOCK_H//2), \
+                    color = 0)
+    extra_fb.replace(img)
+
+    print(clock.fps())
diff --git a/scripts/examples/OpenMV/22-Optical-Flow/image-patches-differential-translation.py b/scripts/examples/OpenMV/22-Optical-Flow/image-patches-differential-translation.py
new file mode 100644
index 000000000..0a87c42f3
--- /dev/null
+++ b/scripts/examples/OpenMV/22-Optical-Flow/image-patches-differential-translation.py
@@ -0,0 +1,69 @@
+# Image Patches Differential Optical Flow Translation
+#
+# This example shows off using your OpenMV Cam to measure translation
+# in the X and Y direction by comparing the current and the previous
+# image against each other. Note that only X and Y translation is
+# handled - not rotation/scale in this mode.
+#
+# However, this example goes beyond doing optical flow on the whole
+# image at once. Instead it breaks up the process by working on groups
+# of pixels in the image. This gives you a "new" image of results.
+#
+# NOTE that surfaces need to have some type of "edge" on them for the
+# algorithm to work. A featureless surface produces crazy results.
+
+BLOCK_W = 16 # pow2
+BLOCK_H = 16 # pow2
+
+# To run this demo effectively please mount your OpenMV Cam on a steady
+# base and SLOWLY translate it to the left, right, up, and down and
+# watch the numbers change. Note that you can see displacement numbers
+# up to +/- half of the horizontal and vertical resolution.
+
+import sensor, image, time
+
+# NOTE!!! You have to use a small power of 2 resolution when using
+# find_displacement(). This is because the algorithm is powered by
+# something called phase correlation which does the image comparison
+# using FFTs. A non-power of 2 resolution requires padding to a power
+# of 2 which reduces the usefulness of the algorithm results. Please
+# use a resolution like B128X128 or B128X64 (2x faster).
+
+# Your OpenMV Cam supports power of 2 resolutions of 64x32, 64x64,
+# 128x64, and 128x128. If you want a resolution of 32x32 you can create
+# it by doing "img.pool(2, 2)" on a 64x64 image.
+
+sensor.reset() # Reset and initialize the sensor.
+sensor.set_pixformat(sensor.GRAYSCALE) # Set pixel format to GRAYSCALE (or RGB565)
+sensor.set_framesize(sensor.B128X128) # Set frame size to 128x128... (or 128x64)...
+sensor.skip_frames(time = 2000) # Wait for settings to take effect.
+clock = time.clock() # Create a clock object to track the FPS.
+
+# Take from the main frame buffer's RAM to allocate a second frame buffer.
+# There's a lot more RAM in the frame buffer than in the MicroPython heap.
+# However, after doing this you have a lot less RAM for some algorithms...
+# So, be aware that it's a lot easier to run into RAM issues now.
+extra_fb = sensor.alloc_extra_fb(sensor.width(), sensor.height(), sensor.GRAYSCALE)
+extra_fb.replace(sensor.snapshot())
+
+while(True):
+    clock.tick() # Track elapsed milliseconds between snapshots().
+    img = sensor.snapshot() # Take a picture and return the image.
+
+    for y in range(0, sensor.height(), BLOCK_H):
+        for x in range(0, sensor.width(), BLOCK_W):
+            displacement = extra_fb.find_displacement(img, \
+                roi = (x, y, BLOCK_W, BLOCK_H), template_roi = (x, y, BLOCK_W, BLOCK_H))
+
+            # Below 0.1 or so (YMMV) and the results are just noise.
+            if(displacement.response() > 0.1):
+                pixel_x = x + (BLOCK_W//2) + int(displacement.x_translation())
+                pixel_y = y + (BLOCK_H//2) + int(displacement.y_translation())
+                img.draw_line((x + BLOCK_W//2, y + BLOCK_H//2, pixel_x, pixel_y), \
+                    color = 255)
+            else:
+                img.draw_line((x + BLOCK_W//2, y + BLOCK_H//2, x + BLOCK_W//2, y + BLOCK_H//2), \
+                    color = 0)
+    extra_fb.replace(img)
+
+    print(clock.fps())
diff --git a/scripts/examples/23-Motor-Shield/motor-shield-power-driver.py b/scripts/examples/OpenMV/23-Motor-Shield/motor-shield-power-driver.py
similarity index 100%
rename from scripts/examples/23-Motor-Shield/motor-shield-power-driver.py
rename to scripts/examples/OpenMV/23-Motor-Shield/motor-shield-power-driver.py
diff --git a/scripts/examples/23-Motor-Shield/motor-shield-pwm.py b/scripts/examples/OpenMV/23-Motor-Shield/motor-shield-pwm.py
similarity index 100%
rename from scripts/examples/23-Motor-Shield/motor-shield-pwm.py
rename to scripts/examples/OpenMV/23-Motor-Shield/motor-shield-pwm.py
diff --git a/scripts/examples/23-Motor-Shield/motor.py b/scripts/examples/OpenMV/23-Motor-Shield/motor.py
similarity index 100%
rename from scripts/examples/23-Motor-Shield/motor.py
rename to scripts/examples/OpenMV/23-Motor-Shield/motor.py
diff --git a/scripts/examples/23-Motor-Shield/stepper.py b/scripts/examples/OpenMV/23-Motor-Shield/stepper.py
similarity index 100%
rename from scripts/examples/23-Motor-Shield/stepper.py
rename to scripts/examples/OpenMV/23-Motor-Shield/stepper.py
diff --git a/scripts/examples/OpenMV/24-External-Sensors/I2C_Lidar_Lite_V3_example_code.py b/scripts/examples/OpenMV/24-External-Sensors/I2C_Lidar_Lite_V3_example_code.py
new file mode 100644
index 000000000..ac9cee166
--- /dev/null
+++ b/scripts/examples/OpenMV/24-External-Sensors/I2C_Lidar_Lite_V3_example_code.py
@@ -0,0 +1,63 @@
+# OpenMV M7 I2C interface with Garmin Lidar Lite V3 - By: Grant Phillips - Sun Apr 8 2018
+
+
+# Returns a basic distance reading from the lidar in cm for the target point and prints to console.
+# Uses default lidar settings. For more advanced settings, see the I2C commands in the manual:
+# https://static.garmin.com/pumac/LIDAR_Lite_v3_Operation_Manual_and_Technical_Specifications.pdf
+
+# I2C Control of LIDAR Lite V3
+# 1. Write 0x04 to register 0x00
+# 2. Read register 0x01. Repeat until bit 0 (LSB) goes low.
+# 3. Read two bytes from 0x8f (high byte 0x0f then low byte 0x10) to obtain the 16 bit measurement in cm
+
+# HARDWARE CONNECTIONS:
+# Connect the lidar SCL line (green) to I2C 2 SCL on openMV (Pin 4)
+# Connect the lidar SDA line (blue) to I2C 2 SDA on openMV (pin 5)
+# 680uF filter capacitor in parallel with the lidar
+# 10k pullup resistors on the SCL and SDA lines to +5Vdc
+
+
+import pyb
+from pyb import I2C
+
+
+lidarReady = bytearray([0xff]) # holds the returned data for ready check
+lidarReadyCheck = bytes([1]) # to compare bit 0 of lidarReady
+
+startBuf = bytearray([0x00,0x04]) # step 1 address and data
+readyBuf = bytearray([0x01]) # step 2 address for readiness check
+distBuf = bytearray([0x8f]) # step 3 address for distance reading
+distance = -1 # variable for distance reading
+
+# I2C setup
+Lidar = I2C(2, I2C.MASTER) # initialise I2C 2 bus in master mode
+
+
+while(True):
+    distance = -1 # reset to -1 so we know when we get a real reading
+
+    try: # handles errors thrown up if we have an I2C error
+        # Step 1 Write 0x04 to register 0x00
+        Lidar.send(startBuf, 0x62) # this is making it read (laser visible)
+
+        # Step 2 Read register 0x01 and wait for bit 0 to go low
+        while (lidarReady[0] & readyBuf[0]):
+            Lidar.send(readyBuf, 0x62)
+            lidarReady = Lidar.recv(1, 0x62)
+            pyb.delay(50) # This seems to help reduce errors on the I2C bus
+        lidarReady = bytearray([0xff]) # reset the ready check data for the next reading
+
+        # Step 3 Read the distance measurement from 0x8f (0x0f and 0x10)
+        Lidar.send(distBuf, 0x62)
+        dist = Lidar.recv(2, 0x62)
+        distance = dist[0]
+        distance <<= 8 # shift the high byte up to form a 16 bit int
+        distance |= dist[1]
+        pyb.delay(100) # allow time between readings, can go faster but more errors
+
+    except OSError: # reinitialise the i2c bus on error
+        Lidar.init(I2C.MASTER)
+        print("error, reinitialising")
+
+    if distance > -1:
+        print("Distance:", distance, "cm")
diff --git a/scripts/examples/OpenMV/25-Machine-Learning/nn_stm32cubeai.py b/scripts/examples/OpenMV/25-Machine-Learning/nn_stm32cubeai.py
new file mode 100644
index 000000000..542aaf458
--- /dev/null
+++ b/scripts/examples/OpenMV/25-Machine-Learning/nn_stm32cubeai.py
@@ -0,0 +1,38 @@
+# STM32 CUBE.AI on OpenMV MNIST Example
+# See https://github.com/openmv/openmv/blob/master/src/stm32cubeai/README.MD
+
+import sensor, image, time, nn_st
+
+sensor.reset() # Reset and initialize the sensor.
+sensor.set_contrast(3)
+sensor.set_brightness(0)
+sensor.set_auto_gain(True)
+sensor.set_auto_exposure(True)
+sensor.set_pixformat(sensor.GRAYSCALE) # Set pixel format to Grayscale
+sensor.set_framesize(sensor.QQQVGA) # Set frame size to 80x60
+sensor.skip_frames(time = 2000) # Wait for settings to take effect.
+clock = time.clock() # Create a clock object to track the FPS.
+
+# [CUBE.AI] Initialize the network
+net = nn_st.loadnnst('network')
+
+nn_input_sz = 28 # The NN input is 28x28
+
+while(True):
+    clock.tick() # Update the FPS clock.
+    img = sensor.snapshot() # Take a picture and return the image.
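+
+    # (For the 80x60 QQQVGA frame used here the crop below works out to
+    # (80//2 - 14, 60//2 - 14, 28, 28) == (26, 16, 28, 28) - a centered
+    # 28x28 patch matching the 28x28 network input.)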
+    # Crop in the middle (avoids vignetting)
+    img.crop((img.width()//2-nn_input_sz//2,
+              img.height()//2-nn_input_sz//2,
+              nn_input_sz,
+              nn_input_sz))
+
+    # Binarize the image
+    img.midpoint(2, bias=0.5, threshold=True, offset=5, invert=True)
+
+    # [CUBE.AI] Run the inference
+    out = net.predict(img)
+    print('Network argmax output: {}'.format(out.index(max(out))))
+    img.draw_string(0, 0, str(out.index(max(out))))
+    print('FPS {}'.format(clock.fps())) # Note: OpenMV Cam runs about half as fast when connected to the IDE.
diff --git a/scripts/examples/OpenMV/25-Machine-Learning/tf_face_collection.py b/scripts/examples/OpenMV/25-Machine-Learning/tf_face_collection.py
new file mode 100644
index 000000000..b8f7a5d26
--- /dev/null
+++ b/scripts/examples/OpenMV/25-Machine-Learning/tf_face_collection.py
@@ -0,0 +1,31 @@
+# Face Collection
+#
+# Use this script to gather face images for building a TensorFlow dataset. This script automatically
+# zooms in on the largest face in the field of view which you can then save using the dataset editor.
+
+import sensor, image, time
+
+sensor.reset()
+sensor.set_pixformat(sensor.RGB565)
+sensor.set_framesize(sensor.QVGA)
+sensor.skip_frames(time = 2000)
+
+clock = time.clock()
+
+largest_face = None
+largest_face_timeout = 0
+
+while(True):
+    clock.tick()
+
+    faces = sensor.snapshot().gamma_corr(contrast=1.5).find_features(image.HaarCascade("frontalface"))
+
+    if faces:
+        largest_face = max(faces, key = lambda f: f[2] * f[3])
+        largest_face_timeout = 20
+
+    if largest_face_timeout > 0:
+        sensor.get_fb().crop(roi=largest_face)
+        largest_face_timeout -= 1
+
+    print(clock.fps())
diff --git a/scripts/examples/OpenMV/25-Machine-Learning/tf_face_recognition.py b/scripts/examples/OpenMV/25-Machine-Learning/tf_face_recognition.py
new file mode 100644
index 000000000..6bf348e0e
--- /dev/null
+++ b/scripts/examples/OpenMV/25-Machine-Learning/tf_face_recognition.py
@@ -0,0 +1,41 @@
+# Face Recognition
+#
+# Use this script to run a TensorFlow Lite image classifier on faces detected within an image.
+# The classifier is free to do facial recognition, expression detection, or whatever.
+
+import sensor, image, time, tf
+
+sensor.reset()
+sensor.set_pixformat(sensor.RGB565)
+sensor.set_framesize(sensor.QVGA)
+sensor.skip_frames(time = 2000)
+
+clock = time.clock()
+
+net = tf.load("trained.tflite", load_to_fb=True)
+labels = [l.rstrip('\n') for l in open("labels.txt")]
+
+while(True):
+    clock.tick()
+
+    # Take a picture and brighten things up for the frontal face detector.
+    img = sensor.snapshot().gamma_corr(contrast=1.5)
+
+    # Returns a list of rects (x, y, w, h) where faces are.
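+    # (find_features() also accepts "threshold" and "scale_factor" keywords,
+    # as used in the face detection example, if you need to trade detection
+    # rate against false positives - the call below just uses the defaults.)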
+    faces = img.find_features(image.HaarCascade("frontalface"))
+
+    for f in faces:
+
+        # Classify a face and get the class scores list
+        scores = net.classify(img, roi=f)[0].output()
+
+        # Find the highest class score and look up the label for that
+        label = labels[scores.index(max(scores))]
+
+        # Draw a box around the face
+        img.draw_rectangle(f)
+
+        # Draw the label above the face
+        img.draw_string(f[0]+3, f[1]-1, label, mono_space=False)
+
+    print(clock.fps())
diff --git a/scripts/examples/OpenMV/25-Machine-Learning/tf_mobilenet_search_whole_window.py b/scripts/examples/OpenMV/25-Machine-Learning/tf_mobilenet_search_whole_window.py
new file mode 100644
index 000000000..92ce31381
--- /dev/null
+++ b/scripts/examples/OpenMV/25-Machine-Learning/tf_mobilenet_search_whole_window.py
@@ -0,0 +1,58 @@
+# TensorFlow Lite Mobilenet V1 Example
+#
+# Google's Mobilenet V1 detects 1000 classes of objects
+#
+# WARNING: Mobilenet is trained on ImageNet and isn't meant to classify anything
+# in the real world. It's just designed to score well on the ImageNet dataset.
+# This example just shows off running mobilenet on the OpenMV Cam. However, the
+# default model is not really usable for anything. You have to use transfer
+# learning to apply the model to a target problem by re-training the model.
+#
+# NOTE: This example only works on the OpenMV Cam H7 Plus (that has SDRAM) and better!
+# To get the models please see the CNN Network library in OpenMV IDE under
+# Tools -> Machine Vision. The labels are there too.
+#
+# In this example we slide the detector window over the image and get a list
+# of activations. Note that using a CNN with a sliding window is extremely
+# computationally expensive, so for an exhaustive search do not expect the CNN to be real-time.
+
+import sensor, image, time, os, tf
+
+sensor.reset() # Reset and initialize the sensor.
+sensor.set_pixformat(sensor.RGB565) # Set pixel format to RGB565 (or GRAYSCALE)
+sensor.set_framesize(sensor.QVGA) # Set frame size to QVGA (320x240)
+sensor.set_windowing((240, 240)) # Set 240x240 window.
+sensor.skip_frames(time=2000) # Let the camera adjust.
+
+mobilenet_version = "1" # 1
+mobilenet_width = "0.5" # 1.0, 0.75, 0.50, 0.25
+mobilenet_resolution = "128" # 224, 192, 160, 128
+
+mobilenet = "mobilenet_v%s_%s_%s_quant.tflite" % (mobilenet_version, mobilenet_width, mobilenet_resolution)
+labels = [line.rstrip('\n') for line in open("mobilenet_labels.txt")]
+
+clock = time.clock()
+while(True):
+    clock.tick()
+
+    img = sensor.snapshot()
+
+    # tf.classify() will run the network on an roi in the image (or on the whole image if the roi is not
+    # specified). A classification score output vector will be generated for each location. At each scale the
+    # detection window is moved around in the ROI using x_overlap (0-1) and y_overlap (0-1) as a guide.
+    # If you set the overlap to 0.5 then each detection window will overlap the previous one by 50%. Note
+    # the computational workload goes WAY up the more the windows overlap. Finally, for multi-scale matching after
+    # sliding the network around in the x/y dimensions the detection window will shrink by scale_mul (0-1)
+    # down to min_scale (0-1). For example, if scale_mul is 0.5 the detection window will shrink by 50%.
+    # Note that at a lower scale there's even more area to search if x_overlap and y_overlap are small...
+
+    # default settings just do one detection... change them to search the image...
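+    # For example, a slower multi-scale sliding-window search might look like
+    # this sketch (illustrative values only, using the keywords described
+    # above):
+    #
+    # for obj in tf.classify(mobilenet, img, min_scale=0.5, scale_mul=0.75,
+    #                        x_overlap=0.5, y_overlap=0.5):
+    #     ...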
+    for obj in tf.classify(mobilenet, img, min_scale=1.0, scale_mul=0.5, x_overlap=0.0, y_overlap=0.0):
+        print("**********\nTop 5 Detections at [x=%d,y=%d,w=%d,h=%d]" % obj.rect())
+        img.draw_rectangle(obj.rect())
+        # This combines the labels and confidence values into a list of tuples
+        # and then sorts that list by the confidence values.
+        sorted_list = sorted(zip(labels, obj.output()), key = lambda x: x[1], reverse = True)
+        for i in range(5):
+            print("%s = %f" % (sorted_list[i][0], sorted_list[i][1]))
+    print(clock.fps(), "fps")
diff --git a/scripts/examples/OpenMV/25-Machine-Learning/tf_mobilenet_serach_just_center.py b/scripts/examples/OpenMV/25-Machine-Learning/tf_mobilenet_serach_just_center.py
new file mode 100644
index 000000000..1371a877f
--- /dev/null
+++ b/scripts/examples/OpenMV/25-Machine-Learning/tf_mobilenet_serach_just_center.py
@@ -0,0 +1,64 @@
+# TensorFlow Lite Mobilenet V1 Example
+#
+# Google's Mobilenet V1 detects 1000 classes of objects
+#
+# WARNING: Mobilenet is trained on ImageNet and isn't meant to classify anything
+# in the real world. It's just designed to score well on the ImageNet dataset.
+# This example just shows off running mobilenet on the OpenMV Cam. However, the
+# default model is not really usable for anything. You have to use transfer
+# learning to apply the model to a target problem by re-training the model.
+#
+# NOTE: This example only works on the OpenMV Cam H7 Plus (that has SDRAM) and better!
+# To get the models please see the CNN Network library in OpenMV IDE under
+# Tools -> Machine Vision. The labels are there too.
+#
+# In this example we slide the detector window over the image and get a list
+# of activations. Note that using a CNN with a sliding window is extremely
+# computationally expensive, so for an exhaustive search do not expect the CNN to be real-time.
+
+import sensor, image, time, os, tf
+
+sensor.reset() # Reset and initialize the sensor.
+sensor.set_pixformat(sensor.RGB565) # Set pixel format to RGB565 (or GRAYSCALE)
+sensor.set_framesize(sensor.QVGA) # Set frame size to QVGA (320x240)
+sensor.set_windowing((240, 240)) # Set 240x240 window.
+sensor.skip_frames(time=2000) # Let the camera adjust.
+
+mobilenet_version = "1" # 1
+mobilenet_width = "0.5" # 1.0, 0.75, 0.50, 0.25
+mobilenet_resolution = "128" # 224, 192, 160, 128
+
+mobilenet = "mobilenet_v%s_%s_%s_quant.tflite" % (mobilenet_version, mobilenet_width, mobilenet_resolution)
+labels = [line.rstrip('\n') for line in open("mobilenet_labels.txt")]
+
+clock = time.clock()
+while(True):
+    clock.tick()
+
+    img = sensor.snapshot()
+
+    # tf.classify() will run the network on an roi in the image (or on the whole image if the roi is not
+    # specified). A classification score output vector will be generated for each location. At each scale the
+    # detection window is moved around in the ROI using x_overlap (0-1) and y_overlap (0-1) as a guide.
+    # If you set the overlap to 0.5 then each detection window will overlap the previous one by 50%. Note
+    # the computational workload goes WAY up the more the windows overlap. Finally, for multi-scale matching after
+    # sliding the network around in the x/y dimensions the detection window will shrink by scale_mul (0-1)
+    # down to min_scale (0-1). For example, if scale_mul is 0.5 the detection window will shrink by 50%.
+    # Note that at a lower scale there's even more area to search if x_overlap and y_overlap are small...
+
+    # Setting x_overlap=-1 forces the window to stay centered in the ROI in the x direction always.
+    # If y_overlap is not -1 the method will search in all vertical positions.
+
+    # Setting y_overlap=-1 forces the window to stay centered in the ROI in the y direction always. If
+    # x_overlap is not -1 the method will search in all horizontal positions.
+
+    # default settings just do one detection... change them to search the image...
+    for obj in tf.classify(mobilenet, img, min_scale=1.0, scale_mul=0.5, x_overlap=-1, y_overlap=-1):
+        print("**********\nTop 5 Detections at [x=%d,y=%d,w=%d,h=%d]" % obj.rect())
+        img.draw_rectangle(obj.rect())
+        # This combines the labels and confidence values into a list of tuples
+        # and then sorts that list by the confidence values.
+        sorted_list = sorted(zip(labels, obj.output()), key = lambda x: x[1], reverse = True)
+        for i in range(5):
+            print("%s = %f" % (sorted_list[i][0], sorted_list[i][1]))
+    print(clock.fps(), "fps")
diff --git a/scripts/examples/OpenMV/25-Machine-Learning/tf_person_detection_search_just_center.py b/scripts/examples/OpenMV/25-Machine-Learning/tf_person_detection_search_just_center.py
new file mode 100644
index 000000000..05ad0cd16
--- /dev/null
+++ b/scripts/examples/OpenMV/25-Machine-Learning/tf_person_detection_search_just_center.py
@@ -0,0 +1,49 @@
+# TensorFlow Lite Person Detection Example
+#
+# Google's Person Detection Model detects if a person is in view.
+#
+# In this example we slide the detector window over the image and get a list
+# of activations. Note that using a CNN with a sliding window is extremely
+# computationally expensive, so for an exhaustive search do not expect the CNN to be real-time.
+
+import sensor, image, time, os, tf
+
+sensor.reset() # Reset and initialize the sensor.
+sensor.set_pixformat(sensor.GRAYSCALE) # Set pixel format to GRAYSCALE (or RGB565)
+sensor.set_framesize(sensor.QVGA) # Set frame size to QVGA (320x240)
+sensor.set_windowing((240, 240)) # Set 240x240 window.
+sensor.skip_frames(time=2000) # Let the camera adjust.
+
+# Load the built-in person detection network (the network is in your OpenMV Cam's firmware).
+net = tf.load('person_detection')
+labels = ['unsure', 'person', 'no_person']
+
+clock = time.clock()
+while(True):
+    clock.tick()
+
+    img = sensor.snapshot()
+
+    # net.classify() will run the network on an roi in the image (or on the whole image if the roi is not
+    # specified). A classification score output vector will be generated for each location. At each scale the
+    # detection window is moved around in the ROI using x_overlap (0-1) and y_overlap (0-1) as a guide.
+    # If you set the overlap to 0.5 then each detection window will overlap the previous one by 50%. Note
+    # the computational workload goes WAY up the more the windows overlap. Finally, for multi-scale matching after
+    # sliding the network around in the x/y dimensions the detection window will shrink by scale_mul (0-1)
+    # down to min_scale (0-1). For example, if scale_mul is 0.5 the detection window will shrink by 50%.
+    # Note that at a lower scale there's even more area to search if x_overlap and y_overlap are small...
+
+    # Setting x_overlap=-1 forces the window to stay centered in the ROI in the x direction always. If
+    # y_overlap is not -1 the method will search in all vertical positions.
+
+    # Setting y_overlap=-1 forces the window to stay centered in the ROI in the y direction always. If
+    # x_overlap is not -1 the method will search in all horizontal positions.
+
+    # default settings just do one detection... change them to search the image...
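+    # E.g. to sweep the whole window instead of staying centered you could
+    # use fractional overlaps instead of -1 (illustrative values only):
+    #
+    # for obj in net.classify(img, min_scale=0.5, scale_mul=0.5,
+    #                         x_overlap=0.5, y_overlap=0.5):
+    #     ...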
+ for obj in net.classify(img, min_scale=0.5, scale_mul=0.5, x_overlap=-1, y_overlap=-1): + print("**********\nDetections at [x=%d,y=%d,w=%d,h=%d]" % obj.rect()) + for i in range(len(obj.output())): + print("%s = %f" % (labels[i], obj.output()[i])) + img.draw_rectangle(obj.rect()) + img.draw_string(obj.x()+3, obj.y()-1, labels[obj.output().index(max(obj.output()))], mono_space = False) + print(clock.fps(), "fps") diff --git a/scripts/examples/OpenMV/25-Machine-Learning/tf_person_detection_search_whole_window.py b/scripts/examples/OpenMV/25-Machine-Learning/tf_person_detection_search_whole_window.py new file mode 100644 index 000000000..082cb56d4 --- /dev/null +++ b/scripts/examples/OpenMV/25-Machine-Learning/tf_person_detection_search_whole_window.py @@ -0,0 +1,43 @@ +# TensorFlow Lite Person Detection Example +# +# Google's Person Detection Model detects if a person is in view. +# +# In this example we slide the detector window over the image and get a list +# of activations. Note that using a CNN with a sliding window is extremely computationally +# expensive, so for an exhaustive search do not expect the CNN to be real-time. + +import sensor, image, time, os, tf + +sensor.reset() # Reset and initialize the sensor. +sensor.set_pixformat(sensor.GRAYSCALE) # Set pixel format to GRAYSCALE +sensor.set_framesize(sensor.QVGA) # Set frame size to QVGA (320x240) +sensor.set_windowing((240, 240)) # Set 240x240 window. +sensor.skip_frames(time=2000) # Let the camera adjust. + +# Load the built-in person detection network (the network is in your OpenMV Cam's firmware). +net = tf.load('person_detection') +labels = ['unsure', 'person', 'no_person'] + +clock = time.clock() +while(True): + clock.tick() + + img = sensor.snapshot() + + # net.classify() will run the network on an roi in the image (or on the whole image if the roi is not + # specified). A classification score output vector will be generated for each location. At each scale the + # detection window is moved around in the ROI using x_overlap (0-1) and y_overlap (0-1) as a guide. + # If you set the overlap to 0.5 then each detection window will overlap the previous one by 50%. Note + # that the computational workload goes WAY up with more overlap. Finally, for multi-scale matching after + # sliding the network around in the x/y dimensions the detection window will shrink by scale_mul (0-1) + # down to min_scale (0-1). For example, if scale_mul is 0.5 the detection window will shrink by 50%. + # Note that at a lower scale there's even more area to search if x_overlap and y_overlap are small... + + # default settings just do one detection... change them to search the image... + for obj in net.classify(img, min_scale=1.0, scale_mul=0.5, x_overlap=0.0, y_overlap=0.0): + print("**********\nDetections at [x=%d,y=%d,w=%d,h=%d]" % obj.rect()) + for i in range(len(obj.output())): + print("%s = %f" % (labels[i], obj.output()[i])) + img.draw_rectangle(obj.rect()) + img.draw_string(obj.x()+3, obj.y()-1, labels[obj.output().index(max(obj.output()))], mono_space = False) + print(clock.fps(), "fps") diff --git a/scripts/examples/OpenMV/26-April-Tags/find_apriltags.py b/scripts/examples/OpenMV/26-April-Tags/find_apriltags.py new file mode 100644 index 000000000..02409b0f0 --- /dev/null +++ b/scripts/examples/OpenMV/26-April-Tags/find_apriltags.py @@ -0,0 +1,57 @@ +# AprilTags Example +# +# This example shows the power of the OpenMV Cam to detect April Tags +# on the OpenMV Cam M7. The M4 versions cannot detect April Tags.
+ +import sensor, image, time, math + +sensor.reset() +sensor.set_pixformat(sensor.RGB565) +sensor.set_framesize(sensor.QQVGA) # we run out of memory if the resolution is much bigger... +sensor.skip_frames(time = 2000) +sensor.set_auto_gain(False) # must turn this off to prevent image washout... +sensor.set_auto_whitebal(False) # must turn this off to prevent image washout... +clock = time.clock() + +# Note! Unlike find_qrcodes the find_apriltags method does not need lens correction on the image to work. + +# The apriltag code supports up to 6 tag families which can be processed at the same time. +# Returned tag objects will have their tag family and id within the tag family. + +tag_families = 0 +tag_families |= image.TAG16H5 # comment out to disable this family +tag_families |= image.TAG25H7 # comment out to disable this family +tag_families |= image.TAG25H9 # comment out to disable this family +tag_families |= image.TAG36H10 # comment out to disable this family +tag_families |= image.TAG36H11 # comment out to disable this family (default family) +tag_families |= image.ARTOOLKIT # comment out to disable this family + +# What's the difference between tag families? Well, for example, the TAG16H5 family is effectively +# a 4x4 square tag. So, this means it can be seen at a longer distance than a TAG36H11 tag which +# is a 6x6 square tag. However, the lower H value (H5 versus H11) means that the false positive +# rate for the 4x4 tag is much, much, much higher than the 6x6 tag. So, unless you have a +# reason to use the other tag families just use TAG36H11 which is the default family. + +def family_name(tag): + if(tag.family() == image.TAG16H5): + return "TAG16H5" + if(tag.family() == image.TAG25H7): + return "TAG25H7" + if(tag.family() == image.TAG25H9): + return "TAG25H9" + if(tag.family() == image.TAG36H10): + return "TAG36H10" + if(tag.family() == image.TAG36H11): + return "TAG36H11" + if(tag.family() == image.ARTOOLKIT): + return "ARTOOLKIT" + +while(True): + clock.tick() + img = sensor.snapshot() + for tag in img.find_apriltags(families=tag_families): # defaults to TAG36H11 without "families". + img.draw_rectangle(tag.rect(), color = (255, 0, 0)) + img.draw_cross(tag.cx(), tag.cy(), color = (0, 255, 0)) + print_args = (family_name(tag), tag.id(), (180 * tag.rotation()) / math.pi) + print("Tag Family %s, Tag ID %d, rotation %f (degrees)" % print_args) + print(clock.fps()) diff --git a/scripts/examples/OpenMV/26-April-Tags/find_apriltags_3d_pose.py b/scripts/examples/OpenMV/26-April-Tags/find_apriltags_3d_pose.py new file mode 100644 index 000000000..64e763253 --- /dev/null +++ b/scripts/examples/OpenMV/26-April-Tags/find_apriltags_3d_pose.py @@ -0,0 +1,57 @@ +# AprilTags Example +# +# This example shows the power of the OpenMV Cam to detect April Tags +# on the OpenMV Cam M7. The M4 versions cannot detect April Tags. + +import sensor, image, time, math + +sensor.reset() +sensor.set_pixformat(sensor.RGB565) +sensor.set_framesize(sensor.QQVGA) # we run out of memory if the resolution is much bigger... +sensor.skip_frames(time = 2000) +sensor.set_auto_gain(False) # must turn this off to prevent image washout... +sensor.set_auto_whitebal(False) # must turn this off to prevent image washout... +clock = time.clock() + +# Note! Unlike find_qrcodes the find_apriltags method does not need lens correction on the image to work. + +# What's the difference between tag families? Well, for example, the TAG16H5 family is effectively +# a 4x4 square tag.
So, this means it can be seen at a longer distance than a TAG36H11 tag which +# is a 6x6 square tag. However, the lower H value (H5 versus H11) means that the false positive +# rate for the 4x4 tag is much, much, much higher than the 6x6 tag. So, unless you have a +# reason to use the other tag families just use TAG36H11 which is the default family. + +# The AprilTags library outputs the pose information for tags. This is the x/y/z translation and +# x/y/z rotation. The x/y/z rotation is in radians and can be converted to degrees. As for +# translation, the units are dimensionless and you must apply a conversion function. + +# f_x is the x focal length of the camera. It should be equal to the lens focal length in mm +# divided by the x sensor size in mm, times the number of pixels in the image. +# The below values are for the OV7725 camera with a 2.8 mm lens. + +# f_y is the y focal length of the camera. It should be equal to the lens focal length in mm +# divided by the y sensor size in mm, times the number of pixels in the image. +# The below values are for the OV7725 camera with a 2.8 mm lens. + +# c_x is the image x center position in pixels. +# c_y is the image y center position in pixels. + +f_x = (2.8 / 3.984) * 160 # find_apriltags defaults to this if not set +f_y = (2.8 / 2.952) * 120 # find_apriltags defaults to this if not set +c_x = 160 * 0.5 # find_apriltags defaults to this if not set (the image.w * 0.5) +c_y = 120 * 0.5 # find_apriltags defaults to this if not set (the image.h * 0.5) + +def degrees(radians): + return (180 * radians) / math.pi + +while(True): + clock.tick() + img = sensor.snapshot() + for tag in img.find_apriltags(fx=f_x, fy=f_y, cx=c_x, cy=c_y): # defaults to TAG36H11 + img.draw_rectangle(tag.rect(), color = (255, 0, 0)) + img.draw_cross(tag.cx(), tag.cy(), color = (0, 255, 0)) + print_args = (tag.x_translation(), tag.y_translation(), tag.z_translation(), \ + degrees(tag.x_rotation()), degrees(tag.y_rotation()), degrees(tag.z_rotation())) + # Translation units are unknown. Rotation units are in degrees. + print("Tx: %f, Ty %f, Tz %f, Rx %f, Ry %f, Rz %f" % print_args) + print(clock.fps()) diff --git a/scripts/examples/OpenMV/26-April-Tags/find_apriltags_max_res.py b/scripts/examples/OpenMV/26-April-Tags/find_apriltags_max_res.py new file mode 100644 index 000000000..82f05427c --- /dev/null +++ b/scripts/examples/OpenMV/26-April-Tags/find_apriltags_max_res.py @@ -0,0 +1,61 @@ +# AprilTags Max Res Example +# +# This example shows the power of the OpenMV Cam to detect April Tags +# on the OpenMV Cam M7. The M4 versions cannot detect April Tags. + +import sensor, image, time, math, omv + +sensor.reset() +sensor.set_pixformat(sensor.GRAYSCALE) +sensor.set_framesize(sensor.VGA) # we run out of memory if the resolution is much bigger... +# AprilTags works on a maximum of < 64K pixels. +if omv.board_type() == "H7": sensor.set_windowing((240, 240)) +elif omv.board_type() == "M7": sensor.set_windowing((200, 200)) +else: raise Exception("You need a more powerful OpenMV Cam to run this script") +sensor.skip_frames(time = 2000) +sensor.set_auto_gain(False) # must turn this off to prevent image washout... +sensor.set_auto_whitebal(False) # must turn this off to prevent image washout... +clock = time.clock() + +# Note! Unlike find_qrcodes the find_apriltags method does not need lens correction on the image to work. + +# The apriltag code supports up to 6 tag families which can be processed at the same time.
+# Returned tag objects will have their tag family and id within the tag family. + +tag_families = 0 +tag_families |= image.TAG16H5 # comment out to disable this family +tag_families |= image.TAG25H7 # comment out to disable this family +tag_families |= image.TAG25H9 # comment out to disable this family +tag_families |= image.TAG36H10 # comment out to disable this family +tag_families |= image.TAG36H11 # comment out to disable this family (default family) +tag_families |= image.ARTOOLKIT # comment out to disable this family + +# What's the difference between tag families? Well, for example, the TAG16H5 family is effectively +# a 4x4 square tag. So, this means it can be seen at a longer distance than a TAG36H11 tag which +# is a 6x6 square tag. However, the lower H value (H5 versus H11) means that the false positive +# rate for the 4x4 tag is much, much, much higher than the 6x6 tag. So, unless you have a +# reason to use the other tag families just use TAG36H11 which is the default family. + +def family_name(tag): + if(tag.family() == image.TAG16H5): + return "TAG16H5" + if(tag.family() == image.TAG25H7): + return "TAG25H7" + if(tag.family() == image.TAG25H9): + return "TAG25H9" + if(tag.family() == image.TAG36H10): + return "TAG36H10" + if(tag.family() == image.TAG36H11): + return "TAG36H11" + if(tag.family() == image.ARTOOLKIT): + return "ARTOOLKIT" + +while(True): + clock.tick() + img = sensor.snapshot() + for tag in img.find_apriltags(families=tag_families): # defaults to TAG36H11 without "families". + img.draw_rectangle(tag.rect(), color = 127) + img.draw_cross(tag.cx(), tag.cy(), color = 127) + print_args = (family_name(tag), tag.id(), (180 * tag.rotation()) / math.pi) + print("Tag Family %s, Tag ID %d, rotation %f (degrees)" % print_args) + print(clock.fps()) diff --git a/scripts/examples/OpenMV/26-April-Tags/find_apriltags_w_lens_zoom.py b/scripts/examples/OpenMV/26-April-Tags/find_apriltags_w_lens_zoom.py new file mode 100644 index 000000000..3778ccb68 --- /dev/null +++ b/scripts/examples/OpenMV/26-April-Tags/find_apriltags_w_lens_zoom.py @@ -0,0 +1,33 @@ +# AprilTags Example +# +# This example shows the power of the OpenMV Cam to detect April Tags +# on the OpenMV Cam M7. The M4 versions cannot detect April Tags. + +import sensor, image, time, math + +sensor.reset() +sensor.set_pixformat(sensor.RGB565) +sensor.set_framesize(sensor.VGA) # we run out of memory if the resolution is much bigger... +sensor.set_windowing((160, 120)) # Look at center 160x120 pixels of the VGA resolution. +sensor.skip_frames(time = 2000) +sensor.set_auto_gain(False) # must turn this off to prevent image washout... +sensor.set_auto_whitebal(False) # must turn this off to prevent image washout... +clock = time.clock() + +# Note! Unlike find_qrcodes the find_apriltags method does not need lens correction on the image to work. + +# What's the difference between tag families? Well, for example, the TAG16H5 family is effectively +# a 4x4 square tag. So, this means it can be seen at a longer distance than a TAG36H11 tag which +# is a 6x6 square tag. However, the lower H value (H5 versus H11) means that the false positive +# rate for the 4x4 tag is much, much, much higher than the 6x6 tag. So, unless you have a +# reason to use the other tag families just use TAG36H11 which is the default family.
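+# (Editor's note, not part of the original example.) The windowing above acts as
+# a ~4x digital zoom: the center 160x120 of the 640x480 VGA readout covers 1/4
+# of the field of view in each dimension, so a tag should be detectable at
+# roughly 4x the distance of a full-FOV 160x120 image for the same
+# find_apriltags() cost.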
+ +while(True): + clock.tick() + img = sensor.snapshot() + for tag in img.find_apriltags(): # defaults to TAG36H11 + img.draw_rectangle(tag.rect(), color = (255, 0, 0)) + img.draw_cross(tag.cx(), tag.cy(), color = (0, 255, 0)) + print_args = (tag.id(), (180 * tag.rotation()) / math.pi) + print("Tag Family TAG36H11, Tag ID %d, rotation %f (degrees)" % print_args) + print(clock.fps()) diff --git a/scripts/examples/OpenMV/26-April-Tags/find_small_apriltags.py b/scripts/examples/OpenMV/26-April-Tags/find_small_apriltags.py new file mode 100644 index 000000000..cbd54bf9e --- /dev/null +++ b/scripts/examples/OpenMV/26-April-Tags/find_small_apriltags.py @@ -0,0 +1,71 @@ +# Find Small Apriltags +# +# This script shows off how to use blob tracking as a pre-filter to +# finding Apriltags in the image, using blob tracking to find the +# area where the tag is first and then calling find_apriltags +# on that blob. + +# Note, this script works well assuming most parts of the image do not +# pass the thresholding test... otherwise, you don't get a distance +# benefit. + +import sensor, image, time, math, omv + +# Set the thresholds to find a white object (i.e. tag border) +thresholds = (150, 255) + +sensor.reset() +sensor.set_pixformat(sensor.GRAYSCALE) +if omv.board_type() == "H7": sensor.set_framesize(sensor.VGA) +elif omv.board_type() == "M7": sensor.set_framesize(sensor.QVGA) +else: raise Exception("You need a more powerful OpenMV Cam to run this script") +sensor.skip_frames(time = 200) # increase this to let the auto methods run for longer +sensor.set_auto_gain(False) # must be turned off for color tracking +sensor.set_auto_whitebal(False) # must be turned off for color tracking +clock = time.clock() + +# The apriltag code supports up to 6 tag families which can be processed at the same time. +# Returned tag objects will have their tag family and id within the tag family. +tag_families = 0 +tag_families |= image.TAG16H5 # comment out to disable this family +tag_families |= image.TAG25H7 # comment out to disable this family +tag_families |= image.TAG25H9 # comment out to disable this family +tag_families |= image.TAG36H10 # comment out to disable this family +tag_families |= image.TAG36H11 # comment out to disable this family (default family) +tag_families |= image.ARTOOLKIT # comment out to disable this family + +while(True): + clock.tick() + img = sensor.snapshot() + + # First, we find blobs that may be candidates for tags. + box_list = [] + + # AprilTags may fail due to not having enough RAM given the image size being passed. + tag_list = [] + + for blob in img.find_blobs([thresholds], pixels_threshold=100, area_threshold=100, merge=True): + # Next we look for a tag in an ROI that's bigger than the blob. + w = min(max(int(blob.w() * 1.2), 10), 160) # Not too small, not too big. + h = min(max(int(blob.h() * 1.2), 10), 160) # Not too small, not too big. + x = min(max(int(blob.x() + (blob.w()/4) - (w * 0.1)), 0), img.width()-1) + y = min(max(int(blob.y() + (blob.h()/4) - (h * 0.1)), 0), img.height()-1) + + box_list.append((x, y, w, h)) # We'll draw these later. + + # Since we constrain the roi size apriltags shouldn't run out of RAM. + # But, if it does we handle it... + try: + tag_list.extend(img.find_apriltags(roi=(x,y,w,h), families=tag_families)) + except (MemoryError): # Don't catch all exceptions otherwise you can't stop the script.
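+ # (Editor's note.) Only MemoryError is caught on purpose: find_apriltags()
+ # raises it when the roi is still too large for the AprilTag workspace, and
+ # we simply skip that blob. A bare except would also swallow the exception
+ # the IDE uses to stop the script, which is what the comment above warns about.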
+ pass + + for b in box_list: + img.draw_rectangle(b) + # Now print out the found tags + for tag in tag_list: + img.draw_rectangle(tag.rect()) + img.draw_cross(tag.cx(), tag.cy()) + for c in tag.corners(): + img.draw_circle(c[0], c[1], 5) + print("Tag:", tag.cx(), tag.cy(), tag.rotation(), tag.id()) diff --git a/scripts/examples/27-Lepton/lepton_get_object_temp.py b/scripts/examples/OpenMV/27-Lepton/lepton_get_object_temp.py similarity index 100% rename from scripts/examples/27-Lepton/lepton_get_object_temp.py rename to scripts/examples/OpenMV/27-Lepton/lepton_get_object_temp.py diff --git a/scripts/examples/27-Lepton/lepton_get_object_temp_color.py b/scripts/examples/OpenMV/27-Lepton/lepton_get_object_temp_color.py similarity index 100% rename from scripts/examples/27-Lepton/lepton_get_object_temp_color.py rename to scripts/examples/OpenMV/27-Lepton/lepton_get_object_temp_color.py diff --git a/scripts/examples/27-Lepton/lepton_get_object_temp_color_lcd.py b/scripts/examples/OpenMV/27-Lepton/lepton_get_object_temp_color_lcd.py similarity index 100% rename from scripts/examples/27-Lepton/lepton_get_object_temp_color_lcd.py rename to scripts/examples/OpenMV/27-Lepton/lepton_get_object_temp_color_lcd.py diff --git a/scripts/examples/27-Lepton/lepton_get_object_temp_lcd.py b/scripts/examples/OpenMV/27-Lepton/lepton_get_object_temp_lcd.py similarity index 100% rename from scripts/examples/27-Lepton/lepton_get_object_temp_lcd.py rename to scripts/examples/OpenMV/27-Lepton/lepton_get_object_temp_lcd.py diff --git a/scripts/examples/27-Lepton/lepton_hotspot_grayscale_color_tracking.py b/scripts/examples/OpenMV/27-Lepton/lepton_hotspot_grayscale_color_tracking.py similarity index 100% rename from scripts/examples/27-Lepton/lepton_hotspot_grayscale_color_tracking.py rename to scripts/examples/OpenMV/27-Lepton/lepton_hotspot_grayscale_color_tracking.py diff --git a/scripts/examples/27-Lepton/lepton_hotspot_grayscale_color_tracking_lcd.py b/scripts/examples/OpenMV/27-Lepton/lepton_hotspot_grayscale_color_tracking_lcd.py similarity index 100% rename from scripts/examples/27-Lepton/lepton_hotspot_grayscale_color_tracking_lcd.py rename to scripts/examples/OpenMV/27-Lepton/lepton_hotspot_grayscale_color_tracking_lcd.py diff --git a/scripts/examples/27-Lepton/lepton_hotspot_rgb565_color_tracking.py b/scripts/examples/OpenMV/27-Lepton/lepton_hotspot_rgb565_color_tracking.py similarity index 100% rename from scripts/examples/27-Lepton/lepton_hotspot_rgb565_color_tracking.py rename to scripts/examples/OpenMV/27-Lepton/lepton_hotspot_rgb565_color_tracking.py diff --git a/scripts/examples/27-Lepton/lepton_hotspot_rgb565_color_tracking_lcd.py b/scripts/examples/OpenMV/27-Lepton/lepton_hotspot_rgb565_color_tracking_lcd.py similarity index 100% rename from scripts/examples/27-Lepton/lepton_hotspot_rgb565_color_tracking_lcd.py rename to scripts/examples/OpenMV/27-Lepton/lepton_hotspot_rgb565_color_tracking_lcd.py diff --git a/scripts/examples/27-Lepton/lepton_target_temp_hotspot_grayscale_color_tracking.py b/scripts/examples/OpenMV/27-Lepton/lepton_target_temp_hotspot_grayscale_color_tracking.py similarity index 100% rename from scripts/examples/27-Lepton/lepton_target_temp_hotspot_grayscale_color_tracking.py rename to scripts/examples/OpenMV/27-Lepton/lepton_target_temp_hotspot_grayscale_color_tracking.py diff --git a/scripts/examples/27-Lepton/lepton_target_temp_hotspot_rgb565_color_tracking.py b/scripts/examples/OpenMV/27-Lepton/lepton_target_temp_hotspot_rgb565_color_tracking.py similarity index 100% rename 
from scripts/examples/27-Lepton/lepton_target_temp_hotspot_rgb565_color_tracking.py rename to scripts/examples/OpenMV/27-Lepton/lepton_target_temp_hotspot_rgb565_color_tracking.py diff --git a/scripts/examples/OpenMV/28-Global-Shutter/high_fps.py b/scripts/examples/OpenMV/28-Global-Shutter/high_fps.py new file mode 100644 index 000000000..d2cf12ded --- /dev/null +++ b/scripts/examples/OpenMV/28-Global-Shutter/high_fps.py @@ -0,0 +1,29 @@ +# High FPS Example +# +# This example shows off how to make the frame rate of the global shutter camera extremely +# high. To do so you need to set the resolution to a low value such that pixel binning is +# activated on the camera and then reduce the maximum exposure time. +# +# When the resolution is 320x240 or less the camera reads out pixels 2x faster. When the +# resolution is 160x120 or less the camera reads out pixels 4x faster. This happens due +# to pixel binning which is automatically activated for you to increase the readout speed. +# +# While the readout speed may increase, the camera must still expose the image for the requested +# time, so you will not get the maximum readout speed unless you reduce the exposure time too. +# This results in a dark image, however, so YOU NEED A LOT of lighting for high FPS. + +import sensor, image, time + +sensor.reset() # Reset and initialize the sensor. +sensor.set_pixformat(sensor.GRAYSCALE) # Set pixel format to GRAYSCALE +sensor.set_framesize(sensor.QQVGA) # Set frame size to QQVGA (160x120) - make smaller to go faster +sensor.skip_frames(time = 2000) # Wait for settings to take effect. +clock = time.clock() # Create a clock object to track the FPS. + +sensor.set_auto_exposure(True, exposure_us=5000) # make smaller to go faster + +while(True): + clock.tick() # Update the FPS clock. + img = sensor.snapshot() # Take a picture and return the image. + print(clock.fps()) # Note: OpenMV Cam runs about half as fast when connected + # to the IDE. The FPS should increase once disconnected. diff --git a/scripts/examples/OpenMV/28-Global-Shutter/triggered_mode.py b/scripts/examples/OpenMV/28-Global-Shutter/triggered_mode.py new file mode 100644 index 000000000..08e78c916 --- /dev/null +++ b/scripts/examples/OpenMV/28-Global-Shutter/triggered_mode.py @@ -0,0 +1,29 @@ +# Global Shutter Triggered Mode Example +# +# This example shows off setting the global shutter camera into triggered mode. In triggered mode +# snapshot() controls EXACTLY when integration of the camera pixels starts such that you can sync +# taking pictures to some external movement. Since the camera captures all pixels at the same time +# (as it is a global shutter camera versus a rolling shutter camera) movement in the image will +# only be captured for the integration time and not the integration time multiplied by the number +# of rows in the image. Additionally, sensor noise is reduced in triggered mode as the camera will +# not read out rows until after exposing which results in a higher quality image. +# +# That said, your maximum frame rate will be reduced by a factor of 2 to 3 as frames are no longer +# generated continuously by the camera and because you have to wait for the integration to finish +# before readout of the frame. + +import sensor, image, time + +sensor.reset() # Reset and initialize the sensor. +sensor.set_pixformat(sensor.GRAYSCALE) # Set pixel format to GRAYSCALE +sensor.set_framesize(sensor.VGA) # Set frame size to VGA (640x480) +sensor.skip_frames(time = 2000) # Wait for settings to take effect.
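+# (Editor's sketch, not part of the original example.) A rough upper bound on
+# triggered-mode FPS, since each frame costs about exposure + readout time;
+# both numbers below are hypothetical placeholders:
+# exposure_us = 10000                              # e.g. 10 ms exposure
+# readout_us = 30000                               # e.g. VGA readout time
+# max_fps = 1000000 / (exposure_us + readout_us)   # ~25 FPS ceiling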
+clock = time.clock() # Create a clock object to track the FPS. + +sensor.ioctl(sensor.IOCTL_SET_TRIGGERED_MODE, True) + +while(True): + clock.tick() # Update the FPS clock. + img = sensor.snapshot() # Take a picture and return the image. + print(clock.fps()) # Note: OpenMV Cam runs about half as fast when connected + # to the IDE. The FPS should increase once disconnected. diff --git a/scripts/examples/29-IMU-Shield/imu_read.py b/scripts/examples/OpenMV/29-IMU-Shield/imu_read.py similarity index 100% rename from scripts/examples/29-IMU-Shield/imu_read.py rename to scripts/examples/OpenMV/29-IMU-Shield/imu_read.py diff --git a/scripts/examples/30-Distance-Shield/distance_read.py b/scripts/examples/OpenMV/30-Distance-Shield/distance_read.py similarity index 100% rename from scripts/examples/30-Distance-Shield/distance_read.py rename to scripts/examples/OpenMV/30-Distance-Shield/distance_read.py diff --git a/scripts/examples/31-TV-Shield/tv.py b/scripts/examples/OpenMV/31-TV-Shield/tv.py similarity index 100% rename from scripts/examples/31-TV-Shield/tv.py rename to scripts/examples/OpenMV/31-TV-Shield/tv.py diff --git a/scripts/examples/OpenMV/32-modbus/modbus_apriltag.py b/scripts/examples/OpenMV/32-modbus/modbus_apriltag.py new file mode 100644 index 000000000..c0e51b501 --- /dev/null +++ b/scripts/examples/OpenMV/32-modbus/modbus_apriltag.py @@ -0,0 +1,39 @@ +import sensor, image +import time +from pyb import UART +from modbus import ModbusRTU + +sensor.reset() +sensor.set_pixformat(sensor.GRAYSCALE) +sensor.set_framesize(sensor.QQVGA) # we run out of memory if the resolution is much bigger... + +uart = UART(3,115200, parity=None, stop=2, timeout=1, timeout_char=4) +modbus = ModbusRTU(uart, register_num=9999) + +sensor.skip_frames(time = 2000) +clock = time.clock() + +while(True): + if modbus.any(): + modbus.handle(debug=True) + else: + clock.tick() + img = sensor.snapshot() + tags = img.find_apriltags() # defaults to TAG36H11 without "families". 
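+ # (Editor's note.) Register layout written below: REGISTER[0] holds the tag
+ # count, then each tag appends four registers in order: family, id, cx, cy.
+ # A Modbus master can therefore read 1 + 4*N registers to recover N tags.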
+ modbus.clear() + modbus.REGISTER[0] = len(tags) + if tags: + print(tags) + i = 1 + for tag in tags: + img.draw_rectangle(tag.rect(), color = 127) + modbus.REGISTER[i] = tag.family() + i += 1 + modbus.REGISTER[i] = tag.id() + i += 1 + modbus.REGISTER[i] = tag.cx() + i += 1 + modbus.REGISTER[i] = tag.cy() + i += 1 + #print(modbus.REGISTER[0:15]) + #print(clock.fps()) diff --git a/scripts/examples/OpenMV/32-modbus/modbus_rtu_slave.py b/scripts/examples/OpenMV/32-modbus/modbus_rtu_slave.py new file mode 100644 index 000000000..85ce4bdf9 --- /dev/null +++ b/scripts/examples/OpenMV/32-modbus/modbus_rtu_slave.py @@ -0,0 +1,17 @@ +import time +from pyb import UART +from modbus import ModbusRTU +uart = UART(3,115200, parity=None, stop=2, timeout=1, timeout_char=4) +modbus = ModbusRTU(uart, register_num=9999) + +while(True): + if modbus.any(): + modbus.handle(debug=True) + else: + time.sleep_ms(100) + modbus.REGISTER[0] = 1000 + modbus.REGISTER[1] += 1 + modbus.REGISTER[3] += 3 + #print(modbus.REGISTER[10:15]) + # image processing goes here + diff --git a/scripts/examples/33-Light-Shield/light.py b/scripts/examples/OpenMV/33-Light-Shield/light.py similarity index 100% rename from scripts/examples/33-Light-Shield/light.py rename to scripts/examples/OpenMV/33-Light-Shield/light.py diff --git a/scripts/examples/OpenMV/34-Remote-Control/image_transfer_jpg_as_the_remote_device_for_your_computer.py b/scripts/examples/OpenMV/34-Remote-Control/image_transfer_jpg_as_the_remote_device_for_your_computer.py new file mode 100644 index 000000000..cf6baa5d1 --- /dev/null +++ b/scripts/examples/OpenMV/34-Remote-Control/image_transfer_jpg_as_the_remote_device_for_your_computer.py @@ -0,0 +1,87 @@ +# Image Transfer - As The Remote Device +# +# This script is meant to talk to the "image_transfer_jpg_as_the_controller_device.py" on your computer. +# +# This script shows off how to transfer the frame buffer to your computer as a jpeg image. + +import image, network, omv, rpc, sensor, struct + +sensor.reset() +sensor.set_pixformat(sensor.RGB565) +sensor.set_framesize(sensor.QVGA) +sensor.skip_frames(time = 2000) + +# Turn off the frame buffer connection to the IDE from the OpenMV Cam side. +# +# This needs to be done when manually compressing jpeg images at higher quality +# so that the OpenMV Cam does not try to stream them to the IDE using a fallback +# mechanism if the JPEG image is too large to fit in the IDE JPEG frame buffer on the OpenMV Cam. + +omv.disable_fb(True) + +# The RPC library above is installed on your OpenMV Cam and provides multiple classes for +# allowing your OpenMV Cam to be controlled over USB or WiFi. + +################################################################ +# Choose the interface you wish to control your OpenMV Cam over. +################################################################ + +# Uncomment the below line to set up your OpenMV Cam for control over a USB VCP. +# +interface = rpc.rpc_usb_vcp_slave() + +# Uncomment the below line to set up your OpenMV Cam for control over WiFi. +# +# * ssid - WiFi network to connect to. +# * ssid_key - WiFi network password. +# * ssid_security - WiFi security. +# * port - Port to route traffic to. +# * mode - Regular or access-point mode.
+# * static_ip - If not None then a tuple of the (IP Address, Subnet Mask, Gateway, DNS Address) +# +# interface = rpc.rpc_wifi_slave(ssid="", +# ssid_key="", +# ssid_security=network.WINC.WPA_PSK, +# port=0x1DBA, +# mode=network.WINC.MODE_STA, +# static_ip=None) + +################################################################ +# Call Backs +################################################################ + +# When called, sets the pixformat and framesize, takes a snapshot +# and then returns the frame buffer jpg size to store the image in. +# +# data is a pixformat string and framesize string. +def jpeg_image_snapshot(data): + pixformat, framesize = bytes(data).decode().split(",") + sensor.set_pixformat(eval(pixformat)) + sensor.set_framesize(eval(framesize)) + img = sensor.snapshot().compress(quality=90) + return struct.pack("<I", img.size()) [...] if x_error > 0: print("+X Limit Reached ", end="") + if y_error < 0: print("-Y Limit Reached ", end="") + if y_error > 0: print("+Y Limit Reached ", end="") + + center_on_blob(most_dense_blob, TRACKING_RESOLUTION) + + # This loop will track the blob at a much higher readout speed and lower resolution. + while(True): + clock.tick() + img = sensor.snapshot() + + # Find the blob in the lower resolution image. + blobs = img.find_blobs(TRACKING_THRESHOLDS, + area_threshold=TRACKING_AREA_THRESHOLD, + pixels_threshold=TRACKING_PIXEL_THRESHOLD) + + # If we lose the blob then we need to find a new one. + if not len(blobs): + # Reset resolution. + sensor.set_framesize(SEARCHING_RESOLUTION) + sensor.ioctl(sensor.IOCTL_SET_READOUT_WINDOW, (sensor_w, sensor_h)) + break + + # Narrow down the blob list and highlight the blob. + most_dense_blob = max(blobs, key = lambda x: x.density()) + img.draw_rectangle(most_dense_blob.rect()) + + print(clock.fps(), "BLOB cx:%d, cy:%d" % get_mapped_centroid(most_dense_blob)) + + x_diff = most_dense_blob.cx() - (sensor.width() / 2.0) + y_diff = most_dense_blob.cy() - (sensor.height() / 2.0) + + w_threshold = (sensor.width() / 2.0) * TRACKING_EDGE_TOLERANCE + h_threshold = (sensor.height() / 2.0) * TRACKING_EDGE_TOLERANCE + + # Re-center on the blob if it starts going out of view (costs FPS). + if abs(x_diff) > w_threshold or abs(y_diff) > h_threshold: + center_on_blob(most_dense_blob, TRACKING_RESOLUTION) + + print(clock.fps()) diff --git a/scripts/examples/OpenMV/35-Readout-Control/apriltag_tracking.py b/scripts/examples/OpenMV/35-Readout-Control/apriltag_tracking.py new file mode 100644 index 000000000..c465d1d92 --- /dev/null +++ b/scripts/examples/OpenMV/35-Readout-Control/apriltag_tracking.py @@ -0,0 +1,151 @@ +# This example shows off how to use readout window control to read out a small part of a camera +# sensor pixel array at a very high speed and move that readout window around. + +# This example was designed and tested on the OpenMV Cam H7 Plus using the OV5640 sensor. + +import sensor, image, time + +# This example script forces the exposure to a constant value for the whole time. However, you may +# wish to dynamically adjust the exposure when the readout window shrinks to a small size. +EXPOSURE_MICROSECONDS = 20000 + +SEARCHING_RESOLUTION = sensor.QVGA +TRACKING_RESOLUTION = sensor.QQVGA # or sensor.QQQVGA + +TRACKING_LOW_RATIO_THRESHOLD = 0.2 # Go to a smaller readout window when tag side vs res is smaller. +TRACKING_HIGH_RATIO_THRESHOLD = 0.8 # Go to a larger readout window when tag side vs res is larger. + +sensor.reset() # Reset and initialize the sensor.
+sensor.set_pixformat(sensor.GRAYSCALE) # Set pixel format to GRAYSCALE +sensor.set_framesize(SEARCHING_RESOLUTION) +sensor.skip_frames(time = 1000) # Wait for settings to take effect. +clock = time.clock() # Create a clock object to track the FPS. + +sensor.set_auto_gain(False) # Turn off as it will oscillate. +sensor.set_auto_exposure(False, exposure_us=EXPOSURE_MICROSECONDS) +sensor.skip_frames(time = 1000) + +# sensor_w and sensor_h are the image sensor raw pixels w/h (x/y are 0 initially). +x, y, sensor_w, sensor_h = sensor.ioctl(sensor.IOCTL_GET_READOUT_WINDOW) + +while(True): + clock.tick() + img = sensor.snapshot() + + # Tracks TAG36H11 by default. + tags = img.find_apriltags() + + if len(tags): + best_tag = max(tags, key = lambda x: x.decision_margin()) + img.draw_rectangle(best_tag.rect()) + + # This needs to be less than the sensor output at default so we can move it around. + readout_window_w = ((sensor_w // sensor.width()) * sensor.width()) / 2 + readout_window_h = ((sensor_h // sensor.height()) * sensor.height()) / 2 + + def get_mapped_centroid(t): + # By default the readout window is set to the whole sensor pixel array with x/y==0. + # The resolution you see is produced by taking pixels from the readout window on + # the camera. The x/y location is relative to the sensor center. + x, y, w, h = sensor.ioctl(sensor.IOCTL_GET_READOUT_WINDOW) + + # The camera driver will try to scale whatever resolution you pass to the max + # width/height that fits on the sensor while keeping the aspect ratio. + ratio = min(w / float(sensor.width()), h / float(sensor.height())) + + # Reference cx() to the center of the viewport and then scale to the readout. + mapped_cx = (t.cx() - (sensor.width() / 2.0)) * ratio + # Since we are keeping the aspect ratio there might be an offset in x. + mapped_cx += (w - (sensor.width() * ratio)) / 2.0 + # Add in our displacement from the sensor center + mapped_cx += x + (sensor_w / 2.0) + + # Reference cy() to the center of the viewport and then scale to the readout. + mapped_cy = (t.cy() - (sensor.height() / 2.0)) * ratio + # Since we are keeping the aspect ratio there might be an offset in y. + mapped_cy += (h - (sensor.height() * ratio)) / 2.0 + # Add in our displacement from the sensor center + mapped_cy += y + (sensor_h / 2.0) + + return (mapped_cx, mapped_cy) # X/Y location on the sensor array. + + def center_on_tag(t, res): + global readout_window_w + global readout_window_h + mapped_cx, mapped_cy = get_mapped_centroid(t) + + # Switch to the res (if res was unchanged this does nothing). + sensor.set_framesize(res) + + # Construct readout window. x/y are offsets from the center. + x = int(mapped_cx - (sensor_w / 2.0)) + y = int(mapped_cy - (sensor_h / 2.0)) + w = int(readout_window_w) + h = int(readout_window_h) + + # Focus on the centroid. + sensor.ioctl(sensor.IOCTL_SET_READOUT_WINDOW, (x, y, w, h)) + + # See if we are hitting the edge. + new_x, new_y, w, h = sensor.ioctl(sensor.IOCTL_GET_READOUT_WINDOW) + + # You can use these error values to drive servos to move the camera if you want. + x_error = x - new_x + y_error = y - new_y + + if x_error < 0: print("-X Limit Reached ", end="") + if x_error > 0: print("+X Limit Reached ", end="") + if y_error < 0: print("-Y Limit Reached ", end="") + if y_error > 0: print("+Y Limit Reached ", end="") + + center_on_tag(best_tag, TRACKING_RESOLUTION) + + loss_count = 0 + + # This loop will track the tag at a much higher readout speed and lower resolution.
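+ # (Editor's worked example, not part of the original script.) For a QVGA
+ # (320x240) view with the readout window still the full 2592x1944 OV5640
+ # array: ratio = min(2592/320, 1944/240) = 8.1, so a tag at cx=200 maps to
+ # (200 - 160) * 8.1 + 0 + 1296 = 1620 on the sensor's x axis, which is what
+ # get_mapped_centroid() above computes.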
+ while(True): + clock.tick() + img = sensor.snapshot() + + # Tracks TAG36H11 by default. + tags = img.find_apriltags() + + # If we lose the tag then we need to find a new one. + if not len(tags): + # Handle a few bad frames due to tag flicker. + if (loss_count < 2): + loss_count += 1 + continue + # Reset resolution. + sensor.set_framesize(SEARCHING_RESOLUTION) + sensor.ioctl(sensor.IOCTL_SET_READOUT_WINDOW, (sensor_w, sensor_h)) + break + + loss_count = 0 + + # Narrow down the tag list and highlight the best tag. + best_tag = max(tags, key = lambda x: x.decision_margin()) + img.draw_rectangle(best_tag.rect()) + + print(clock.fps(), "TAG cx:%d, cy:%d" % get_mapped_centroid(best_tag)) + + w_ratio = best_tag.w() / sensor.width() + h_ratio = best_tag.h() / sensor.height() + + # Shrink the tracking window until the tag fits. + while (w_ratio < TRACKING_LOW_RATIO_THRESHOLD) or (h_ratio < TRACKING_LOW_RATIO_THRESHOLD): + readout_window_w /= 2 + readout_window_h /= 2 + w_ratio *= 2 + h_ratio *= 2 + + # Enlarge the tracking window until the tag fits. + while (TRACKING_HIGH_RATIO_THRESHOLD < w_ratio) or (TRACKING_HIGH_RATIO_THRESHOLD < h_ratio): + readout_window_w *= 2 + readout_window_h *= 2 + w_ratio /= 2 + h_ratio /= 2 + + center_on_tag(best_tag, TRACKING_RESOLUTION) + + print(clock.fps()) diff --git a/scripts/examples/OpenMV/36-Web-Servers/rtsp_video_server.py b/scripts/examples/OpenMV/36-Web-Servers/rtsp_video_server.py new file mode 100644 index 000000000..8de8d8165 --- /dev/null +++ b/scripts/examples/OpenMV/36-Web-Servers/rtsp_video_server.py @@ -0,0 +1,81 @@ +# RTSP Video Server +# +# This example shows off how to stream video over RTSP with your OpenMV Cam. +# +# You can use a program like VLC to view the video stream by connecting to the +# OpenMV Cam's IP address. + +import network, omv, rtsp, sensor, time + +# RTP MJPEG streaming works using JPEG images produced by the OV2640/OV5640 camera modules. +# Not all programs (e.g. VLC) implement the full JPEG standard for decoding any JPEG image +# in RTP packets. Images JPEG-compressed by the OpenMV Cam internally may not display. + +sensor.reset() + +sensor.set_pixformat(sensor.JPEG) # Only supported by the OV2640/OV5640. +sensor.set_framesize(sensor.UXGA) + +# Turn off the frame buffer connection to the IDE from the OpenMV Cam side. +# +# This needs to be done when manually compressing jpeg images at higher quality +# so that the OpenMV Cam does not try to stream them to the IDE using a fallback +# mechanism if the JPEG image is too large to fit in the IDE JPEG frame buffer on the OpenMV Cam. + +omv.disable_fb(True) + +# * ssid - WiFi network to connect to. +# * ssid_key - WiFi network password. +# * ssid_security - WiFi security. +# * port - Port to listen to (554). +# * mode - Regular or access-point mode. +# * static_ip - If not None then a tuple of the (IP Address, Subnet Mask, Gateway, DNS Address) + +server = rtsp.rtsp_server(ssid="", + ssid_key="", + ssid_security=network.WINC.WPA_PSK, + port=554, + mode=network.WINC.MODE_STA, + static_ip=None) + +# For the call back functions below: +# +# `pathname` is the name of the stream resource the client wants. You can ignore this if it's not +# needed. Otherwise, you can use it to determine what image object to return. By default the path +# name will be "/". +# +# `session` is a random number that will change when a new connection is established. You can use +# session with a dictionary to differentiate different accesses to the same file name.
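+# (Editor's sketch, not part of the original example.) One way to use `session`
+# with a dictionary as suggested above -- per-connection state, with a
+# hypothetical callback name:
+# sessions = {}
+# def my_setup_cb(pathname, session):
+#     sessions[session] = {"pathname": pathname, "frames": 0}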
+ +def setup_callback(pathname, session): + print("Opening \"%s\" in session %d" % (pathname, session)) + +def play_callback(pathname, session): + print("Playing \"%s\" in session %d" % (pathname, session)) + +def pause_callback(pathname, session): # VLC only pauses locally. This is never called. + print("Pausing \"%s\" in session %d" % (pathname, session)) + +def teardown_callback(pathname, session): + print("Closing \"%s\" in session %d" % (pathname, session)) + +server.register_setup_cb(setup_callback) +server.register_play_cb(play_callback) +server.register_pause_cb(pause_callback) +server.register_teardown_cb(teardown_callback) + +# Track the current FPS. +clock = time.clock() + +# Called each time a new frame is needed. +def image_callback(pathname, session): + clock.tick() + img = sensor.snapshot() + # Markup image and/or do various things. + print(clock.fps()) + return img + +# stream() does not return. It will call `image_callback` when it needs to get an image object to send +# to the remote rtsp client connecting to the server. + +server.stream(image_callback) diff --git a/scripts/examples/OpenMV/99-Tests/colorbar.py b/scripts/examples/OpenMV/99-Tests/colorbar.py new file mode 100644 index 000000000..6978c9e65 --- /dev/null +++ b/scripts/examples/OpenMV/99-Tests/colorbar.py @@ -0,0 +1,55 @@ +# Colorbar Test Example +# +# This example is the color bar test run by each OpenMV Cam before being allowed +# out of the factory. The OMV sensors can output a color bar image which you +# can threshold to check that the camera bus is connected correctly. + +import sensor, time + +sensor.reset() +# Set sensor settings +sensor.set_brightness(0) +sensor.set_saturation(3) +sensor.set_gainceiling(8) +sensor.set_contrast(2) + +# Set sensor pixel format +sensor.set_framesize(sensor.QVGA) +sensor.set_pixformat(sensor.RGB565) + +# Enable colorbar test mode +sensor.set_colorbar(True) + +# Skip a few frames to allow the sensor to settle down +for i in range(0, 30): + image = sensor.snapshot() + +# Color bars thresholds +t = [lambda r, g, b: r < 70 and g < 70 and b < 70, # Black + lambda r, g, b: r < 70 and g < 70 and b > 200, # Blue + lambda r, g, b: r > 200 and g < 70 and b < 70, # Red + lambda r, g, b: r > 200 and g < 70 and b > 200, # Purple + lambda r, g, b: r < 70 and g > 200 and b < 70, # Green + lambda r, g, b: r < 70 and g > 200 and b > 200, # Aqua + lambda r, g, b: r > 200 and g > 200 and b < 70, # Yellow + lambda r, g, b: r > 200 and g > 200 and b > 200] # White + +# color bars are inverted for OV7725 +if (sensor.get_id() == sensor.OV7725): + t = t[::-1] + +# 320x240 image with 8 color bars, each one approx 40 pixels wide. +# we start from the center of the frame buffer, and average the +# values of 10 sample pixels from the center of each color bar. +for i in range(0, 8): + avg = (0, 0, 0) + idx = 40*i+20 # center of colorbars + for off in range(0, 10): # avg 10 pixels + rgb = image.get_pixel(idx+off, 120) + avg = tuple(map(sum, zip(avg, rgb))) + + if not t[i](avg[0]/10, avg[1]/10, avg[2]/10): + raise Exception("COLOR BARS TEST FAILED. " + "BAR#(%d): RGB(%d,%d,%d)"%(i+1, avg[0]/10, avg[1]/10, avg[2]/10)) + +print("COLOR BARS TEST PASSED...") diff --git a/scripts/examples/OpenMV/99-Tests/fps.py b/scripts/examples/OpenMV/99-Tests/fps.py new file mode 100644 index 000000000..5f5b8ebae --- /dev/null +++ b/scripts/examples/OpenMV/99-Tests/fps.py @@ -0,0 +1,14 @@ +# FPS Test Script. +import sensor, image, time + +sensor.reset() # Initialize the camera sensor.
+sensor.set_framesize(sensor.QQVGA) # or sensor.QVGA (or others) +sensor.set_pixformat(sensor.RGB565) # or sensor.GRAYSCALE +sensor.set_colorbar(True) # Enable colorbars output + +clock = time.clock() # Tracks FPS. +for i in range(0, 600): + clock.tick() # Track elapsed milliseconds between snapshots(). + sensor.snapshot() # Capture snapshot. + +print("FPS:", clock.fps()) diff --git a/scripts/examples/OpenMV/99-Tests/selftest.py b/scripts/examples/OpenMV/99-Tests/selftest.py new file mode 100644 index 000000000..fcb65edf6 --- /dev/null +++ b/scripts/examples/OpenMV/99-Tests/selftest.py @@ -0,0 +1,77 @@ +# Self Test Example +# +# This example shows how your OpenMV Cam tests itself before being allowed out +# of the factory. Every OpenMV Cam should pass this test. + +import sensor, time, pyb + +def test_int_adc(): + adc = pyb.ADCAll(12) + # Test VBAT + vbat = adc.read_core_vbat() + vbat_diff = abs(vbat-3.3) + if (vbat_diff > 0.1): + raise Exception('INTERNAL ADC TEST FAILED VBAT=%fv'%vbat) + + # Test VREF + vref = adc.read_core_vref() + vref_diff = abs(vref-1.2) + if (vref_diff > 0.1): + raise Exception('INTERNAL ADC TEST FAILED VREF=%fv'%vref) + adc = None + print('INTERNAL ADC TEST PASSED...') + +def test_color_bars(): + sensor.reset() + # Set sensor settings + sensor.set_brightness(0) + sensor.set_saturation(3) + sensor.set_gainceiling(8) + sensor.set_contrast(2) + + # Set sensor pixel format + sensor.set_framesize(sensor.QVGA) + sensor.set_pixformat(sensor.RGB565) + + # Enable colorbar test mode + sensor.set_colorbar(True) + + # Skip a few frames to allow the sensor to settle down + for i in range(0, 100): + image = sensor.snapshot() + + # color bars thresholds + t = [lambda r, g, b: r < 70 and g < 70 and b < 70, # Black + lambda r, g, b: r < 70 and g < 70 and b > 200, # Blue + lambda r, g, b: r > 200 and g < 70 and b < 70, # Red + lambda r, g, b: r > 200 and g < 70 and b > 200, # Purple + lambda r, g, b: r < 70 and g > 200 and b < 70, # Green + lambda r, g, b: r < 70 and g > 200 and b > 200, # Aqua + lambda r, g, b: r > 200 and g > 200 and b < 70, # Yellow + lambda r, g, b: r > 200 and g > 200 and b > 200] # White + + # color bars are inverted for OV7725 + if (sensor.get_id() == sensor.OV7725): + t = t[::-1] + + # 320x240 image with 8 color bars, each one approx 40 pixels wide. + # we start from the center of the frame buffer, and average the + # values of 10 sample pixels from the center of each color bar. + for i in range(0, 8): + avg = (0, 0, 0) + idx = 40*i+20 # center of colorbars + for off in range(0, 10): # avg 10 pixels + rgb = image.get_pixel(idx+off, 120) + avg = tuple(map(sum, zip(avg, rgb))) + + if not t[i](avg[0]/10, avg[1]/10, avg[2]/10): + raise Exception('COLOR BARS TEST FAILED.' 'BAR#(%d): RGB(%d,%d,%d)'%(i+1, avg[0]/10, avg[1]/10, avg[2]/10)) + + print('COLOR BARS TEST PASSED...') + +if __name__ == '__main__': + print('') + test_int_adc() + test_color_bars() + diff --git a/scripts/examples/OpenMV/99-Tests/unittests.py b/scripts/examples/OpenMV/99-Tests/unittests.py new file mode 100644 index 000000000..d20932873 --- /dev/null +++ b/scripts/examples/OpenMV/99-Tests/unittests.py @@ -0,0 +1,38 @@ +# OpenMV Unit Tests.
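+# (Editor's note, not part of the original script.) The runner below exec()'s
+# each file under unittest/script and expects it to define a function
+# unittest(data_dir, temp_dir) that returns True on success.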
+# +import os, sensor, gc + +TEST_DIR = "unittest" +TEMP_DIR = "unittest/temp" +DATA_DIR = "unittest/data" +SCRIPT_DIR = "unittest/script" + +if not (TEST_DIR in os.listdir("")): + raise Exception('Unittest dir not found!') + +print("") +test_failed = False + +def print_result(test, passed): + s = "Unittest (%s)"%(test) + padding = "."*(60-len(s)) + print(s + padding + ("PASSED" if passed == True else "FAILED")) + +for test in sorted(os.listdir(SCRIPT_DIR)): + if test.endswith(".py"): + test_passed = True + test_path = "/".join((SCRIPT_DIR, test)) + try: + exec(open(test_path).read()) + gc.collect() + if unittest(DATA_DIR, TEMP_DIR) == False: + raise Exception() + except Exception as e: + test_failed = True + test_passed = False + print_result(test, test_passed) + +if test_failed: + print("\nSome tests have FAILED!!!\n\n") +else: + print("\nAll tests PASSED.\n\n")