From 2c9a88c3e2999b0b4be3dc2afe2ca35fe9913874 Mon Sep 17 00:00:00 2001 From: iabdalkader Date: Mon, 20 Jun 2022 14:26:20 +0200 Subject: [PATCH] scripts/examples: Add Nicla Vision examples. --- .../Nicla-Vision/00-Basics/helloworld.py | 17 ++ .../Arduino/Nicla-Vision/00-Basics/main.py | 33 +++ .../Nicla-Vision/01-Sensors/lsm6dsox_basic.py | 12 + .../Nicla-Vision/01-Sensors/lsm6dsox_mlc.py | 46 +++ .../Nicla-Vision/01-Sensors/vl53l1x_tof.py | 11 + .../Nicla-Vision/02-Board-Control/adc_read.py | 13 + .../02-Board-Control/adc_read_int_channel.py | 8 + .../Nicla-Vision/02-Board-Control/can.py | 36 +++ .../02-Board-Control/cpufreq_scaling.py | 24 ++ .../02-Board-Control/i2c_control.py | 19 ++ .../02-Board-Control/led_control.py | 22 ++ .../02-Board-Control/native_emitters.py | 19 ++ .../02-Board-Control/pin_control.py | 13 + .../02-Board-Control/pwm_control.py | 30 ++ .../Nicla-Vision/02-Board-Control/rtc.py | 12 + .../02-Board-Control/spi_control.py | 73 +++++ .../02-Board-Control/timer_control.py | 19 ++ .../02-Board-Control/uart_control.py | 13 + .../Nicla-Vision/02-Board-Control/usb_hid.py | 31 ++ .../Nicla-Vision/02-Board-Control/usb_vcp.py | 37 +++ .../02-Board-Control/vsync_gpio_output.py | 22 ++ .../Nicla-Vision/03-Drawing/arrow_drawing.py | 31 ++ .../Nicla-Vision/03-Drawing/circle_drawing.py | 31 ++ .../Nicla-Vision/03-Drawing/copy2fb.py | 15 + .../Nicla-Vision/03-Drawing/cross_drawing.py | 29 ++ .../03-Drawing/ellipse_drawing.py | 35 +++ .../Nicla-Vision/03-Drawing/flood_fill.py | 35 +++ .../Nicla-Vision/03-Drawing/image_drawing.py | 25 ++ .../03-Drawing/image_drawing_advanced.py | 93 ++++++ .../image_drawing_alpha_blending_test.py | 71 +++++ ...ng_alpha_blending_with_color_table_test.py | 81 ++++++ .../image_drawing_alpha_table_test.py | 75 +++++ ...awing_alpha_table_with_color_table_test.py | 85 ++++++ .../image_drawing_scale_down_test.py | 69 +++++ .../03-Drawing/image_drawing_scale_up_test.py | 63 ++++ .../image_drawing_with_custom_palette.py | 43 +++ .../03-Drawing/keypoints_drawing.py | 31 ++ .../Nicla-Vision/03-Drawing/line_drawing.py | 31 ++ .../03-Drawing/rectangle_drawing.py | 31 ++ .../Nicla-Vision/03-Drawing/text_drawing.py | 33 +++ .../adaptive_histogram_equalization.py | 29 ++ .../04-Image-Filters/blur_filter.py | 21 ++ .../04-Image-Filters/cartoon_filter.py | 29 ++ .../color_bilateral_filter.py | 33 +++ .../04-Image-Filters/color_binary_filter.py | 61 ++++ .../04-Image-Filters/color_light_removal.py | 25 ++ .../04-Image-Filters/edge_filter.py | 21 ++ .../04-Image-Filters/erode_and_dilate.py | 35 +++ .../04-Image-Filters/gamma_correction.py | 21 ++ .../grayscale_bilateral_filter.py | 33 +++ .../grayscale_binary_filter.py | 45 +++ .../grayscale_light_removal.py | 25 ++ .../histogram_equalization.py | 19 ++ .../04-Image-Filters/kernel_filters.py | 27 ++ .../04-Image-Filters/lens_correction.py | 21 ++ .../04-Image-Filters/linear_polar.py | 21 ++ .../04-Image-Filters/log_polar.py | 21 ++ .../mean_adaptive_threshold_filter.py | 25 ++ .../04-Image-Filters/mean_filter.py | 25 ++ .../median_adaptive_threshold_filter.py | 27 ++ .../04-Image-Filters/median_filter.py | 27 ++ .../midpoint_adaptive_threshold_filter.py | 28 ++ .../04-Image-Filters/midpoint_filter.py | 27 ++ .../mode_adaptive_threshold_filter.py | 25 ++ .../04-Image-Filters/mode_filter.py | 25 ++ .../Nicla-Vision/04-Image-Filters/negative.py | 19 ++ .../perspective_and_rotation_correction.py | 71 +++++ .../perspective_correction.py | 39 +++ .../04-Image-Filters/rotation_correction.py | 49 ++++ 
.../04-Image-Filters/sharpen_filter.py | 21 ++ .../Nicla-Vision/04-Image-Filters/ulab.py | 19 ++ .../04-Image-Filters/unsharp_filter.py | 21 ++ .../vflip_hmirror_transpose.py | 33 +++ .../05-Snapshot/emboss_snapshot.py | 33 +++ .../Nicla-Vision/05-Snapshot/snapshot.py | 27 ++ .../05-Snapshot/snapshot_on_face_detection.py | 51 ++++ .../05-Snapshot/snapshot_on_movement.py | 44 +++ .../05-Snapshot/time_lapse_photos.py | 67 +++++ .../Nicla-Vision/06-Video-Recording/gif.py | 37 +++ .../gif_on_face_detection.py | 65 +++++ .../06-Video-Recording/gif_on_movement.py | 58 ++++ .../06-Video-Recording/imageio_memory.py | 33 +++ .../06-Video-Recording/imageio_read.py | 32 +++ .../06-Video-Recording/imageio_write.py | 36 +++ .../Nicla-Vision/06-Video-Recording/mjpeg.py | 37 +++ .../mjpeg_on_face_detection.py | 65 +++++ .../06-Video-Recording/mjpeg_on_movement.py | 58 ++++ .../07-Face-Detection/face_detection.py | 46 +++ .../07-Face-Detection/face_recognition.py | 27 ++ .../07-Face-Detection/face_tracking.py | 63 ++++ .../09-Feature-Detection/edges.py | 19 ++ .../09-Feature-Detection/find_circles.py | 39 +++ .../find_line_segments.py | 39 +++ .../09-Feature-Detection/find_lines.py | 57 ++++ .../09-Feature-Detection/find_rects.py | 31 ++ .../Nicla-Vision/09-Feature-Detection/hog.py | 25 ++ .../09-Feature-Detection/keypoints.py | 51 ++++ .../09-Feature-Detection/keypoints_save.py | 30 ++ .../Nicla-Vision/09-Feature-Detection/lbp.py | 49 ++++ .../linear_regression_fast.py | 43 +++ .../linear_regression_robust.py | 45 +++ .../09-Feature-Detection/template_matching.py | 44 +++ .../Nicla-Vision/16-Codes/find_barcodes.py | 63 ++++ .../16-Codes/find_datamatrices.py | 25 ++ .../16-Codes/find_datamatrices_w_lens_zoom.py | 25 ++ .../16-Codes/qrcodes_with_lens_corr.py | 21 ++ .../16-Codes/qrcodes_with_lens_zoom.py | 21 ++ .../Nicla-Vision/19-Low-Power/deep_sleep.py | 26 ++ .../19-Low-Power/extint_wakeup.py | 21 ++ .../Nicla-Vision/19-Low-Power/stop_mode.py | 19 ++ .../in_memory_basic_frame_differencing.py | 46 +++ .../in_memory_structural_similarity.py | 38 +++ .../on_disk_basic_frame_differencing.py | 42 +++ .../on_disk_structural_similarity.py | 34 +++ .../absolute-rotation-scale.py | 67 +++++ .../22-Optical-Flow/absolute-translation.py | 55 ++++ .../differential-rotation-scale.py | 67 +++++ .../differential-translation.py | 55 ++++ .../image-patches-absolute-rotation-scale.py | 73 +++++ .../image-patches-absolute-translation.py | 69 +++++ ...age-patches-differential-rotation-scale.py | 73 +++++ .../image-patches-differential-translation.py | 69 +++++ .../25-Machine-Learning/nn_stm32cubeai.py | 34 +++ .../25-Machine-Learning/tf_face_collection.py | 31 ++ .../tf_face_recognition.py | 41 +++ .../tf_mobilenet_search_just_center.py | 66 +++++ .../tf_mobilenet_search_whole_window.py | 60 ++++ .../tf_object_detection.py | 51 ++++ .../tf_person_detection_search_just_center.py | 48 ++++ ...tf_person_detection_search_whole_window.py | 42 +++ .../26-April-Tags/find_apriltags.py | 55 ++++ .../26-April-Tags/find_apriltags_3d_pose.py | 55 ++++ .../26-April-Tags/find_apriltags_max_res.py | 56 ++++ .../find_apriltags_w_lens_zoom.py | 31 ++ .../26-April-Tags/find_small_apriltags.py | 67 +++++ ..._as_the_remote_device_for_your_computer.py | 87 ++++++ ..._as_the_remote_device_for_your_computer.py | 79 +++++ ...e_transfer_raw_as_the_controller_device.py | 129 +++++++++ ...image_transfer_raw_as_the_remote_device.py | 99 +++++++ ...pular_features_as_the_controller_device.py | 157 ++++++++++ .../popular_features_as_the_remote_device.py 
| 271 ++++++++++++++++++ .../36-Web-Servers/rtsp_video_server_lan.py | 79 +++++ .../36-Web-Servers/rtsp_video_server_wlan.py | 79 +++++ .../Nicla-Vision/37-Audio/audio_fft.py | 56 ++++ .../Arduino/Nicla-Vision/40-WiFi/connect.py | 19 ++ .../Arduino/Nicla-Vision/40-WiFi/dns.py | 20 ++ .../Nicla-Vision/40-WiFi/http_client.py | 39 +++ .../Nicla-Vision/40-WiFi/http_client_ssl.py | 49 ++++ .../Nicla-Vision/40-WiFi/mjpeg_streamer.py | 78 +++++ .../Arduino/Nicla-Vision/40-WiFi/mqtt_pub.py | 32 +++ .../Arduino/Nicla-Vision/40-WiFi/mqtt_sub.py | 39 +++ .../Arduino/Nicla-Vision/40-WiFi/ntp.py | 33 +++ .../Arduino/Nicla-Vision/40-WiFi/scan.py | 18 ++ .../Arduino/Nicla-Vision/40-WiFi/static_ip.py | 33 +++ .../41-Bluetooth/ble_temperature.py | 97 +++++++ 155 files changed, 6675 insertions(+) create mode 100644 scripts/examples/Arduino/Nicla-Vision/00-Basics/helloworld.py create mode 100644 scripts/examples/Arduino/Nicla-Vision/00-Basics/main.py create mode 100644 scripts/examples/Arduino/Nicla-Vision/01-Sensors/lsm6dsox_basic.py create mode 100644 scripts/examples/Arduino/Nicla-Vision/01-Sensors/lsm6dsox_mlc.py create mode 100644 scripts/examples/Arduino/Nicla-Vision/01-Sensors/vl53l1x_tof.py create mode 100644 scripts/examples/Arduino/Nicla-Vision/02-Board-Control/adc_read.py create mode 100644 scripts/examples/Arduino/Nicla-Vision/02-Board-Control/adc_read_int_channel.py create mode 100644 scripts/examples/Arduino/Nicla-Vision/02-Board-Control/can.py create mode 100644 scripts/examples/Arduino/Nicla-Vision/02-Board-Control/cpufreq_scaling.py create mode 100644 scripts/examples/Arduino/Nicla-Vision/02-Board-Control/i2c_control.py create mode 100644 scripts/examples/Arduino/Nicla-Vision/02-Board-Control/led_control.py create mode 100644 scripts/examples/Arduino/Nicla-Vision/02-Board-Control/native_emitters.py create mode 100644 scripts/examples/Arduino/Nicla-Vision/02-Board-Control/pin_control.py create mode 100644 scripts/examples/Arduino/Nicla-Vision/02-Board-Control/pwm_control.py create mode 100644 scripts/examples/Arduino/Nicla-Vision/02-Board-Control/rtc.py create mode 100644 scripts/examples/Arduino/Nicla-Vision/02-Board-Control/spi_control.py create mode 100644 scripts/examples/Arduino/Nicla-Vision/02-Board-Control/timer_control.py create mode 100644 scripts/examples/Arduino/Nicla-Vision/02-Board-Control/uart_control.py create mode 100644 scripts/examples/Arduino/Nicla-Vision/02-Board-Control/usb_hid.py create mode 100644 scripts/examples/Arduino/Nicla-Vision/02-Board-Control/usb_vcp.py create mode 100644 scripts/examples/Arduino/Nicla-Vision/02-Board-Control/vsync_gpio_output.py create mode 100644 scripts/examples/Arduino/Nicla-Vision/03-Drawing/arrow_drawing.py create mode 100644 scripts/examples/Arduino/Nicla-Vision/03-Drawing/circle_drawing.py create mode 100644 scripts/examples/Arduino/Nicla-Vision/03-Drawing/copy2fb.py create mode 100644 scripts/examples/Arduino/Nicla-Vision/03-Drawing/cross_drawing.py create mode 100644 scripts/examples/Arduino/Nicla-Vision/03-Drawing/ellipse_drawing.py create mode 100644 scripts/examples/Arduino/Nicla-Vision/03-Drawing/flood_fill.py create mode 100644 scripts/examples/Arduino/Nicla-Vision/03-Drawing/image_drawing.py create mode 100644 scripts/examples/Arduino/Nicla-Vision/03-Drawing/image_drawing_advanced.py create mode 100644 scripts/examples/Arduino/Nicla-Vision/03-Drawing/image_drawing_alpha_blending_test.py create mode 100644 scripts/examples/Arduino/Nicla-Vision/03-Drawing/image_drawing_alpha_blending_with_color_table_test.py create mode 100644 
scripts/examples/Arduino/Nicla-Vision/03-Drawing/image_drawing_alpha_table_test.py create mode 100644 scripts/examples/Arduino/Nicla-Vision/03-Drawing/image_drawing_alpha_table_with_color_table_test.py create mode 100644 scripts/examples/Arduino/Nicla-Vision/03-Drawing/image_drawing_scale_down_test.py create mode 100644 scripts/examples/Arduino/Nicla-Vision/03-Drawing/image_drawing_scale_up_test.py create mode 100644 scripts/examples/Arduino/Nicla-Vision/03-Drawing/image_drawing_with_custom_palette.py create mode 100644 scripts/examples/Arduino/Nicla-Vision/03-Drawing/keypoints_drawing.py create mode 100644 scripts/examples/Arduino/Nicla-Vision/03-Drawing/line_drawing.py create mode 100644 scripts/examples/Arduino/Nicla-Vision/03-Drawing/rectangle_drawing.py create mode 100644 scripts/examples/Arduino/Nicla-Vision/03-Drawing/text_drawing.py create mode 100644 scripts/examples/Arduino/Nicla-Vision/04-Image-Filters/adaptive_histogram_equalization.py create mode 100644 scripts/examples/Arduino/Nicla-Vision/04-Image-Filters/blur_filter.py create mode 100644 scripts/examples/Arduino/Nicla-Vision/04-Image-Filters/cartoon_filter.py create mode 100644 scripts/examples/Arduino/Nicla-Vision/04-Image-Filters/color_bilateral_filter.py create mode 100644 scripts/examples/Arduino/Nicla-Vision/04-Image-Filters/color_binary_filter.py create mode 100644 scripts/examples/Arduino/Nicla-Vision/04-Image-Filters/color_light_removal.py create mode 100644 scripts/examples/Arduino/Nicla-Vision/04-Image-Filters/edge_filter.py create mode 100644 scripts/examples/Arduino/Nicla-Vision/04-Image-Filters/erode_and_dilate.py create mode 100644 scripts/examples/Arduino/Nicla-Vision/04-Image-Filters/gamma_correction.py create mode 100644 scripts/examples/Arduino/Nicla-Vision/04-Image-Filters/grayscale_bilateral_filter.py create mode 100644 scripts/examples/Arduino/Nicla-Vision/04-Image-Filters/grayscale_binary_filter.py create mode 100644 scripts/examples/Arduino/Nicla-Vision/04-Image-Filters/grayscale_light_removal.py create mode 100644 scripts/examples/Arduino/Nicla-Vision/04-Image-Filters/histogram_equalization.py create mode 100644 scripts/examples/Arduino/Nicla-Vision/04-Image-Filters/kernel_filters.py create mode 100644 scripts/examples/Arduino/Nicla-Vision/04-Image-Filters/lens_correction.py create mode 100644 scripts/examples/Arduino/Nicla-Vision/04-Image-Filters/linear_polar.py create mode 100644 scripts/examples/Arduino/Nicla-Vision/04-Image-Filters/log_polar.py create mode 100644 scripts/examples/Arduino/Nicla-Vision/04-Image-Filters/mean_adaptive_threshold_filter.py create mode 100644 scripts/examples/Arduino/Nicla-Vision/04-Image-Filters/mean_filter.py create mode 100644 scripts/examples/Arduino/Nicla-Vision/04-Image-Filters/median_adaptive_threshold_filter.py create mode 100644 scripts/examples/Arduino/Nicla-Vision/04-Image-Filters/median_filter.py create mode 100644 scripts/examples/Arduino/Nicla-Vision/04-Image-Filters/midpoint_adaptive_threshold_filter.py create mode 100644 scripts/examples/Arduino/Nicla-Vision/04-Image-Filters/midpoint_filter.py create mode 100644 scripts/examples/Arduino/Nicla-Vision/04-Image-Filters/mode_adaptive_threshold_filter.py create mode 100644 scripts/examples/Arduino/Nicla-Vision/04-Image-Filters/mode_filter.py create mode 100644 scripts/examples/Arduino/Nicla-Vision/04-Image-Filters/negative.py create mode 100644 scripts/examples/Arduino/Nicla-Vision/04-Image-Filters/perspective_and_rotation_correction.py create mode 100644 
scripts/examples/Arduino/Nicla-Vision/04-Image-Filters/perspective_correction.py create mode 100644 scripts/examples/Arduino/Nicla-Vision/04-Image-Filters/rotation_correction.py create mode 100644 scripts/examples/Arduino/Nicla-Vision/04-Image-Filters/sharpen_filter.py create mode 100644 scripts/examples/Arduino/Nicla-Vision/04-Image-Filters/ulab.py create mode 100644 scripts/examples/Arduino/Nicla-Vision/04-Image-Filters/unsharp_filter.py create mode 100644 scripts/examples/Arduino/Nicla-Vision/04-Image-Filters/vflip_hmirror_transpose.py create mode 100644 scripts/examples/Arduino/Nicla-Vision/05-Snapshot/emboss_snapshot.py create mode 100644 scripts/examples/Arduino/Nicla-Vision/05-Snapshot/snapshot.py create mode 100644 scripts/examples/Arduino/Nicla-Vision/05-Snapshot/snapshot_on_face_detection.py create mode 100644 scripts/examples/Arduino/Nicla-Vision/05-Snapshot/snapshot_on_movement.py create mode 100644 scripts/examples/Arduino/Nicla-Vision/05-Snapshot/time_lapse_photos.py create mode 100644 scripts/examples/Arduino/Nicla-Vision/06-Video-Recording/gif.py create mode 100644 scripts/examples/Arduino/Nicla-Vision/06-Video-Recording/gif_on_face_detection.py create mode 100644 scripts/examples/Arduino/Nicla-Vision/06-Video-Recording/gif_on_movement.py create mode 100644 scripts/examples/Arduino/Nicla-Vision/06-Video-Recording/imageio_memory.py create mode 100644 scripts/examples/Arduino/Nicla-Vision/06-Video-Recording/imageio_read.py create mode 100644 scripts/examples/Arduino/Nicla-Vision/06-Video-Recording/imageio_write.py create mode 100644 scripts/examples/Arduino/Nicla-Vision/06-Video-Recording/mjpeg.py create mode 100644 scripts/examples/Arduino/Nicla-Vision/06-Video-Recording/mjpeg_on_face_detection.py create mode 100644 scripts/examples/Arduino/Nicla-Vision/06-Video-Recording/mjpeg_on_movement.py create mode 100644 scripts/examples/Arduino/Nicla-Vision/07-Face-Detection/face_detection.py create mode 100644 scripts/examples/Arduino/Nicla-Vision/07-Face-Detection/face_recognition.py create mode 100644 scripts/examples/Arduino/Nicla-Vision/07-Face-Detection/face_tracking.py create mode 100644 scripts/examples/Arduino/Nicla-Vision/09-Feature-Detection/edges.py create mode 100644 scripts/examples/Arduino/Nicla-Vision/09-Feature-Detection/find_circles.py create mode 100644 scripts/examples/Arduino/Nicla-Vision/09-Feature-Detection/find_line_segments.py create mode 100644 scripts/examples/Arduino/Nicla-Vision/09-Feature-Detection/find_lines.py create mode 100644 scripts/examples/Arduino/Nicla-Vision/09-Feature-Detection/find_rects.py create mode 100644 scripts/examples/Arduino/Nicla-Vision/09-Feature-Detection/hog.py create mode 100644 scripts/examples/Arduino/Nicla-Vision/09-Feature-Detection/keypoints.py create mode 100644 scripts/examples/Arduino/Nicla-Vision/09-Feature-Detection/keypoints_save.py create mode 100644 scripts/examples/Arduino/Nicla-Vision/09-Feature-Detection/lbp.py create mode 100644 scripts/examples/Arduino/Nicla-Vision/09-Feature-Detection/linear_regression_fast.py create mode 100644 scripts/examples/Arduino/Nicla-Vision/09-Feature-Detection/linear_regression_robust.py create mode 100644 scripts/examples/Arduino/Nicla-Vision/09-Feature-Detection/template_matching.py create mode 100644 scripts/examples/Arduino/Nicla-Vision/16-Codes/find_barcodes.py create mode 100644 scripts/examples/Arduino/Nicla-Vision/16-Codes/find_datamatrices.py create mode 100644 scripts/examples/Arduino/Nicla-Vision/16-Codes/find_datamatrices_w_lens_zoom.py create mode 100644 
scripts/examples/Arduino/Nicla-Vision/16-Codes/qrcodes_with_lens_corr.py create mode 100644 scripts/examples/Arduino/Nicla-Vision/16-Codes/qrcodes_with_lens_zoom.py create mode 100644 scripts/examples/Arduino/Nicla-Vision/19-Low-Power/deep_sleep.py create mode 100644 scripts/examples/Arduino/Nicla-Vision/19-Low-Power/extint_wakeup.py create mode 100644 scripts/examples/Arduino/Nicla-Vision/19-Low-Power/stop_mode.py create mode 100644 scripts/examples/Arduino/Nicla-Vision/20-Frame-Differencing/in_memory_basic_frame_differencing.py create mode 100644 scripts/examples/Arduino/Nicla-Vision/20-Frame-Differencing/in_memory_structural_similarity.py create mode 100644 scripts/examples/Arduino/Nicla-Vision/20-Frame-Differencing/on_disk_basic_frame_differencing.py create mode 100644 scripts/examples/Arduino/Nicla-Vision/20-Frame-Differencing/on_disk_structural_similarity.py create mode 100644 scripts/examples/Arduino/Nicla-Vision/22-Optical-Flow/absolute-rotation-scale.py create mode 100644 scripts/examples/Arduino/Nicla-Vision/22-Optical-Flow/absolute-translation.py create mode 100644 scripts/examples/Arduino/Nicla-Vision/22-Optical-Flow/differential-rotation-scale.py create mode 100644 scripts/examples/Arduino/Nicla-Vision/22-Optical-Flow/differential-translation.py create mode 100644 scripts/examples/Arduino/Nicla-Vision/22-Optical-Flow/image-patches-absolute-rotation-scale.py create mode 100644 scripts/examples/Arduino/Nicla-Vision/22-Optical-Flow/image-patches-absolute-translation.py create mode 100644 scripts/examples/Arduino/Nicla-Vision/22-Optical-Flow/image-patches-differential-rotation-scale.py create mode 100644 scripts/examples/Arduino/Nicla-Vision/22-Optical-Flow/image-patches-differential-translation.py create mode 100644 scripts/examples/Arduino/Nicla-Vision/25-Machine-Learning/nn_stm32cubeai.py create mode 100644 scripts/examples/Arduino/Nicla-Vision/25-Machine-Learning/tf_face_collection.py create mode 100644 scripts/examples/Arduino/Nicla-Vision/25-Machine-Learning/tf_face_recognition.py create mode 100644 scripts/examples/Arduino/Nicla-Vision/25-Machine-Learning/tf_mobilenet_search_just_center.py create mode 100644 scripts/examples/Arduino/Nicla-Vision/25-Machine-Learning/tf_mobilenet_search_whole_window.py create mode 100644 scripts/examples/Arduino/Nicla-Vision/25-Machine-Learning/tf_object_detection.py create mode 100644 scripts/examples/Arduino/Nicla-Vision/25-Machine-Learning/tf_person_detection_search_just_center.py create mode 100644 scripts/examples/Arduino/Nicla-Vision/25-Machine-Learning/tf_person_detection_search_whole_window.py create mode 100644 scripts/examples/Arduino/Nicla-Vision/26-April-Tags/find_apriltags.py create mode 100644 scripts/examples/Arduino/Nicla-Vision/26-April-Tags/find_apriltags_3d_pose.py create mode 100644 scripts/examples/Arduino/Nicla-Vision/26-April-Tags/find_apriltags_max_res.py create mode 100644 scripts/examples/Arduino/Nicla-Vision/26-April-Tags/find_apriltags_w_lens_zoom.py create mode 100644 scripts/examples/Arduino/Nicla-Vision/26-April-Tags/find_small_apriltags.py create mode 100644 scripts/examples/Arduino/Nicla-Vision/34-Remote-Control/image_transfer_jpg_as_the_remote_device_for_your_computer.py create mode 100644 scripts/examples/Arduino/Nicla-Vision/34-Remote-Control/image_transfer_jpg_streaming_as_the_remote_device_for_your_computer.py create mode 100644 scripts/examples/Arduino/Nicla-Vision/34-Remote-Control/image_transfer_raw_as_the_controller_device.py create mode 100644 
scripts/examples/Arduino/Nicla-Vision/34-Remote-Control/image_transfer_raw_as_the_remote_device.py
 create mode 100644 scripts/examples/Arduino/Nicla-Vision/34-Remote-Control/popular_features_as_the_controller_device.py
 create mode 100644 scripts/examples/Arduino/Nicla-Vision/34-Remote-Control/popular_features_as_the_remote_device.py
 create mode 100644 scripts/examples/Arduino/Nicla-Vision/36-Web-Servers/rtsp_video_server_lan.py
 create mode 100644 scripts/examples/Arduino/Nicla-Vision/36-Web-Servers/rtsp_video_server_wlan.py
 create mode 100644 scripts/examples/Arduino/Nicla-Vision/37-Audio/audio_fft.py
 create mode 100644 scripts/examples/Arduino/Nicla-Vision/40-WiFi/connect.py
 create mode 100644 scripts/examples/Arduino/Nicla-Vision/40-WiFi/dns.py
 create mode 100644 scripts/examples/Arduino/Nicla-Vision/40-WiFi/http_client.py
 create mode 100644 scripts/examples/Arduino/Nicla-Vision/40-WiFi/http_client_ssl.py
 create mode 100644 scripts/examples/Arduino/Nicla-Vision/40-WiFi/mjpeg_streamer.py
 create mode 100644 scripts/examples/Arduino/Nicla-Vision/40-WiFi/mqtt_pub.py
 create mode 100644 scripts/examples/Arduino/Nicla-Vision/40-WiFi/mqtt_sub.py
 create mode 100644 scripts/examples/Arduino/Nicla-Vision/40-WiFi/ntp.py
 create mode 100644 scripts/examples/Arduino/Nicla-Vision/40-WiFi/scan.py
 create mode 100644 scripts/examples/Arduino/Nicla-Vision/40-WiFi/static_ip.py
 create mode 100644 scripts/examples/Arduino/Nicla-Vision/41-Bluetooth/ble_temperature.py
diff --git a/scripts/examples/Arduino/Nicla-Vision/00-Basics/helloworld.py b/scripts/examples/Arduino/Nicla-Vision/00-Basics/helloworld.py
new file mode 100644
index 000000000..a18b84b21
--- /dev/null
+++ b/scripts/examples/Arduino/Nicla-Vision/00-Basics/helloworld.py
@@ -0,0 +1,17 @@
+# Hello World Example
+#
+# Welcome to the OpenMV IDE! Click on the green run arrow button below to run the script!
+
+import sensor, image, time
+
+sensor.reset()                      # Reset and initialize the sensor.
+sensor.set_pixformat(sensor.RGB565) # Set pixel format to RGB565 (or GRAYSCALE)
+sensor.set_framesize(sensor.QVGA)   # Set frame size to QVGA (320x240)
+sensor.skip_frames(time = 2000)     # Wait for settings to take effect.
+clock = time.clock()                # Create a clock object to track the FPS.
+
+while(True):
+    clock.tick()                    # Update the FPS clock.
+    img = sensor.snapshot()         # Take a picture and return the image.
+    print(clock.fps())              # Note: OpenMV Cam runs about half as fast when connected
+                                    # to the IDE. The FPS should increase once disconnected.
diff --git a/scripts/examples/Arduino/Nicla-Vision/00-Basics/main.py b/scripts/examples/Arduino/Nicla-Vision/00-Basics/main.py
new file mode 100644
index 000000000..461b2b7dd
--- /dev/null
+++ b/scripts/examples/Arduino/Nicla-Vision/00-Basics/main.py
@@ -0,0 +1,33 @@
+# Main Module Example
+#
+# When your OpenMV Cam is disconnected from your computer it will either run the
+# main.py script on the SD card (if attached) or the main.py script on
+# your OpenMV Cam's internal flash drive.
+
+import time, pyb
+
+led = pyb.LED(3) # Red LED = 1, Green LED = 2, Blue LED = 3.
+usb = pyb.USB_VCP() # This is a serial port object that allows you to communicate
+                    # with your computer. While it is not open, the code below runs.
+
+while(not usb.isconnected()):
+    led.on()
+    time.sleep_ms(150)
+    led.off()
+    time.sleep_ms(100)
+    led.on()
+    time.sleep_ms(150)
+    led.off()
+    time.sleep_ms(600)
+
+led = pyb.LED(2) # Switch to using the green LED.
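+# Blink the green LED for as long as the USB host stays connected.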
+
+while(usb.isconnected()):
+    led.on()
+    time.sleep_ms(150)
+    led.off()
+    time.sleep_ms(100)
+    led.on()
+    time.sleep_ms(150)
+    led.off()
+    time.sleep_ms(600)
diff --git a/scripts/examples/Arduino/Nicla-Vision/01-Sensors/lsm6dsox_basic.py b/scripts/examples/Arduino/Nicla-Vision/01-Sensors/lsm6dsox_basic.py
new file mode 100644
index 000000000..9bba2c785
--- /dev/null
+++ b/scripts/examples/Arduino/Nicla-Vision/01-Sensors/lsm6dsox_basic.py
@@ -0,0 +1,12 @@
+# LSM6DSOX Gyro example.
+import time
+from lsm6dsox import LSM6DSOX
+from machine import I2C, SPI, Pin
+
+lsm = LSM6DSOX(SPI(5), cs_pin=Pin("PF6", Pin.OUT_PP, Pin.PULL_UP))
+
+while (True):
+    print('Accelerometer: x:{:>8.3f} y:{:>8.3f} z:{:>8.3f}'.format(*lsm.read_accel()))
+    print('Gyroscope: x:{:>8.3f} y:{:>8.3f} z:{:>8.3f}'.format(*lsm.read_gyro()))
+    print("")
+    time.sleep_ms(100)
diff --git a/scripts/examples/Arduino/Nicla-Vision/01-Sensors/lsm6dsox_mlc.py b/scripts/examples/Arduino/Nicla-Vision/01-Sensors/lsm6dsox_mlc.py
new file mode 100644
index 000000000..96110acb9
--- /dev/null
+++ b/scripts/examples/Arduino/Nicla-Vision/01-Sensors/lsm6dsox_mlc.py
@@ -0,0 +1,46 @@
+# LSM6DSOX IMU MLC (Machine Learning Core) Example.
+# Download the raw UCF file, copy to storage and reset.
+
+# NOTE: The pre-trained models (UCF files) for the examples can be found here:
+# https://github.com/STMicroelectronics/STMems_Machine_Learning_Core/tree/master/application_examples/lsm6dsox
+
+import time
+from lsm6dsox import LSM6DSOX
+from machine import I2C, SPI, Pin
+
+INT_MODE = True   # Run in interrupt mode.
+INT_FLAG = False  # Set True on interrupt.
+
+def imu_int_handler(pin):
+    global INT_FLAG
+    INT_FLAG = True
+
+if (INT_MODE == True):
+    int_pin = Pin('PA1', mode=Pin.IN, pull=Pin.PULL_UP)
+    int_pin.irq(handler=imu_int_handler, trigger=Pin.IRQ_RISING)
+
+# Vibration detection example
+UCF_FILE = "lsm6dsox_vibration_monitoring.ucf"
+UCF_LABELS = {0:"no vibration", 1:"low vibration", 2:"high vibration"}
+# NOTE: Selected data rate and scale must match the MLC data rate and scale.
+lsm = LSM6DSOX(SPI(5), cs_pin=Pin("PF6", Pin.OUT_PP, Pin.PULL_UP),
+               gyro_odr=26, accel_odr=26, gyro_scale=2000, accel_scale=4, ucf=UCF_FILE)
+
+# Head gestures example
+#UCF_FILE = "lsm6dsox_head_gestures.ucf"
+#UCF_LABELS = {0:"Nod", 1:"Shake", 2:"Stationary", 3:"Swing", 4:"Walk"}
+# NOTE: Selected data rate and scale must match the MLC data rate and scale.
+#lsm = LSM6DSOX(SPI(5), cs_pin=Pin("PF6", Pin.OUT_PP, Pin.PULL_UP),
+#               gyro_odr=26, accel_odr=26, gyro_scale=250, accel_scale=2, ucf=UCF_FILE)
+
+print("MLC configured...")
+
+while (True):
+    if (INT_MODE):
+        if (INT_FLAG):
+            INT_FLAG = False
+            print(UCF_LABELS[lsm.read_mlc_output()[0]])
+    else:
+        buf = lsm.read_mlc_output()
+        if (buf != None):
+            print(UCF_LABELS[buf[0]])
diff --git a/scripts/examples/Arduino/Nicla-Vision/01-Sensors/vl53l1x_tof.py b/scripts/examples/Arduino/Nicla-Vision/01-Sensors/vl53l1x_tof.py
new file mode 100644
index 000000000..e8ef75464
--- /dev/null
+++ b/scripts/examples/Arduino/Nicla-Vision/01-Sensors/vl53l1x_tof.py
@@ -0,0 +1,11 @@
+# VL53L1X ToF sensor basic distance measurement example.
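+#
+# The sensor is on I2C bus 2; the driver's read() method returns the measured
+# distance in millimeters.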
+from machine import I2C
+from vl53l1x import VL53L1X
+import time
+
+tof = VL53L1X(I2C(2))
+
+while True:
+    print(f"Distance: {tof.read()}mm")
+    time.sleep_ms(50)
+
diff --git a/scripts/examples/Arduino/Nicla-Vision/02-Board-Control/adc_read.py b/scripts/examples/Arduino/Nicla-Vision/02-Board-Control/adc_read.py
new file mode 100644
index 000000000..20fec14b3
--- /dev/null
+++ b/scripts/examples/Arduino/Nicla-Vision/02-Board-Control/adc_read.py
@@ -0,0 +1,13 @@
+# ADC Read Example.
+#
+# This example shows how to use the ADC to read an analog pin.
+
+import time
+from pyb import ADC
+
+adc = ADC("A0")
+
+while(True):
+    # The ADC has 12 bits of resolution for 4096 values.
+    print("ADC = %fv" % ((adc.read() * 3.3) / 4095))
+    time.sleep_ms(100)
diff --git a/scripts/examples/Arduino/Nicla-Vision/02-Board-Control/adc_read_int_channel.py b/scripts/examples/Arduino/Nicla-Vision/02-Board-Control/adc_read_int_channel.py
new file mode 100644
index 000000000..4e913870d
--- /dev/null
+++ b/scripts/examples/Arduino/Nicla-Vision/02-Board-Control/adc_read_int_channel.py
@@ -0,0 +1,8 @@
+# ADC Internal Channels Example
+#
+# This example shows how to read internal ADC channels.
+
+import time, pyb
+
+adc = pyb.ADCAll(12)
+print("VREF = %.1fv VBAT = %.1fv Temp = %d" % (adc.read_core_vref(), adc.read_core_vbat(), adc.read_core_temp()))
diff --git a/scripts/examples/Arduino/Nicla-Vision/02-Board-Control/can.py b/scripts/examples/Arduino/Nicla-Vision/02-Board-Control/can.py
new file mode 100644
index 000000000..59955bad2
--- /dev/null
+++ b/scripts/examples/Arduino/Nicla-Vision/02-Board-Control/can.py
@@ -0,0 +1,36 @@
+# CAN Example
+#
+# This example demonstrates CAN communications between two cameras.
+# NOTE: you need two CAN transceiver shields and a DB9 cable to run this example.
+
+import time, omv
+from pyb import CAN
+
+# NOTE: Set to False on the receiving node.
+TRANSMITTER = True
+
+can = CAN(1, CAN.NORMAL, baudrate=125_000, sample_point=75)
+# NOTE: uncomment to set bit timing manually, for example:
+#can.init(CAN.NORMAL, prescaler=32, sjw=1, bs1=8, bs2=3)
+can.restart()
+
+if (TRANSMITTER):
+    while (True):
+        # Send message with id 1
+        can.send('Hello', 1)
+        time.sleep_ms(1000)
+
+else:
+    # Runs on the receiving node.
+    if (omv.board_type() == 'H7'): # FDCAN
+        # Set a filter to receive messages with id=1 -> 4
+        # Filter index, mode (RANGE, DUAL or MASK), FIFO (0 or 1), params
+        can.setfilter(0, CAN.RANGE, 0, (1, 4))
+    else:
+        # Set a filter to receive messages with id=1, 2, 3 and 4
+        # Filter index, mode (LIST16, etc..), FIFO (0 or 1), params
+        can.setfilter(0, CAN.LIST16, 0, (1, 2, 3, 4))
+
+    while (True):
+        # Receive messages on FIFO 0
+        print(can.recv(0, timeout=10000))
diff --git a/scripts/examples/Arduino/Nicla-Vision/02-Board-Control/cpufreq_scaling.py b/scripts/examples/Arduino/Nicla-Vision/02-Board-Control/cpufreq_scaling.py
new file mode 100644
index 000000000..963810575
--- /dev/null
+++ b/scripts/examples/Arduino/Nicla-Vision/02-Board-Control/cpufreq_scaling.py
@@ -0,0 +1,24 @@
+# CPU frequency scaling example.
+#
+# This example shows how to use the cpufreq module to change the CPU frequency on the fly.
+import sensor, image, time, cpufreq
+
+sensor.reset()                          # Reset and initialize the sensor.
+sensor.set_pixformat(sensor.GRAYSCALE)  # Set pixel format to GRAYSCALE (or RGB565)
+sensor.set_framesize(sensor.QVGA)       # Set frame size to QVGA (320x240)
+clock = time.clock()                    # Create a clock object to track the FPS.
+
+def test_image_processing():
+    for i in range(0, 50):
+        clock.tick()                    # Update the FPS clock.
+        img = sensor.snapshot()         # Take a picture and return the image.
+        img.find_edges(image.EDGE_CANNY, threshold=(50, 80))
+
+print("\nFrequency Scaling Test...")
+for f in cpufreq.get_supported_frequencies():
+    print("Testing CPU Freq: %dMHz..." %(f))
+    cpufreq.set_frequency(f)
+    clock.reset()
+    test_image_processing()
+    freqs = cpufreq.get_current_frequencies()
+    print("CPU Freq:%dMHz HCLK:%dMHz PCLK1:%dMHz PCLK2:%dMHz FPS:%.2f" %(freqs[0], freqs[1], freqs[2], freqs[3], clock.fps()))
diff --git a/scripts/examples/Arduino/Nicla-Vision/02-Board-Control/i2c_control.py b/scripts/examples/Arduino/Nicla-Vision/02-Board-Control/i2c_control.py
new file mode 100644
index 000000000..2d0ca10d9
--- /dev/null
+++ b/scripts/examples/Arduino/Nicla-Vision/02-Board-Control/i2c_control.py
@@ -0,0 +1,19 @@
+# I2C Control
+#
+# This example shows how to use the i2c bus on your OpenMV Cam by dumping the
+# contents of a standard EEPROM. To run this example either connect the
+# Thermopile Shield to your OpenMV Cam or an I2C EEPROM to your OpenMV Cam.
+
+from pyb import I2C
+
+i2c = I2C(1, I2C.MASTER)
+mem = i2c.mem_read(256, 0x50, 0) # The eeprom slave address is 0x50.
+
+print("\n[")
+for i in range(16):
+    print("\t[", end='')
+    for j in range(16):
+        print("%03d" % mem[(i*16)+j], end='')
+        if j != 15: print(", ", end='')
+    print("]," if i != 15 else "]")
+print("]")
diff --git a/scripts/examples/Arduino/Nicla-Vision/02-Board-Control/led_control.py b/scripts/examples/Arduino/Nicla-Vision/02-Board-Control/led_control.py
new file mode 100644
index 000000000..81a25b302
--- /dev/null
+++ b/scripts/examples/Arduino/Nicla-Vision/02-Board-Control/led_control.py
@@ -0,0 +1,22 @@
+# LED Control Example
+#
+# This example shows how to control the RGB LED.
+import time
+from pyb import LED
+
+red_led = LED(1)
+green_led = LED(2)
+blue_led = LED(3)
+
+def led_control(x):
+    if (x&1)==0: red_led.off()
+    elif (x&1)==1: red_led.on()
+    if (x&2)==0: green_led.off()
+    elif (x&2)==2: green_led.on()
+    if (x&4)==0: blue_led.off()
+    elif (x&4)==4: blue_led.on()
+
+while(True):
+    for i in range(16):
+        led_control(i)
+        time.sleep_ms(500)
diff --git a/scripts/examples/Arduino/Nicla-Vision/02-Board-Control/native_emitters.py b/scripts/examples/Arduino/Nicla-Vision/02-Board-Control/native_emitters.py
new file mode 100644
index 000000000..3839297fd
--- /dev/null
+++ b/scripts/examples/Arduino/Nicla-Vision/02-Board-Control/native_emitters.py
@@ -0,0 +1,19 @@
+import time
+
+@micropython.asm_thumb
+def asm():
+    movw(r0, 42)
+
+@micropython.viper
+def viper(a, b):
+    return a + b
+
+@micropython.native
+def native(a, b):
+    return a + b
+
+
+print(asm())
+print(viper(1, 2))
+print(native(1, 2))
+
diff --git a/scripts/examples/Arduino/Nicla-Vision/02-Board-Control/pin_control.py b/scripts/examples/Arduino/Nicla-Vision/02-Board-Control/pin_control.py
new file mode 100644
index 000000000..91a241b89
--- /dev/null
+++ b/scripts/examples/Arduino/Nicla-Vision/02-Board-Control/pin_control.py
@@ -0,0 +1,13 @@
+# Pin Control Example
+#
+# This example shows how to use the I/O pins in GPIO mode.
+
+from pyb import Pin
+
+# Connect a switch to GPIO1 (pin0) that will pull it low when the switch is closed.
+# GPIO2 (pin1) will then light up.
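+#
+# With the internal pull-up enabled, GPIO1 reads 1 until the switch pulls it to
+# ground; the loop below drives GPIO2 with the inverted value.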
+pin0 = Pin('GPIO1', Pin.IN, Pin.PULL_UP)
+pin1 = Pin('GPIO2', Pin.OUT_PP, Pin.PULL_NONE)
+
+while(True):
+    pin1.value(not pin0.value())
diff --git a/scripts/examples/Arduino/Nicla-Vision/02-Board-Control/pwm_control.py b/scripts/examples/Arduino/Nicla-Vision/02-Board-Control/pwm_control.py
new file mode 100644
index 000000000..8c67c0d42
--- /dev/null
+++ b/scripts/examples/Arduino/Nicla-Vision/02-Board-Control/pwm_control.py
@@ -0,0 +1,30 @@
+# PWM Control Example
+#
+# This example shows how to use PWM.
+
+import time
+from pyb import Pin, Timer
+
+class PWM():
+    def __init__(self, pin, tim, ch):
+        self.pin = pin
+        self.tim = tim
+        self.ch = ch
+
+pwms = {
+    'PWM1' : PWM('PE12', 1, 1),
+    'PWM2' : PWM('PE11', 1, 2),
+#   'PWM3' : PWM('PA9', 1, 2),
+    'PWM3' : PWM('PA10', 1, 3),
+    'PWM4' : PWM('PE14', 1, 4),
+    'PWM5' : PWM('PB8', 4, 3),
+    'PWM6' : PWM('PB9', 4, 4),
+}
+
+# Generate a 1kHz square wave with a 50% duty cycle on the following PWMs.
+for k, pwm in pwms.items():
+    tim = Timer(pwm.tim, freq=1000) # Frequency in Hz
+    ch = tim.channel(pwm.ch, Timer.PWM, pin=Pin(pwm.pin), pulse_width_percent=50)
+
+while (True):
+    time.sleep_ms(1000)
diff --git a/scripts/examples/Arduino/Nicla-Vision/02-Board-Control/rtc.py b/scripts/examples/Arduino/Nicla-Vision/02-Board-Control/rtc.py
new file mode 100644
index 000000000..cfd2bbefc
--- /dev/null
+++ b/scripts/examples/Arduino/Nicla-Vision/02-Board-Control/rtc.py
@@ -0,0 +1,12 @@
+# RTC Example
+#
+# This example shows how to use the RTC.
+import time
+from pyb import RTC
+
+rtc = RTC()
+rtc.datetime((2013, 7, 9, 2, 0, 0, 0, 0))
+
+while (True):
+    print(rtc.datetime())
+    time.sleep_ms(1000)
diff --git a/scripts/examples/Arduino/Nicla-Vision/02-Board-Control/spi_control.py b/scripts/examples/Arduino/Nicla-Vision/02-Board-Control/spi_control.py
new file mode 100644
index 000000000..d7ab35ae3
--- /dev/null
+++ b/scripts/examples/Arduino/Nicla-Vision/02-Board-Control/spi_control.py
@@ -0,0 +1,73 @@
+# SPI Control
+#
+# This example shows how to use the SPI bus to control the
+# 1.8" TFT LCD display (JD-T18003-T01) with ST7735R driver.
+
+import sensor, image, time
+from pyb import Pin, SPI
+
+cs = Pin("GPIO1", Pin.OUT_OD)
+rst = Pin("GPIO2", Pin.OUT_PP)
+rs = Pin("GPIO3", Pin.OUT_PP)
+
+# NOTE: The SPI clock frequency will not always be the requested frequency. The hardware only supports
+# frequencies that are the bus frequency divided by a prescaler (which can be 2, 4, 8, 16, 32, 64, 128 or 256).
+spi = SPI(4, SPI.MASTER, baudrate=int(1000000000/66), polarity=0, phase=0)
+
+def write_command_byte(c):
+    cs.low()
+    rs.low()
+    spi.send(c)
+    cs.high()
+
+def write_data_byte(c):
+    cs.low()
+    rs.high()
+    spi.send(c)
+    cs.high()
+
+def write_command(c, *data):
+    write_command_byte(c)
+    if data:
+        for d in data: write_data_byte(d)
+
+def write_image(img):
+    cs.low()
+    rs.high()
+    spi.send(img)
+    cs.high()
+
+# Reset the LCD.
+rst.low()
+time.sleep_ms(100)
+rst.high()
+time.sleep_ms(100)
+
+write_command(0x11) # Sleep Exit
+time.sleep_ms(120)
+
+# Memory Data Access Control
+# Write 0xC8 for BGR mode.
+write_command(0x36, 0xC0)
+
+# Interface Pixel Format
+write_command(0x3A, 0x05)
+
+# Display On
+write_command(0x29)
+
+sensor.reset() # Initialize the camera sensor.
+sensor.set_pixformat(sensor.RGB565)
+sensor.set_framesize(sensor.QQVGA2)
+sensor.skip_frames(time = 2000) # Let new settings take effect.
+clock = time.clock() # Tracks FPS.
+
+while(True):
+    clock.tick() # Track elapsed milliseconds between snapshots().
+    img = sensor.snapshot() # Take a picture and return the image.
+
+    write_command(0x2C) # Write image command...
+    write_image(img)
+
+    print(clock.fps()) # Note: Your OpenMV Cam runs about half as fast while
+                       # connected to your computer. The FPS should increase once disconnected.
diff --git a/scripts/examples/Arduino/Nicla-Vision/02-Board-Control/timer_control.py b/scripts/examples/Arduino/Nicla-Vision/02-Board-Control/timer_control.py
new file mode 100644
index 000000000..5ccdbe99f
--- /dev/null
+++ b/scripts/examples/Arduino/Nicla-Vision/02-Board-Control/timer_control.py
@@ -0,0 +1,19 @@
+# Timer Control Example
+#
+# This example shows how to use a timer for callbacks.
+
+import time
+from pyb import Pin, Timer, LED
+
+blue_led = LED(3)
+
+# We will receive the timer object when being called.
+# Note: functions that allocate memory are not allowed in callbacks.
+def tick(timer):
+    blue_led.toggle()
+
+tim = Timer(2, freq=1) # create a timer object using timer 2 - trigger at 1Hz
+tim.callback(tick)     # set the callback to our tick function
+
+while (True):
+    time.sleep_ms(1000)
diff --git a/scripts/examples/Arduino/Nicla-Vision/02-Board-Control/uart_control.py b/scripts/examples/Arduino/Nicla-Vision/02-Board-Control/uart_control.py
new file mode 100644
index 000000000..7eff664f0
--- /dev/null
+++ b/scripts/examples/Arduino/Nicla-Vision/02-Board-Control/uart_control.py
@@ -0,0 +1,13 @@
+# UART Control
+#
+# This example shows how to use the serial port on your OpenMV Cam.
+
+import time
+from pyb import UART
+
+# Init UART object.
+uart = UART(4, 19200)
+
+while(True):
+    uart.write("Hello World!\r")
+    time.sleep_ms(1000)
diff --git a/scripts/examples/Arduino/Nicla-Vision/02-Board-Control/usb_hid.py b/scripts/examples/Arduino/Nicla-Vision/02-Board-Control/usb_hid.py
new file mode 100644
index 000000000..32adf34e3
--- /dev/null
+++ b/scripts/examples/Arduino/Nicla-Vision/02-Board-Control/usb_hid.py
@@ -0,0 +1,31 @@
+# Making the OpenMV Camera act as a Mouse using HID.
+#
+# First we need to create a boot.py file to change the default USB mode (VCP+MSC).
+# Note: It is recommended to save this file to the uSD card, not the flash storage.
+# This will make it easier to restore the default OpenMV (VCP+MSC) USB mode later
+# by just deleting boot.py from the uSD card using the PC.
+#
+# Add the following script to boot.py:
+#
+##import pyb #(UNCOMMENT THIS LINE!)
+##pyb.usb_mode('VCP+HID') # serial device + mouse (UNCOMMENT THIS LINE!)
+##pyb.usb_mode('VCP+MSC') # serial device + storage device (default)
+##pyb.usb_mode('VCP+HID', hid=pyb.hid_keyboard) # serial device + keyboard
+#
+# Copy boot.py to the root of the uSD card and restart the camera; it should now
+# act as a serial device and a mouse.
+#
+# Connect to the camera using the IDE and run this script; you should see the mouse move.
+#
+# Note: To restore the default VCP+MSC USB mode, either use the PC to remove boot.py
+# from the uSD card, or use the following Python line: import os; os.remove('boot.py')
+
+import pyb, time
+
+hid = pyb.USB_HID()
+
+while(True):
+    # x, y and scroll
+    # move 10 pixels to the right
+    hid.send((0, 10, 0, 0))
+    time.sleep_ms(500)
diff --git a/scripts/examples/Arduino/Nicla-Vision/02-Board-Control/usb_vcp.py b/scripts/examples/Arduino/Nicla-Vision/02-Board-Control/usb_vcp.py
new file mode 100644
index 000000000..159cfd101
--- /dev/null
+++ b/scripts/examples/Arduino/Nicla-Vision/02-Board-Control/usb_vcp.py
@@ -0,0 +1,37 @@
+# USB VCP example.
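+#
+# Host protocol sketch (assumed from the commented host script further below):
+# send "snap", then read a 4-byte little-endian size followed by that many bytes.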
+# This example shows how to use the USB VCP class to send an image to the PC on demand.
+#
+# WARNING:
+# This script should NOT be run from the IDE or command line; it should be saved as main.py.
+# Note: the following commented script shows how to receive the image on the host side.
+#
+# #!/usr/bin/env python2.7
+# import sys, serial, struct
+# port = '/dev/ttyACM0'
+# sp = serial.Serial(port, baudrate=115200, bytesize=serial.EIGHTBITS, parity=serial.PARITY_NONE,
+#                    xonxoff=False, rtscts=False, stopbits=serial.STOPBITS_ONE, timeout=None, dsrdtr=True)
+# sp.setDTR(True) # dsrdtr is ignored on Windows.
+# sp.write("snap")
+# sp.flush()
+# size = struct.unpack('<L', sp.read(4))[0]
[...]
+    image_format = (value_mixer >> 8) & 3
+    # To test combining different formats
+    if (image_format==1): small_img = small_img.to_bitmap(copy=True); status = 'bitmap '
+    if (image_format==2): small_img = small_img.to_grayscale(copy=True); status = 'grayscale '
+    if (image_format==3): small_img = small_img.to_rgb565(copy=True); status = 'rgb565 '
+
+    # Update the small image location
+    if BOUNCE:
+        x = x + xd
+        if (x < xmin or x > xmax):
+            xd = -xd
+
+        y = y + yd
+        if (y < ymin or y > ymax):
+            yd = -yd
+
+    # Update the small image scale
+    if RESCALE:
+        rescale = rescale + rd
+        if (rescale < min_rescale or rescale > max_rescale):
+            rd = -rd
+
+    # Find the center of the image
+    scaled_width = int(small_img.width() * abs(rescale))
+    scaled_height = int(small_img.height() * abs(rescale))
+
+    apply_mask = CYCLE_MASK and ((value_mixer >> 9) & 1)
+    if apply_mask:
+        img.draw_image(small_img, int(x), int(y), mask=small_img.to_bitmap(copy=True), x_scale=rescale, y_scale=rescale, alpha=240, hint=image.IMAGE_HINT_BILINEAR | image.IMAGE_HINT_CENTER)
+        status += 'alpha:240 '
+        status += '+mask '
+    else:
+        img.draw_image(small_img, int(x), int(y), x_scale=rescale, y_scale=rescale, alpha=128, hint=image.IMAGE_HINT_BILINEAR | image.IMAGE_HINT_CENTER)
+        status += 'alpha:128 '
+
+    img.draw_string(8, 0, status, mono_space = False)
+
+    print(clock.fps())
diff --git a/scripts/examples/Arduino/Nicla-Vision/03-Drawing/image_drawing_alpha_blending_test.py b/scripts/examples/Arduino/Nicla-Vision/03-Drawing/image_drawing_alpha_blending_test.py
new file mode 100644
index 000000000..8cae0107a
--- /dev/null
+++ b/scripts/examples/Arduino/Nicla-Vision/03-Drawing/image_drawing_alpha_blending_test.py
@@ -0,0 +1,71 @@
+# Image Drawing Alpha Blending Test
+#
+# This script tests the performance and quality of the draw_image()
+# method which can perform nearest neighbor, bilinear, bicubic, and
+# area scaling along with color channel extraction, alpha blending,
+# color palette application, and alpha palette application.
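+#
+# Note: in draw_image() alpha runs from 0 (fully transparent) to 256 (fully
+# opaque); the loop below sweeps it up and down to show the blend.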
+
+import sensor, image, time
+
+sensor.reset()
+sensor.set_pixformat(sensor.RGB565)
+sensor.set_framesize(sensor.QVGA)
+
+hint = image.BICUBIC # image.BILINEAR image.BICUBIC
+
+small_img = image.Image(4, 4, sensor.RGB565)
+small_img.set_pixel(0, 0, (0, 0, 127))
+small_img.set_pixel(1, 0, (47, 255, 199))
+small_img.set_pixel(2, 0, (0, 188, 255))
+small_img.set_pixel(3, 0, (0, 0, 127))
+small_img.set_pixel(0, 1, (0, 176, 255))
+small_img.set_pixel(1, 1, (222, 0, 0))
+small_img.set_pixel(2, 1, (50, 255, 195))
+small_img.set_pixel(3, 1, (86, 255, 160))
+small_img.set_pixel(0, 2, (255, 211, 0))
+small_img.set_pixel(1, 2, (83, 255, 163))
+small_img.set_pixel(2, 2, (255, 211, 0))
+small_img.set_pixel(3, 2, (0, 80, 255))
+small_img.set_pixel(0, 3, (255, 118, 0))
+small_img.set_pixel(1, 3, (127, 0, 0))
+small_img.set_pixel(2, 3, (0, 144, 255))
+small_img.set_pixel(3, 3, (50, 255, 195))
+#small_img.to_grayscale()
+#small_img.to_bitmap()
+
+big_img = image.Image(128, 128, sensor.RGB565)
+big_img.draw_image(small_img, 0, 0, x_scale=32, y_scale=32, hint=hint)
+#big_img.to_grayscale()
+#big_img.to_bitmap()
+
+alpha_div = 1
+alpha_value = 0
+alpha_step = 2
+
+x_bounce = sensor.width()//2
+x_bounce_toggle = 1
+
+y_bounce = sensor.height()//2
+y_bounce_toggle = 1
+
+clock = time.clock()
+while(True):
+    clock.tick()
+
+    img = sensor.snapshot()
+    #img.to_grayscale()
+    #img.to_bitmap()
+    img.draw_image(big_img, x_bounce, y_bounce,
+                   rgb_channel=-1, alpha=alpha_value//alpha_div,
+                   hint=hint|image.CENTER)
+
+    x_bounce += x_bounce_toggle
+    if abs(x_bounce-(img.width()//2)) >= (img.width()//2): x_bounce_toggle = -x_bounce_toggle
+
+    y_bounce += y_bounce_toggle
+    if abs(y_bounce-(img.height()//2)) >= (img.height()//2): y_bounce_toggle = -y_bounce_toggle
+
+    alpha_value += alpha_step
+    if not alpha_value or alpha_value//alpha_div == 256: alpha_step = -alpha_step
+
+    print(clock.fps())
diff --git a/scripts/examples/Arduino/Nicla-Vision/03-Drawing/image_drawing_alpha_blending_with_color_table_test.py b/scripts/examples/Arduino/Nicla-Vision/03-Drawing/image_drawing_alpha_blending_with_color_table_test.py
new file mode 100644
index 000000000..a3f70c0a6
--- /dev/null
+++ b/scripts/examples/Arduino/Nicla-Vision/03-Drawing/image_drawing_alpha_blending_with_color_table_test.py
@@ -0,0 +1,81 @@
+# Image Drawing Color Table with Alpha Blending Test
+#
+# This script tests the performance and quality of the draw_image()
+# method which can perform nearest neighbor, bilinear, bicubic, and
+# area scaling along with color channel extraction, alpha blending,
+# color palette application, and alpha palette application.
+
+import sensor, image, time
+
+sensor.reset()
+sensor.set_pixformat(sensor.RGB565)
+sensor.set_framesize(sensor.QVGA)
+
+hint = image.BICUBIC # image.BILINEAR image.BICUBIC
+
+# RGB channel extraction is done after scaling normally, this
+# may produce false colors. Set this flag to do it before.
+#
+hint |= 0 # image.EXTRACT_RGB_CHANNEL_FIRST
+
+# Color table application is done after scaling normally, this
+# may produce false colors. Set this flag to do it before.
+#
+hint |= 0 # image.APPLY_COLOR_PALETTE_FIRST
+
+small_img = image.Image(4, 4, sensor.RGB565)
+small_img.set_pixel(0, 0, (0, 0, 127))
+small_img.set_pixel(1, 0, (47, 255, 199))
+small_img.set_pixel(2, 0, (0, 188, 255))
+small_img.set_pixel(3, 0, (0, 0, 127))
+small_img.set_pixel(0, 1, (0, 176, 255))
+small_img.set_pixel(1, 1, (222, 0, 0))
+small_img.set_pixel(2, 1, (50, 255, 195))
+small_img.set_pixel(3, 1, (86, 255, 160))
+small_img.set_pixel(0, 2, (255, 211, 0))
+small_img.set_pixel(1, 2, (83, 255, 163))
+small_img.set_pixel(2, 2, (255, 211, 0))
+small_img.set_pixel(3, 2, (0, 80, 255))
+small_img.set_pixel(0, 3, (255, 118, 0))
+small_img.set_pixel(1, 3, (127, 0, 0))
+small_img.set_pixel(2, 3, (0, 144, 255))
+small_img.set_pixel(3, 3, (50, 255, 195))
+#small_img.to_grayscale()
+#small_img.to_bitmap()
+
+big_img = image.Image(128, 128, sensor.RGB565)
+big_img.draw_image(small_img, 0, 0, x_scale=32, y_scale=32, hint=hint)
+#big_img.to_grayscale()
+#big_img.to_bitmap()
+
+alpha_div = 1
+alpha_value = 0
+alpha_step = 2
+
+x_bounce = sensor.width()//2
+x_bounce_toggle = 1
+
+y_bounce = sensor.height()//2
+y_bounce_toggle = 1
+
+clock = time.clock()
+while(True):
+    clock.tick()
+
+    img = sensor.snapshot()
+    #img.to_grayscale()
+    #img.to_bitmap()
+    img.draw_image(big_img, x_bounce, y_bounce,
+                   rgb_channel=-1, alpha=alpha_value//alpha_div,
+                   color_palette=sensor.PALETTE_IRONBOW, hint=hint|image.CENTER)
+
+    x_bounce += x_bounce_toggle
+    if abs(x_bounce-(img.width()//2)) >= (img.width()//2): x_bounce_toggle = -x_bounce_toggle
+
+    y_bounce += y_bounce_toggle
+    if abs(y_bounce-(img.height()//2)) >= (img.height()//2): y_bounce_toggle = -y_bounce_toggle
+
+    alpha_value += alpha_step
+    if not alpha_value or alpha_value//alpha_div == 256: alpha_step = -alpha_step
+
+    print(clock.fps())
diff --git a/scripts/examples/Arduino/Nicla-Vision/03-Drawing/image_drawing_alpha_table_test.py b/scripts/examples/Arduino/Nicla-Vision/03-Drawing/image_drawing_alpha_table_test.py
new file mode 100644
index 000000000..7ca783fe9
--- /dev/null
+++ b/scripts/examples/Arduino/Nicla-Vision/03-Drawing/image_drawing_alpha_table_test.py
@@ -0,0 +1,75 @@
+# Image Drawing Alpha Table Test
+#
+# This script tests the performance and quality of the draw_image()
+# method which can perform nearest neighbor, bilinear, bicubic, and
+# area scaling along with color channel extraction, alpha blending,
+# color palette application, and alpha palette application.
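+#
+# The alpha palette is a 256x1 GRAYSCALE image: entry i sets the alpha applied
+# to source pixels of intensity i (here a hard 0/255 step at 128).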
+
+import sensor, image, time
+
+sensor.reset()
+sensor.set_pixformat(sensor.RGB565)
+sensor.set_framesize(sensor.QVGA)
+
+hint = image.BICUBIC # image.BILINEAR image.BICUBIC
+
+small_img = image.Image(4, 4, sensor.RGB565)
+small_img.set_pixel(0, 0, (0, 0, 127))
+small_img.set_pixel(1, 0, (47, 255, 199))
+small_img.set_pixel(2, 0, (0, 188, 255))
+small_img.set_pixel(3, 0, (0, 0, 127))
+small_img.set_pixel(0, 1, (0, 176, 255))
+small_img.set_pixel(1, 1, (222, 0, 0))
+small_img.set_pixel(2, 1, (50, 255, 195))
+small_img.set_pixel(3, 1, (86, 255, 160))
+small_img.set_pixel(0, 2, (255, 211, 0))
+small_img.set_pixel(1, 2, (83, 255, 163))
+small_img.set_pixel(2, 2, (255, 211, 0))
+small_img.set_pixel(3, 2, (0, 80, 255))
+small_img.set_pixel(0, 3, (255, 118, 0))
+small_img.set_pixel(1, 3, (127, 0, 0))
+small_img.set_pixel(2, 3, (0, 144, 255))
+small_img.set_pixel(3, 3, (50, 255, 195))
+#small_img.to_grayscale()
+#small_img.to_bitmap()
+
+big_img = image.Image(128, 128, sensor.RGB565)
+big_img.draw_image(small_img, 0, 0, x_scale=32, y_scale=32, hint=hint)
+#big_img.to_grayscale()
+#big_img.to_bitmap()
+
+alpha_lut = image.Image(256, 1, sensor.GRAYSCALE)
+for i in range(256):
+    alpha_lut.set_pixel(i, 0, 255 if i > 127 else 0)
+
+alpha_div = 1
+alpha_value = 0
+alpha_step = 2
+
+x_bounce = sensor.width()//2
+x_bounce_toggle = 1
+
+y_bounce = sensor.height()//2
+y_bounce_toggle = 1
+
+clock = time.clock()
+while(True):
+    clock.tick()
+
+    img = sensor.snapshot()
+    #img.to_grayscale()
+    #img.to_bitmap()
+    img.draw_image(big_img, x_bounce, y_bounce,
+                   rgb_channel=-1, alpha=alpha_value//alpha_div,
+                   alpha_palette=alpha_lut, hint=hint|image.CENTER)
+
+    x_bounce += x_bounce_toggle
+    if abs(x_bounce-(img.width()//2)) >= (img.width()//2): x_bounce_toggle = -x_bounce_toggle
+
+    y_bounce += y_bounce_toggle
+    if abs(y_bounce-(img.height()//2)) >= (img.height()//2): y_bounce_toggle = -y_bounce_toggle
+
+    alpha_value += alpha_step
+    if not alpha_value or alpha_value//alpha_div == 256: alpha_step = -alpha_step
+
+    print(clock.fps())
diff --git a/scripts/examples/Arduino/Nicla-Vision/03-Drawing/image_drawing_alpha_table_with_color_table_test.py b/scripts/examples/Arduino/Nicla-Vision/03-Drawing/image_drawing_alpha_table_with_color_table_test.py
new file mode 100644
index 000000000..014a2c1ce
--- /dev/null
+++ b/scripts/examples/Arduino/Nicla-Vision/03-Drawing/image_drawing_alpha_table_with_color_table_test.py
@@ -0,0 +1,85 @@
+# Image Drawing Color Table with Alpha Table Test
+#
+# This script tests the performance and quality of the draw_image()
+# method which can perform nearest neighbor, bilinear, bicubic, and
+# area scaling along with color channel extraction, alpha blending,
+# color palette application, and alpha palette application.
+
+import sensor, image, time
+
+sensor.reset()
+sensor.set_pixformat(sensor.RGB565)
+sensor.set_framesize(sensor.QVGA)
+
+hint = image.BICUBIC # image.BILINEAR image.BICUBIC
+
+# RGB channel extraction is done after scaling normally, this
+# may produce false colors. Set this flag to do it before.
+#
+hint |= 0 # image.EXTRACT_RGB_CHANNEL_FIRST
+
+# Color table application is done after scaling normally, this
+# may produce false colors. Set this flag to do it before.
+#
+hint |= 0 # image.APPLY_COLOR_PALETTE_FIRST
+
+small_img = image.Image(4, 4, sensor.RGB565)
+small_img.set_pixel(0, 0, (0, 0, 127))
+small_img.set_pixel(1, 0, (47, 255, 199))
+small_img.set_pixel(2, 0, (0, 188, 255))
+small_img.set_pixel(3, 0, (0, 0, 127))
+small_img.set_pixel(0, 1, (0, 176, 255))
+small_img.set_pixel(1, 1, (222, 0, 0))
+small_img.set_pixel(2, 1, (50, 255, 195))
+small_img.set_pixel(3, 1, (86, 255, 160))
+small_img.set_pixel(0, 2, (255, 211, 0))
+small_img.set_pixel(1, 2, (83, 255, 163))
+small_img.set_pixel(2, 2, (255, 211, 0))
+small_img.set_pixel(3, 2, (0, 80, 255))
+small_img.set_pixel(0, 3, (255, 118, 0))
+small_img.set_pixel(1, 3, (127, 0, 0))
+small_img.set_pixel(2, 3, (0, 144, 255))
+small_img.set_pixel(3, 3, (50, 255, 195))
+#small_img.to_grayscale()
+#small_img.to_bitmap()
+
+big_img = image.Image(128, 128, sensor.RGB565)
+big_img.draw_image(small_img, 0, 0, x_scale=32, y_scale=32, hint=hint)
+#big_img.to_grayscale()
+#big_img.to_bitmap()
+
+alpha_lut = image.Image(256, 1, sensor.GRAYSCALE)
+for i in range(256):
+    alpha_lut.set_pixel(i, 0, 255 if i > 127 else 0)
+
+alpha_div = 1
+alpha_value = 0
+alpha_step = 2
+
+x_bounce = sensor.width()//2
+x_bounce_toggle = 1
+
+y_bounce = sensor.height()//2
+y_bounce_toggle = 1
+
+clock = time.clock()
+while(True):
+    clock.tick()
+
+    img = sensor.snapshot()
+    #img.to_grayscale()
+    #img.to_bitmap()
+    img.draw_image(big_img, x_bounce, y_bounce,
+                   rgb_channel=-1, alpha=alpha_value//alpha_div,
+                   color_palette=sensor.PALETTE_IRONBOW, alpha_palette=alpha_lut, hint=hint|image.CENTER)
+
+    x_bounce += x_bounce_toggle
+    if abs(x_bounce-(img.width()//2)) >= (img.width()//2): x_bounce_toggle = -x_bounce_toggle
+
+    y_bounce += y_bounce_toggle
+    if abs(y_bounce-(img.height()//2)) >= (img.height()//2): y_bounce_toggle = -y_bounce_toggle
+
+    alpha_value += alpha_step
+    if not alpha_value or alpha_value//alpha_div == 256: alpha_step = -alpha_step
+
+    print(clock.fps())
diff --git a/scripts/examples/Arduino/Nicla-Vision/03-Drawing/image_drawing_scale_down_test.py b/scripts/examples/Arduino/Nicla-Vision/03-Drawing/image_drawing_scale_down_test.py
new file mode 100644
index 000000000..7b4f9a67c
--- /dev/null
+++ b/scripts/examples/Arduino/Nicla-Vision/03-Drawing/image_drawing_scale_down_test.py
@@ -0,0 +1,69 @@
+# Image Scaling Down Drawing Test
+#
+# This script tests the performance and quality of the draw_image()
+# method which can perform nearest neighbor, bilinear, bicubic, and
+# area scaling along with color channel extraction, alpha blending,
+# color palette application, and alpha palette application.
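+#
+# image.AREA averages all of the source pixels that map to each output pixel,
+# which generally gives the best (if slowest) quality when scaling down.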
+
+# DISABLE THE FRAME BUFFER TO SEE THE REAL FPS
+
+import sensor, image, time
+
+up_hint = 0 # image.BILINEAR image.BICUBIC
+down_hint = image.AREA # image.BILINEAR image.BICUBIC image.AREA
+
+bounce_div = 128
+
+medium_img = image.Image(32, 32, sensor.RGB565, copy_to_fb=True)
+#medium_img.to_grayscale()
+#medium_img.to_bitmap()
+
+small_img = image.Image(4, 4, sensor.RGB565)
+small_img.set_pixel(0, 0, (0, 0, 127))
+small_img.set_pixel(1, 0, (47, 255, 199))
+small_img.set_pixel(2, 0, (0, 188, 255))
+small_img.set_pixel(3, 0, (0, 0, 127))
+small_img.set_pixel(0, 1, (0, 176, 255))
+small_img.set_pixel(1, 1, (222, 0, 0))
+small_img.set_pixel(2, 1, (50, 255, 195))
+small_img.set_pixel(3, 1, (86, 255, 160))
+small_img.set_pixel(0, 2, (255, 211, 0))
+small_img.set_pixel(1, 2, (83, 255, 163))
+small_img.set_pixel(2, 2, (255, 211, 0))
+small_img.set_pixel(3, 2, (0, 80, 255))
+small_img.set_pixel(0, 3, (255, 118, 0))
+small_img.set_pixel(1, 3, (127, 0, 0))
+small_img.set_pixel(2, 3, (0, 144, 255))
+small_img.set_pixel(3, 3, (50, 255, 195))
+#small_img.to_grayscale()
+#small_img.to_bitmap()
+
+big_img = image.Image(128, 128, sensor.RGB565)
+big_img.draw_image(small_img, 0, 0, x_scale=32, y_scale=32, hint=up_hint)
+#big_img.to_grayscale()
+#big_img.to_bitmap()
+
+x_bounce = 0
+x_bounce_toggle = 0
+
+y_bounce = 0
+y_bounce_toggle = 0
+
+clock = time.clock()
+while(True):
+    clock.tick()
+
+    medium_img.clear()
+    medium_img.draw_image(big_img,
+                          x_bounce // bounce_div, y_bounce // bounce_div,
+                          x_scale=0.25, y_scale=0.25,
+                          hint=down_hint)
+    sensor.flush()
+
+    x_bounce += x_bounce_toggle
+    if abs(x_bounce // bounce_div) >= (medium_img.width()*1.1): x_bounce_toggle = -x_bounce_toggle
+
+    y_bounce += y_bounce_toggle
+    if abs(y_bounce // bounce_div) >= (medium_img.height()*1.1): y_bounce_toggle = -y_bounce_toggle
+
+    print(clock.fps())
diff --git a/scripts/examples/Arduino/Nicla-Vision/03-Drawing/image_drawing_scale_up_test.py b/scripts/examples/Arduino/Nicla-Vision/03-Drawing/image_drawing_scale_up_test.py
new file mode 100644
index 000000000..92bcf91ee
--- /dev/null
+++ b/scripts/examples/Arduino/Nicla-Vision/03-Drawing/image_drawing_scale_up_test.py
@@ -0,0 +1,63 @@
+# Image Scaling Up Drawing Test
+#
+# This script tests the performance and quality of the draw_image()
+# method which can perform nearest neighbor, bilinear, bicubic, and
+# area scaling along with color channel extraction, alpha blending,
+# color palette application, and alpha palette application.
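+#
+# A hint of 0 selects nearest-neighbor scaling; image.BILINEAR or image.BICUBIC
+# give smoother (but slower) results when scaling up.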
+
+# DISABLE THE FRAME BUFFER TO SEE THE REAL FPS
+
+import sensor, image, time
+
+hint = 0 # image.BILINEAR image.BICUBIC
+
+bounce_div = 32
+
+big_img = image.Image(128, 128, sensor.RGB565, copy_to_fb=True)
+#big_img.to_grayscale()
+#big_img.to_bitmap()
+
+small_img = image.Image(4, 4, sensor.RGB565)
+small_img.set_pixel(0, 0, (0, 0, 127))
+small_img.set_pixel(1, 0, (47, 255, 199))
+small_img.set_pixel(2, 0, (0, 188, 255))
+small_img.set_pixel(3, 0, (0, 0, 127))
+small_img.set_pixel(0, 1, (0, 176, 255))
+small_img.set_pixel(1, 1, (222, 0, 0))
+small_img.set_pixel(2, 1, (50, 255, 195))
+small_img.set_pixel(3, 1, (86, 255, 160))
+small_img.set_pixel(0, 2, (255, 211, 0))
+small_img.set_pixel(1, 2, (83, 255, 163))
+small_img.set_pixel(2, 2, (255, 211, 0))
+small_img.set_pixel(3, 2, (0, 80, 255))
+small_img.set_pixel(0, 3, (255, 118, 0))
+small_img.set_pixel(1, 3, (127, 0, 0))
+small_img.set_pixel(2, 3, (0, 144, 255))
+small_img.set_pixel(3, 3, (50, 255, 195))
+#small_img.to_grayscale()
+#small_img.to_bitmap()
+
+x_bounce = 0
+x_bounce_toggle = 0
+
+y_bounce = 0
+y_bounce_toggle = 0
+
+clock = time.clock()
+while(True):
+    clock.tick()
+
+    big_img.clear()
+    big_img.draw_image(small_img,
+                       x_bounce // bounce_div, y_bounce // bounce_div,
+                       x_scale=32, y_scale=32,
+                       hint=hint)
+    sensor.flush()
+
+    x_bounce += x_bounce_toggle
+    if abs(x_bounce // bounce_div) >= (big_img.width()*1.1): x_bounce_toggle = -x_bounce_toggle
+
+    y_bounce += y_bounce_toggle
+    if abs(y_bounce // bounce_div) >= (big_img.height()*1.1): y_bounce_toggle = -y_bounce_toggle
+
+    print(clock.fps())
diff --git a/scripts/examples/Arduino/Nicla-Vision/03-Drawing/image_drawing_with_custom_palette.py b/scripts/examples/Arduino/Nicla-Vision/03-Drawing/image_drawing_with_custom_palette.py
new file mode 100644
index 000000000..d379b3342
--- /dev/null
+++ b/scripts/examples/Arduino/Nicla-Vision/03-Drawing/image_drawing_with_custom_palette.py
@@ -0,0 +1,43 @@
+# Draw Image Example with custom color palette
+#
+# This example shows off how to draw images in the frame buffer with a custom generated color palette.
+
+import sensor, image, time, pyb
+
+sensor.reset()
+sensor.set_pixformat(sensor.GRAYSCALE) # or RGB565...
+sensor.set_framesize(sensor.QQVGA) # or QVGA...
+sensor.skip_frames(time = 2000)
+clock = time.clock()
+
+# The color palette is actually an image; this allows you to use image ops to create palettes.
+# The image must have 256 entries, i.e. 256x1, 64x4, or 16x16, and must be in RGB565 format.
+
+# Initialize palette source colors into an image
+palette_source_colors = [(255, 0, 0), (0, 255, 0), (0, 0, 255), (255, 0, 255)]
+palette_source_color_image = image.Image(len(palette_source_colors), 1, sensor.RGB565)
+for i, color in enumerate(palette_source_colors):
+    palette_source_color_image[i] = color
+
+# Scale the image to the palette width and smooth the colors
+palette = image.Image(256, 1, sensor.RGB565)
+palette.draw_image(palette_source_color_image, 0, 0, x_scale=palette.width() / palette_source_color_image.width())
+palette.mean(int(palette.width() / palette_source_color_image.width() / 2))
+
+while(True):
+    clock.tick()
+
+    img = sensor.snapshot()
+    # Get a copy of the grayscale image before converting to color
+    img_copy = img.copy()
+
+    img.to_rgb565()
+
+    palette_boundary_inset = int(sensor.width() / 40)
+    palette_scale_x = (sensor.width() - palette_boundary_inset * 2) / palette.width()
+
+    img.draw_image(img_copy, 0, 0, color_palette=palette)
+    img.draw_image(palette, palette_boundary_inset, palette_boundary_inset, x_scale=palette_scale_x, y_scale=8)
+    img.draw_rectangle(palette_boundary_inset, palette_boundary_inset, int(palette.width()*palette_scale_x), 8, color=(255,255,255), thickness=1)
+
+    print(clock.fps())
diff --git a/scripts/examples/Arduino/Nicla-Vision/03-Drawing/keypoints_drawing.py b/scripts/examples/Arduino/Nicla-Vision/03-Drawing/keypoints_drawing.py
new file mode 100644
index 000000000..c41a8161d
--- /dev/null
+++ b/scripts/examples/Arduino/Nicla-Vision/03-Drawing/keypoints_drawing.py
@@ -0,0 +1,31 @@
+# Keypoints Drawing
+#
+# This example shows off drawing keypoints on the OpenMV Cam. Usually you call draw_keypoints()
+# on a keypoints object, but you can also call it on a list of 3-value tuples...
+
+import sensor, image, time, pyb
+
+sensor.reset()
+sensor.set_pixformat(sensor.RGB565) # or GRAYSCALE...
+sensor.set_framesize(sensor.QVGA) # or QQVGA...
+sensor.skip_frames(time = 2000)
+clock = time.clock()
+
+while(True):
+    clock.tick()
+
+    img = sensor.snapshot()
+
+    for i in range(20):
+        x = (pyb.rng() % (2*img.width())) - (img.width()//2)
+        y = (pyb.rng() % (2*img.height())) - (img.height()//2)
+        rot = pyb.rng() % 360
+
+        r = (pyb.rng() % 127) + 128
+        g = (pyb.rng() % 127) + 128
+        b = (pyb.rng() % 127) + 128
+
+        # This method draws a keypoints object or a list of (x, y, rot) tuples...
+        img.draw_keypoints([(x, y, rot)], color = (r, g, b), size = 20, thickness = 2, fill = False)
+
+    print(clock.fps())
diff --git a/scripts/examples/Arduino/Nicla-Vision/03-Drawing/line_drawing.py b/scripts/examples/Arduino/Nicla-Vision/03-Drawing/line_drawing.py
new file mode 100644
index 000000000..eb2d761bd
--- /dev/null
+++ b/scripts/examples/Arduino/Nicla-Vision/03-Drawing/line_drawing.py
@@ -0,0 +1,31 @@
+# Line Drawing
+#
+# This example shows off drawing lines on the OpenMV Cam.
+
+import sensor, image, time, pyb
+
+sensor.reset()
+sensor.set_pixformat(sensor.RGB565) # or GRAYSCALE...
+sensor.set_framesize(sensor.QVGA) # or QQVGA...
+sensor.skip_frames(time = 2000)
+clock = time.clock()
+
+while(True):
+    clock.tick()
+
+    img = sensor.snapshot()
+
+    for i in range(10):
+        x0 = (pyb.rng() % (2*img.width())) - (img.width()//2)
+        y0 = (pyb.rng() % (2*img.height())) - (img.height()//2)
+        x1 = (pyb.rng() % (2*img.width())) - (img.width()//2)
+        y1 = (pyb.rng() % (2*img.height())) - (img.height()//2)
+        r = (pyb.rng() % 127) + 128
+        g = (pyb.rng() % 127) + 128
+        b = (pyb.rng() % 127) + 128
+
+        # If the first argument is a scalar then this method expects
+        # to see x0, y0, x1, and y1. Otherwise, it expects a (x0,y0,x1,y1) tuple.
+        img.draw_line(x0, y0, x1, y1, color = (r, g, b), thickness = 2)
+
+    print(clock.fps())
diff --git a/scripts/examples/Arduino/Nicla-Vision/03-Drawing/rectangle_drawing.py b/scripts/examples/Arduino/Nicla-Vision/03-Drawing/rectangle_drawing.py
new file mode 100644
index 000000000..ab2afac66
--- /dev/null
+++ b/scripts/examples/Arduino/Nicla-Vision/03-Drawing/rectangle_drawing.py
@@ -0,0 +1,31 @@
+# Rectangle Drawing
+#
+# This example shows off drawing rectangles on the OpenMV Cam.
+
+import sensor, image, time, pyb
+
+sensor.reset()
+sensor.set_pixformat(sensor.RGB565) # or GRAYSCALE...
+sensor.set_framesize(sensor.QVGA) # or QQVGA...
+sensor.skip_frames(time = 2000)
+clock = time.clock()
+
+while(True):
+    clock.tick()
+
+    img = sensor.snapshot()
+
+    for i in range(10):
+        x = (pyb.rng() % (2*img.width())) - (img.width()//2)
+        y = (pyb.rng() % (2*img.height())) - (img.height()//2)
+        w = (pyb.rng() % (img.width()//2))
+        h = (pyb.rng() % (img.height()//2))
+        r = (pyb.rng() % 127) + 128
+        g = (pyb.rng() % 127) + 128
+        b = (pyb.rng() % 127) + 128
+
+        # If the first argument is a scalar then this method expects
+        # to see x, y, w, and h. Otherwise, it expects a (x,y,w,h) tuple.
+        img.draw_rectangle(x, y, w, h, color = (r, g, b), thickness = 2, fill = False)
+
+    print(clock.fps())
diff --git a/scripts/examples/Arduino/Nicla-Vision/03-Drawing/text_drawing.py b/scripts/examples/Arduino/Nicla-Vision/03-Drawing/text_drawing.py
new file mode 100644
index 000000000..da37af656
--- /dev/null
+++ b/scripts/examples/Arduino/Nicla-Vision/03-Drawing/text_drawing.py
@@ -0,0 +1,33 @@
+# Text Drawing
+#
+# This example shows off drawing text on the OpenMV Cam.
+
+import sensor, image, time, pyb
+
+sensor.reset()
+sensor.set_pixformat(sensor.RGB565) # or GRAYSCALE...
+sensor.set_framesize(sensor.QVGA) # or QQVGA...
+sensor.skip_frames(time = 2000)
+clock = time.clock()
+
+while(True):
+    clock.tick()
+
+    img = sensor.snapshot()
+
+    for i in range(10):
+        x = (pyb.rng() % (2*img.width())) - (img.width()//2)
+        y = (pyb.rng() % (2*img.height())) - (img.height()//2)
+        r = (pyb.rng() % 127) + 128
+        g = (pyb.rng() % 127) + 128
+        b = (pyb.rng() % 127) + 128
+
+        # If the first argument is a scalar then this method expects
+        # to see x, y, and text. Otherwise, it expects a (x,y,text) tuple.
+
+        # Character and string rotation can be done at 0, 90, 180, 270, etc. degrees.
+        img.draw_string(x, y, "Hello World!", color = (r, g, b), scale = 2, mono_space = False,
+                        char_rotation = 0, char_hmirror = False, char_vflip = False,
+                        string_rotation = 0, string_hmirror = False, string_vflip = False)
+
+    print(clock.fps())
diff --git a/scripts/examples/Arduino/Nicla-Vision/04-Image-Filters/adaptive_histogram_equalization.py b/scripts/examples/Arduino/Nicla-Vision/04-Image-Filters/adaptive_histogram_equalization.py
new file mode 100644
index 000000000..a958b531e
--- /dev/null
+++ b/scripts/examples/Arduino/Nicla-Vision/04-Image-Filters/adaptive_histogram_equalization.py
@@ -0,0 +1,29 @@
+# Adaptive Histogram Equalization
+#
+# This example shows off how to use adaptive histogram equalization to improve
+# the contrast in the image. Adaptive histogram equalization splits the image
+# into regions and then equalizes the histogram in those regions to improve
+# the image contrast versus a global histogram equalization. Additionally,
+# you may specify a clip limit to prevent the contrast from going wild.
+
+import sensor, image, time
+
+sensor.reset()
+sensor.set_pixformat(sensor.RGB565)
+sensor.set_framesize(sensor.QQVGA)
+sensor.skip_frames(time = 2000)
+clock = time.clock()
+
+while(True):
+    clock.tick()
+
+    # A clip_limit of < 0 gives you normal adaptive histogram equalization
+    # which may result in huge amounts of contrast noise...
+
+    # A clip_limit of 1 does nothing. For best results go slightly higher
+    # than 1 like below. The higher you go the closer you get back to
+    # standard adaptive histogram equalization with huge contrast swings.
+
+    img = sensor.snapshot().histeq(adaptive=True, clip_limit=3)
+
+    print(clock.fps())
diff --git a/scripts/examples/Arduino/Nicla-Vision/04-Image-Filters/blur_filter.py b/scripts/examples/Arduino/Nicla-Vision/04-Image-Filters/blur_filter.py
new file mode 100644
index 000000000..6074d2f2b
--- /dev/null
+++ b/scripts/examples/Arduino/Nicla-Vision/04-Image-Filters/blur_filter.py
@@ -0,0 +1,21 @@
+# Blur Filter Example
+#
+# This example shows off using the gaussian filter to blur images.
+
+import sensor, image, time
+
+sensor.reset() # Initialize the camera sensor.
+sensor.set_pixformat(sensor.GRAYSCALE) # or sensor.RGB565
+sensor.set_framesize(sensor.QQVGA) # or sensor.QVGA (or others)
+sensor.skip_frames(time = 2000) # Let new settings take effect.
+clock = time.clock() # Tracks FPS.
+
+while(True):
+    clock.tick() # Track elapsed milliseconds between snapshots().
+    img = sensor.snapshot() # Take a picture and return the image.
+
+    # Run the kernel on every pixel of the image.
+    img.gaussian(1)
+
+    print(clock.fps()) # Note: Your OpenMV Cam runs about half as fast while
+    # connected to your computer. The FPS should increase once disconnected.
diff --git a/scripts/examples/Arduino/Nicla-Vision/04-Image-Filters/cartoon_filter.py b/scripts/examples/Arduino/Nicla-Vision/04-Image-Filters/cartoon_filter.py
new file mode 100644
index 000000000..c1703ca47
--- /dev/null
+++ b/scripts/examples/Arduino/Nicla-Vision/04-Image-Filters/cartoon_filter.py
@@ -0,0 +1,29 @@
+# Cartoon Filter
+#
+# This example shows off a simple cartoon filter on images. The cartoon
+# filter works by joining similar pixel areas of an image and replacing
+# the pixels in those areas with the area mean.
+
+import sensor, image, time
+
+sensor.reset()
+sensor.set_pixformat(sensor.RGB565) # or GRAYSCALE...
+sensor.set_framesize(sensor.QVGA) # or QQVGA...
+sensor.skip_frames(time = 2000)
+clock = time.clock()
+
+while(True):
+    clock.tick()
+
+    # seed_threshold controls the maximum area growth of a colored
+    # region. Making this larger will merge more pixels.
+
+    # floating_threshold controls the maximum pixel-to-pixel difference
+    # when growing a region. Setting this very high will quickly combine
+    # all pixels in the image. You should keep this small.
+
+    # cartoon() will grow regions while both thresholds are satisfied...
+
+    img = sensor.snapshot().cartoon(seed_threshold=0.05, floating_threshold=0.05)
+
+    print(clock.fps())
diff --git a/scripts/examples/Arduino/Nicla-Vision/04-Image-Filters/color_bilateral_filter.py b/scripts/examples/Arduino/Nicla-Vision/04-Image-Filters/color_bilateral_filter.py
new file mode 100644
index 000000000..1bdbbb7eb
--- /dev/null
+++ b/scripts/examples/Arduino/Nicla-Vision/04-Image-Filters/color_bilateral_filter.py
@@ -0,0 +1,33 @@
+# Color Bilateral Filter Example
+#
+# This example shows off using the bilateral filter on color images.
+
+import sensor, image, time
+
+sensor.reset() # Initialize the camera sensor.
+sensor.set_pixformat(sensor.RGB565) # or sensor.GRAYSCALE
+sensor.set_framesize(sensor.QQVGA) # or sensor.QVGA (or others)
+sensor.skip_frames(time = 2000) # Let new settings take effect.
+clock = time.clock() # Tracks FPS.
+
+while(True):
+    clock.tick() # Track elapsed milliseconds between snapshots().
+    img = sensor.snapshot() # Take a picture and return the image.
+
+    # color_sigma controls how close color-wise pixels have to be to each other
+    # to be blurred together. A smaller value means they have to be closer.
+    # A larger value is less strict.
+
+    # space_sigma controls how close space-wise pixels have to be to each other
+    # to be blurred together. A smaller value means they have to be closer.
+    # A larger value is less strict.
+
+    # Run the kernel on every pixel of the image.
+    img.bilateral(3, color_sigma=0.1, space_sigma=1)
+
+    # Note that the bilateral filter can introduce image defects if you set
+    # color_sigma/space_sigma too aggressively. Increase the sigma values until
+    # the defects go away if you see them.
+
+    print(clock.fps()) # Note: Your OpenMV Cam runs about half as fast while
+    # connected to your computer. The FPS should increase once disconnected.
diff --git a/scripts/examples/Arduino/Nicla-Vision/04-Image-Filters/color_binary_filter.py b/scripts/examples/Arduino/Nicla-Vision/04-Image-Filters/color_binary_filter.py
new file mode 100644
index 000000000..b8686f7bc
--- /dev/null
+++ b/scripts/examples/Arduino/Nicla-Vision/04-Image-Filters/color_binary_filter.py
@@ -0,0 +1,61 @@
+# Color Binary Filter Example
+#
+# This script shows off the binary image filter. You may pass binary() any
+# number of thresholds to segment the image by.
+
+import sensor, image, time
+
+sensor.reset()
+sensor.set_framesize(sensor.QVGA)
+sensor.set_pixformat(sensor.RGB565)
+sensor.skip_frames(time = 2000)
+clock = time.clock()
+
+# Use the Tools -> Machine Vision -> Threshold Editor to pick better thresholds.
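+# Each RGB565 threshold below is an (L Min, L Max, A Min, A Max, B Min, B Max)
+# tuple in the LAB color space. As a purely hypothetical illustration, a tuple
+# that keeps only fairly bright, strongly red pixels could look like:
+#
+#   bright_red_threshold = (40, 100, 30, 127, -128, 127)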
+red_threshold = (0,100, 0,127, 0,127) # L A B
+green_threshold = (0,100, -128,0, 0,127) # L A B
+blue_threshold = (0,100, -128,127, -128,0) # L A B
+
+while(True):
+
+    # Test red threshold
+    for i in range(100):
+        clock.tick()
+        img = sensor.snapshot()
+        img.binary([red_threshold])
+        print(clock.fps())
+
+    # Test green threshold
+    for i in range(100):
+        clock.tick()
+        img = sensor.snapshot()
+        img.binary([green_threshold])
+        print(clock.fps())
+
+    # Test blue threshold
+    for i in range(100):
+        clock.tick()
+        img = sensor.snapshot()
+        img.binary([blue_threshold])
+        print(clock.fps())
+
+    # Test not red threshold
+    for i in range(100):
+        clock.tick()
+        img = sensor.snapshot()
+        img.binary([red_threshold], invert = 1)
+        print(clock.fps())
+
+    # Test not green threshold
+    for i in range(100):
+        clock.tick()
+        img = sensor.snapshot()
+        img.binary([green_threshold], invert = 1)
+        print(clock.fps())
+
+    # Test not blue threshold
+    for i in range(100):
+        clock.tick()
+        img = sensor.snapshot()
+        img.binary([blue_threshold], invert = 1)
+        print(clock.fps())
diff --git a/scripts/examples/Arduino/Nicla-Vision/04-Image-Filters/color_light_removal.py b/scripts/examples/Arduino/Nicla-Vision/04-Image-Filters/color_light_removal.py
new file mode 100644
index 000000000..270104cfb
--- /dev/null
+++ b/scripts/examples/Arduino/Nicla-Vision/04-Image-Filters/color_light_removal.py
@@ -0,0 +1,25 @@
+# Color Light Removal
+#
+# This example shows off how to remove bright lights from the image.
+# You can do this using the binary() method with the "zero=" argument.
+#
+# Removing bright lights from the image allows you to use histeq()
+# on the image without outliers from oversaturated parts of the
+# image breaking the algorithm...
+
+import sensor, image, time
+
+sensor.reset() # Initialize the camera sensor.
+sensor.set_pixformat(sensor.RGB565) # or sensor.GRAYSCALE
+sensor.set_framesize(sensor.QQVGA) # or sensor.QVGA (or others)
+sensor.skip_frames(time = 2000) # Let new settings take effect.
+clock = time.clock() # Tracks FPS.
+
+thresholds = (90, 100, -128, 127, -128, 127)
+
+while(True):
+    clock.tick() # Track elapsed milliseconds between snapshots().
+    img = sensor.snapshot().binary([thresholds], invert=False, zero=True)
+
+    print(clock.fps()) # Note: Your OpenMV Cam runs about half as fast while
+    # connected to your computer. The FPS should increase once disconnected.
diff --git a/scripts/examples/Arduino/Nicla-Vision/04-Image-Filters/edge_filter.py b/scripts/examples/Arduino/Nicla-Vision/04-Image-Filters/edge_filter.py
new file mode 100644
index 000000000..cdc03ddf3
--- /dev/null
+++ b/scripts/examples/Arduino/Nicla-Vision/04-Image-Filters/edge_filter.py
@@ -0,0 +1,21 @@
+# Edge Filter Example
+#
+# This example shows off using the laplacian filter to detect edges.
+
+import sensor, image, time
+
+sensor.reset() # Initialize the camera sensor.
+sensor.set_pixformat(sensor.GRAYSCALE) # or sensor.RGB565
+sensor.set_framesize(sensor.QQVGA) # or sensor.QVGA (or others)
+sensor.skip_frames(time = 2000) # Let new settings take effect.
+clock = time.clock() # Tracks FPS.
+
+while(True):
+    clock.tick() # Track elapsed milliseconds between snapshots().
+    img = sensor.snapshot() # Take a picture and return the image.
+
+    # Run the kernel on every pixel of the image.
+    img.laplacian(1)
+
+    print(clock.fps()) # Note: Your OpenMV Cam runs about half as fast while
+    # connected to your computer. The FPS should increase once disconnected.
diff --git a/scripts/examples/Arduino/Nicla-Vision/04-Image-Filters/erode_and_dilate.py b/scripts/examples/Arduino/Nicla-Vision/04-Image-Filters/erode_and_dilate.py
new file mode 100644
index 000000000..06a6fde68
--- /dev/null
+++ b/scripts/examples/Arduino/Nicla-Vision/04-Image-Filters/erode_and_dilate.py
@@ -0,0 +1,35 @@
+# Erode and Dilate Example
+#
+# This example shows off the erode and dilate functions which you can run on
+# a binary image to remove noise. This example was originally a test but it's
+# useful for showing off how these functions work.
+
+import pyb, sensor, image
+
+sensor.reset()
+sensor.set_framesize(sensor.QVGA)
+
+grayscale_thres = (170, 255)
+rgb565_thres = (70, 100, -128, 127, -128, 127)
+
+while(True):
+
+    sensor.set_pixformat(sensor.GRAYSCALE)
+    for i in range(20):
+        img = sensor.snapshot()
+        img.binary([grayscale_thres])
+        img.erode(2)
+    for i in range(20):
+        img = sensor.snapshot()
+        img.binary([grayscale_thres])
+        img.dilate(2)
+
+    sensor.set_pixformat(sensor.RGB565)
+    for i in range(20):
+        img = sensor.snapshot()
+        img.binary([rgb565_thres])
+        img.erode(2)
+    for i in range(20):
+        img = sensor.snapshot()
+        img.binary([rgb565_thres])
+        img.dilate(2)
diff --git a/scripts/examples/Arduino/Nicla-Vision/04-Image-Filters/gamma_correction.py b/scripts/examples/Arduino/Nicla-Vision/04-Image-Filters/gamma_correction.py
new file mode 100644
index 000000000..2dd1137c5
--- /dev/null
+++ b/scripts/examples/Arduino/Nicla-Vision/04-Image-Filters/gamma_correction.py
@@ -0,0 +1,21 @@
+# Gamma Correction
+#
+# This example shows off gamma correction to make the image brighter. The gamma
+# correction method can also adjust contrast and brightness.
+
+import sensor, image, time
+
+sensor.reset()
+sensor.set_pixformat(sensor.RGB565)
+sensor.set_framesize(sensor.QVGA)
+sensor.skip_frames(time = 2000)
+clock = time.clock()
+
+while(True):
+    clock.tick()
+
+    # Gamma, contrast, and brightness correction are applied to each color channel. The
+    # values are scaled to the range per color channel per image type...
+    img = sensor.snapshot().gamma_corr(gamma = 0.5, contrast = 1.0, brightness = 0.0)
+
+    print(clock.fps())
diff --git a/scripts/examples/Arduino/Nicla-Vision/04-Image-Filters/grayscale_bilateral_filter.py b/scripts/examples/Arduino/Nicla-Vision/04-Image-Filters/grayscale_bilateral_filter.py
new file mode 100644
index 000000000..6b3a67b21
--- /dev/null
+++ b/scripts/examples/Arduino/Nicla-Vision/04-Image-Filters/grayscale_bilateral_filter.py
@@ -0,0 +1,33 @@
+# Grayscale Bilateral Filter Example
+#
+# This example shows off using the bilateral filter on grayscale images.
+
+import sensor, image, time
+
+sensor.reset() # Initialize the camera sensor.
+sensor.set_pixformat(sensor.GRAYSCALE) # or sensor.RGB565
+sensor.set_framesize(sensor.QQVGA) # or sensor.QVGA (or others)
+sensor.skip_frames(time = 2000) # Let new settings take effect.
+clock = time.clock() # Tracks FPS.
+
+while(True):
+    clock.tick() # Track elapsed milliseconds between snapshots().
+    img = sensor.snapshot() # Take a picture and return the image.
+
+    # color_sigma controls how close color-wise pixels have to be to each other
+    # to be blurred together. A smaller value means they have to be closer.
+    # A larger value is less strict.
+
+    # space_sigma controls how close space-wise pixels have to be to each other
+    # to be blurred together. A smaller value means they have to be closer.
+    # A larger value is less strict.
+
+    # Run the kernel on every pixel of the image.
+    img.bilateral(3, color_sigma=0.1, space_sigma=1)
+
+    # Note that the bilateral filter can introduce image defects if you set
+    # color_sigma/space_sigma too aggressively. Increase the sigma values until
+    # the defects go away if you see them.
+
+    print(clock.fps()) # Note: Your OpenMV Cam runs about half as fast while
+    # connected to your computer. The FPS should increase once disconnected.
diff --git a/scripts/examples/Arduino/Nicla-Vision/04-Image-Filters/grayscale_binary_filter.py b/scripts/examples/Arduino/Nicla-Vision/04-Image-Filters/grayscale_binary_filter.py
new file mode 100644
index 000000000..dfaed5012
--- /dev/null
+++ b/scripts/examples/Arduino/Nicla-Vision/04-Image-Filters/grayscale_binary_filter.py
@@ -0,0 +1,45 @@
+# Grayscale Binary Filter Example
+#
+# This script shows off the binary image filter. You may pass binary() any
+# number of thresholds to segment the image by.
+
+import sensor, image, time
+
+sensor.reset()
+sensor.set_framesize(sensor.QVGA)
+sensor.set_pixformat(sensor.GRAYSCALE)
+sensor.skip_frames(time = 2000)
+clock = time.clock()
+
+low_threshold = (0, 50)
+high_threshold = (205, 255)
+
+while(True):
+
+    # Test low threshold
+    for i in range(100):
+        clock.tick()
+        img = sensor.snapshot()
+        img.binary([low_threshold])
+        print(clock.fps())
+
+    # Test high threshold
+    for i in range(100):
+        clock.tick()
+        img = sensor.snapshot()
+        img.binary([high_threshold])
+        print(clock.fps())
+
+    # Test not low threshold
+    for i in range(100):
+        clock.tick()
+        img = sensor.snapshot()
+        img.binary([low_threshold], invert = 1)
+        print(clock.fps())
+
+    # Test not high threshold
+    for i in range(100):
+        clock.tick()
+        img = sensor.snapshot()
+        img.binary([high_threshold], invert = 1)
+        print(clock.fps())
diff --git a/scripts/examples/Arduino/Nicla-Vision/04-Image-Filters/grayscale_light_removal.py b/scripts/examples/Arduino/Nicla-Vision/04-Image-Filters/grayscale_light_removal.py
new file mode 100644
index 000000000..d42b8a8e9
--- /dev/null
+++ b/scripts/examples/Arduino/Nicla-Vision/04-Image-Filters/grayscale_light_removal.py
@@ -0,0 +1,25 @@
+# Grayscale Light Removal
+#
+# This example shows off how to remove bright lights from the image.
+# You can do this using the binary() method with the "zero=" argument.
+#
+# Removing bright lights from the image allows you to use histeq()
+# on the image without outliers from oversaturated parts of the
+# image breaking the algorithm...
+
+import sensor, image, time
+
+sensor.reset() # Initialize the camera sensor.
+sensor.set_pixformat(sensor.GRAYSCALE) # or sensor.RGB565
+sensor.set_framesize(sensor.QQVGA) # or sensor.QVGA (or others)
+sensor.skip_frames(time = 2000) # Let new settings take effect.
+clock = time.clock() # Tracks FPS.
+
+thresholds = (220, 255)
+
+while(True):
+    clock.tick() # Track elapsed milliseconds between snapshots().
+    img = sensor.snapshot().binary([thresholds], invert=False, zero=True)
+
+    print(clock.fps()) # Note: Your OpenMV Cam runs about half as fast while
+    # connected to your computer. The FPS should increase once disconnected.
diff --git a/scripts/examples/Arduino/Nicla-Vision/04-Image-Filters/histogram_equalization.py b/scripts/examples/Arduino/Nicla-Vision/04-Image-Filters/histogram_equalization.py
new file mode 100644
index 000000000..2a3aece50
--- /dev/null
+++ b/scripts/examples/Arduino/Nicla-Vision/04-Image-Filters/histogram_equalization.py
@@ -0,0 +1,19 @@
+# Histogram Equalization
+#
+# This example shows off how to use histogram equalization to improve
+# the contrast in the image.
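+#
+# histeq() with no arguments applies global equalization. As a minimal sketch
+# of the adaptive variant (see adaptive_histogram_equalization.py):
+#
+#   img = sensor.snapshot().histeq(adaptive=True, clip_limit=3)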
+
+import sensor, image, time
+
+sensor.reset()
+sensor.set_pixformat(sensor.RGB565)
+sensor.set_framesize(sensor.QQVGA)
+sensor.skip_frames(time = 2000)
+clock = time.clock()
+
+while(True):
+    clock.tick()
+
+    img = sensor.snapshot().histeq()
+
+    print(clock.fps())
diff --git a/scripts/examples/Arduino/Nicla-Vision/04-Image-Filters/kernel_filters.py b/scripts/examples/Arduino/Nicla-Vision/04-Image-Filters/kernel_filters.py
new file mode 100644
index 000000000..9b9dd565f
--- /dev/null
+++ b/scripts/examples/Arduino/Nicla-Vision/04-Image-Filters/kernel_filters.py
@@ -0,0 +1,27 @@
+# Kernel Filtering Example
+#
+# This example shows off how to use a generic kernel filter.
+
+import sensor, image, time
+
+sensor.reset() # Initialize the camera sensor.
+sensor.set_pixformat(sensor.GRAYSCALE) # or sensor.RGB565
+sensor.set_framesize(sensor.QVGA) # or sensor.QQVGA (or others)
+sensor.skip_frames(time = 2000) # Let new settings take effect.
+clock = time.clock() # Tracks FPS.
+
+kernel_size = 1 # 3x3==1, 5x5==2, 7x7==3, etc.
+
+kernel = [-2, -1, 0, \
+          -1,  1, 1, \
+           0,  1, 2]
+
+while(True):
+    clock.tick() # Track elapsed milliseconds between snapshots().
+    img = sensor.snapshot() # Take a picture and return the image.
+
+    # Run the kernel on every pixel of the image.
+    img.morph(kernel_size, kernel)
+
+    print(clock.fps()) # Note: Your OpenMV Cam runs about half as fast while
+    # connected to your computer. The FPS should increase once disconnected.
diff --git a/scripts/examples/Arduino/Nicla-Vision/04-Image-Filters/lens_correction.py b/scripts/examples/Arduino/Nicla-Vision/04-Image-Filters/lens_correction.py
new file mode 100644
index 000000000..56066f583
--- /dev/null
+++ b/scripts/examples/Arduino/Nicla-Vision/04-Image-Filters/lens_correction.py
@@ -0,0 +1,21 @@
+# Lens Correction
+#
+# This example shows off how to use the lens correction method to fix lens
+# distortion in an image. You need to do this for qrcode / barcode / data matrix
+# detection. Increase the strength below until lines are straight in the view.
+# Zoom in (higher) or out (lower) until you see enough of the image.
+
+import sensor, image, time
+
+sensor.reset()
+sensor.set_pixformat(sensor.RGB565)
+sensor.set_framesize(sensor.QVGA)
+sensor.skip_frames(time = 2000)
+clock = time.clock()
+
+while(True):
+    clock.tick()
+
+    img = sensor.snapshot().lens_corr(strength = 1.8, zoom = 1.0)
+
+    print(clock.fps())
diff --git a/scripts/examples/Arduino/Nicla-Vision/04-Image-Filters/linear_polar.py b/scripts/examples/Arduino/Nicla-Vision/04-Image-Filters/linear_polar.py
new file mode 100644
index 000000000..98106542f
--- /dev/null
+++ b/scripts/examples/Arduino/Nicla-Vision/04-Image-Filters/linear_polar.py
@@ -0,0 +1,21 @@
+# Linear Polar Mapping Example
+#
+# This example shows off re-projecting the image using a linear polar
+# transformation. Linear polar images are useful in that rotations
+# become translations in the X direction and linear changes
+# in scale become linear translations in the Y direction.
+
+import sensor, image, time
+
+sensor.reset() # Initialize the camera sensor.
+sensor.set_pixformat(sensor.RGB565) # or sensor.GRAYSCALE
+sensor.set_framesize(sensor.QQVGA) # or sensor.QVGA (or others)
+sensor.skip_frames(time = 2000) # Let new settings take effect.
+clock = time.clock() # Tracks FPS.
+
+while(True):
+    clock.tick() # Track elapsed milliseconds between snapshots().
+    img = sensor.snapshot().linpolar(reverse=False)
+
+    print(clock.fps()) # Note: Your OpenMV Cam runs about half as fast while
+    # connected to your computer. The FPS should increase once disconnected.
diff --git a/scripts/examples/Arduino/Nicla-Vision/04-Image-Filters/log_polar.py b/scripts/examples/Arduino/Nicla-Vision/04-Image-Filters/log_polar.py
new file mode 100644
index 000000000..d79f374b9
--- /dev/null
+++ b/scripts/examples/Arduino/Nicla-Vision/04-Image-Filters/log_polar.py
@@ -0,0 +1,21 @@
+# Log Polar Mapping Example
+#
+# This example shows off re-projecting the image using a log polar
+# transformation. Log polar images are useful in that rotations
+# become translations in the X direction and exponential changes
+# in scale (x2, x4, etc.) become linear translations in the Y direction.
+
+import sensor, image, time
+
+sensor.reset() # Initialize the camera sensor.
+sensor.set_pixformat(sensor.RGB565) # or sensor.GRAYSCALE
+sensor.set_framesize(sensor.QQVGA) # or sensor.QVGA (or others)
+sensor.skip_frames(time = 2000) # Let new settings take effect.
+clock = time.clock() # Tracks FPS.
+
+while(True):
+    clock.tick() # Track elapsed milliseconds between snapshots().
+    img = sensor.snapshot().logpolar(reverse=False)
+
+    print(clock.fps()) # Note: Your OpenMV Cam runs about half as fast while
+    # connected to your computer. The FPS should increase once disconnected.
diff --git a/scripts/examples/Arduino/Nicla-Vision/04-Image-Filters/mean_adaptive_threshold_filter.py b/scripts/examples/Arduino/Nicla-Vision/04-Image-Filters/mean_adaptive_threshold_filter.py
new file mode 100644
index 000000000..2d140ecc4
--- /dev/null
+++ b/scripts/examples/Arduino/Nicla-Vision/04-Image-Filters/mean_adaptive_threshold_filter.py
@@ -0,0 +1,25 @@
+# Mean Adaptive Threshold Filter Example
+#
+# This example shows off mean filtering with adaptive thresholding.
+# When mean(threshold=True) the mean() method adaptive thresholds the image
+# by comparing the mean of the pixels around a pixel, minus an offset, with that pixel.
+
+import sensor, image, time
+
+sensor.reset() # Initialize the camera sensor.
+sensor.set_pixformat(sensor.RGB565) # or sensor.GRAYSCALE
+sensor.set_framesize(sensor.QQVGA) # or sensor.QVGA (or others)
+sensor.skip_frames(time = 2000) # Let new settings take effect.
+clock = time.clock() # Tracks FPS.
+
+while(True):
+    clock.tick() # Track elapsed milliseconds between snapshots().
+    img = sensor.snapshot() # Take a picture and return the image.
+
+    # The first argument is the kernel size. N corresponds to a ((N*2)+1)^2
+    # kernel size. E.g. 1 == 3x3 kernel, 2 == 5x5 kernel, etc. Note: You
+    # shouldn't ever need to use a value bigger than 2.
+    img.mean(1, threshold=True, offset=5, invert=True)
+
+    print(clock.fps()) # Note: Your OpenMV Cam runs about half as fast while
+    # connected to your computer. The FPS should increase once disconnected.
diff --git a/scripts/examples/Arduino/Nicla-Vision/04-Image-Filters/mean_filter.py b/scripts/examples/Arduino/Nicla-Vision/04-Image-Filters/mean_filter.py
new file mode 100644
index 000000000..c6de0c81b
--- /dev/null
+++ b/scripts/examples/Arduino/Nicla-Vision/04-Image-Filters/mean_filter.py
@@ -0,0 +1,25 @@
+# Mean Filter Example
+#
+# This example shows off mean filtering. Mean filtering is your standard average
+# filter in an NxN neighborhood. Mean filtering removes noise in the image by
+# blurring everything. But, it's the fastest kernel filter operation.
+
+import sensor, image, time
+
+sensor.reset() # Initialize the camera sensor.
+sensor.set_pixformat(sensor.RGB565) # or sensor.GRAYSCALE
+sensor.set_framesize(sensor.QQVGA) # or sensor.QVGA (or others)
+sensor.skip_frames(time = 2000) # Let new settings take effect.
+clock = time.clock() # Tracks FPS.
+
+while(True):
+    clock.tick() # Track elapsed milliseconds between snapshots().
+    img = sensor.snapshot() # Take a picture and return the image.
+
+    # The only argument is the kernel size. N corresponds to a ((N*2)+1)^2
+    # kernel size. E.g. 1 == 3x3 kernel, 2 == 5x5 kernel, etc. Note: You
+    # shouldn't ever need to use a value bigger than 2.
+    img.mean(1)
+
+    print(clock.fps()) # Note: Your OpenMV Cam runs about half as fast while
+    # connected to your computer. The FPS should increase once disconnected.
diff --git a/scripts/examples/Arduino/Nicla-Vision/04-Image-Filters/median_adaptive_threshold_filter.py b/scripts/examples/Arduino/Nicla-Vision/04-Image-Filters/median_adaptive_threshold_filter.py
new file mode 100644
index 000000000..673b28482
--- /dev/null
+++ b/scripts/examples/Arduino/Nicla-Vision/04-Image-Filters/median_adaptive_threshold_filter.py
@@ -0,0 +1,27 @@
+# Median Adaptive Threshold Filter Example
+#
+# This example shows off median filtering with adaptive thresholding.
+# When median(threshold=True) the median() method adaptive thresholds the image
+# by comparing the median of the pixels around a pixel, minus an offset, with that pixel.
+
+import sensor, image, time
+
+sensor.reset() # Initialize the camera sensor.
+sensor.set_pixformat(sensor.RGB565) # or sensor.GRAYSCALE
+sensor.set_framesize(sensor.QQVGA) # or sensor.QVGA (or others)
+sensor.skip_frames(time = 2000) # Let new settings take effect.
+clock = time.clock() # Tracks FPS.
+
+while(True):
+    clock.tick() # Track elapsed milliseconds between snapshots().
+    img = sensor.snapshot() # Take a picture and return the image.
+
+    # The first argument to the median filter is the kernel size; it can be
+    # either 0, 1, or 2 for a 1x1, 3x3, or 5x5 kernel respectively. The second
+    # argument "percentile" is the percentile number to choose from the NxN
+    # neighborhood. 0.5 is the median, 0.25 is the lower quartile, and 0.75
+    # would be the upper quartile.
+    img.median(1, percentile=0.5, threshold=True, offset=5, invert=True)
+
+    print(clock.fps()) # Note: Your OpenMV Cam runs about half as fast while
+    # connected to your computer. The FPS should increase once disconnected.
diff --git a/scripts/examples/Arduino/Nicla-Vision/04-Image-Filters/median_filter.py b/scripts/examples/Arduino/Nicla-Vision/04-Image-Filters/median_filter.py
new file mode 100644
index 000000000..441464a36
--- /dev/null
+++ b/scripts/examples/Arduino/Nicla-Vision/04-Image-Filters/median_filter.py
@@ -0,0 +1,27 @@
+# Median Filter Example
+#
+# This example shows off median filtering. Median filtering replaces every pixel
+# with the median value of its NxN neighborhood. Median filtering is good for
+# removing noise in the image while preserving edges.
+
+import sensor, image, time
+
+sensor.reset() # Initialize the camera sensor.
+sensor.set_pixformat(sensor.RGB565) # or sensor.GRAYSCALE
+sensor.set_framesize(sensor.QQVGA) # or sensor.QVGA (or others)
+sensor.skip_frames(time = 2000) # Let new settings take effect.
+clock = time.clock() # Tracks FPS.
+
+while(True):
+    clock.tick() # Track elapsed milliseconds between snapshots().
+    img = sensor.snapshot() # Take a picture and return the image.
+
+    # The first argument to the median filter is the kernel size; it can be
+    # either 0, 1, or 2 for a 1x1, 3x3, or 5x5 kernel respectively. The second
+    # argument "percentile" is the percentile number to choose from the NxN
+    # neighborhood. 0.5 is the median, 0.25 is the lower quartile, and 0.75
+    # would be the upper quartile.
+    img.median(1, percentile=0.5)
+
+    print(clock.fps()) # Note: Your OpenMV Cam runs about half as fast while
+    # connected to your computer. The FPS should increase once disconnected.
diff --git a/scripts/examples/Arduino/Nicla-Vision/04-Image-Filters/midpoint_adaptive_threshold_filter.py b/scripts/examples/Arduino/Nicla-Vision/04-Image-Filters/midpoint_adaptive_threshold_filter.py
new file mode 100644
index 000000000..adaeaaa5d
--- /dev/null
+++ b/scripts/examples/Arduino/Nicla-Vision/04-Image-Filters/midpoint_adaptive_threshold_filter.py
@@ -0,0 +1,28 @@
+# Midpoint Adaptive Threshold Filter Example
+#
+# This example shows off midpoint filtering with adaptive thresholding.
+# When midpoint(threshold=True) the midpoint() method adaptive thresholds the image
+# by comparing the midpoint of the pixels around a pixel, minus an offset, with that pixel.
+
+import sensor, image, time
+
+sensor.reset() # Initialize the camera sensor.
+sensor.set_pixformat(sensor.RGB565) # or sensor.GRAYSCALE
+sensor.set_framesize(sensor.QQVGA) # or sensor.QVGA (or others)
+sensor.skip_frames(time = 2000) # Let new settings take effect.
+clock = time.clock() # Tracks FPS.
+
+while(True):
+    clock.tick() # Track elapsed milliseconds between snapshots().
+    img = sensor.snapshot() # Take a picture and return the image.
+
+    # The first argument is the kernel size. N corresponds to a ((N*2)+1)^2
+    # kernel size. E.g. 1 == 3x3 kernel, 2 == 5x5 kernel, etc. Note: You
+    # shouldn't ever need to use a value bigger than 2. The "bias" argument
+    # lets you select between min and max blending. 0.5 == midpoint filter,
+    # 0.0 == min filter, and 1.0 == max filter. Note that the min filter
+    # makes images darker while the max filter makes images lighter.
+    img.midpoint(1, bias=0.5, threshold=True, offset=5, invert=True)
+
+    print(clock.fps()) # Note: Your OpenMV Cam runs about half as fast while
+    # connected to your computer. The FPS should increase once disconnected.
diff --git a/scripts/examples/Arduino/Nicla-Vision/04-Image-Filters/midpoint_filter.py b/scripts/examples/Arduino/Nicla-Vision/04-Image-Filters/midpoint_filter.py
new file mode 100644
index 000000000..ee9ab5d94
--- /dev/null
+++ b/scripts/examples/Arduino/Nicla-Vision/04-Image-Filters/midpoint_filter.py
@@ -0,0 +1,27 @@
+# Midpoint Filter Example
+#
+# This example shows off midpoint filtering. Midpoint filtering replaces each
+# pixel by the average of the min and max pixel values for an NxN neighborhood.
+
+import sensor, image, time
+
+sensor.reset() # Initialize the camera sensor.
+sensor.set_pixformat(sensor.RGB565) # or sensor.GRAYSCALE
+sensor.set_framesize(sensor.QQVGA) # or sensor.QVGA (or others)
+sensor.skip_frames(time = 2000) # Let new settings take effect.
+clock = time.clock() # Tracks FPS.
+
+while(True):
+    clock.tick() # Track elapsed milliseconds between snapshots().
+    img = sensor.snapshot() # Take a picture and return the image.
+
+    # The first argument is the kernel size. N corresponds to a ((N*2)+1)^2
+    # kernel size. E.g. 1 == 3x3 kernel, 2 == 5x5 kernel, etc. Note: You
+    # shouldn't ever need to use a value bigger than 2. The "bias" argument
+    # lets you select between min and max blending. 0.5 == midpoint filter,
+    # 0.0 == min filter, and 1.0 == max filter. Note that the min filter
+    # makes images darker while the max filter makes images lighter.
+    img.midpoint(1, bias=0.5)
+
+    print(clock.fps()) # Note: Your OpenMV Cam runs about half as fast while
+    # connected to your computer. The FPS should increase once disconnected.
diff --git a/scripts/examples/Arduino/Nicla-Vision/04-Image-Filters/mode_adaptive_threshold_filter.py b/scripts/examples/Arduino/Nicla-Vision/04-Image-Filters/mode_adaptive_threshold_filter.py
new file mode 100644
index 000000000..8ab9a0675
--- /dev/null
+++ b/scripts/examples/Arduino/Nicla-Vision/04-Image-Filters/mode_adaptive_threshold_filter.py
@@ -0,0 +1,25 @@
+# Mode Adaptive Threshold Filter Example
+#
+# This example shows off mode filtering with adaptive thresholding.
+# When mode(threshold=True) the mode() method adaptive thresholds the image
+# by comparing the mode of the pixels around a pixel, minus an offset, with that pixel.
+# Avoid using the mode filter on RGB565 images. It will cause artifacts on image edges...
+
+import sensor, image, time
+
+sensor.reset() # Initialize the camera sensor.
+sensor.set_pixformat(sensor.GRAYSCALE) # or sensor.RGB565
+sensor.set_framesize(sensor.QQVGA) # or sensor.QVGA (or others)
+sensor.skip_frames(time = 2000) # Let new settings take effect.
+clock = time.clock() # Tracks FPS.
+
+while(True):
+    clock.tick() # Track elapsed milliseconds between snapshots().
+    img = sensor.snapshot() # Take a picture and return the image.
+
+    # The only argument to the mode filter is the kernel size, which can be
+    # either 0, 1, or 2 for a 1x1, 3x3, or 5x5 kernel respectively.
+    img.mode(1, threshold=True, offset=5, invert=True)
+
+    print(clock.fps()) # Note: Your OpenMV Cam runs about half as fast while
+    # connected to your computer. The FPS should increase once disconnected.
diff --git a/scripts/examples/Arduino/Nicla-Vision/04-Image-Filters/mode_filter.py b/scripts/examples/Arduino/Nicla-Vision/04-Image-Filters/mode_filter.py
new file mode 100644
index 000000000..170937c58
--- /dev/null
+++ b/scripts/examples/Arduino/Nicla-Vision/04-Image-Filters/mode_filter.py
@@ -0,0 +1,25 @@
+# Mode Filter Example
+#
+# This example shows off mode filtering. Mode filtering is a highly non-linear
+# operation which replaces each pixel with the mode of the NxN neighborhood
+# of pixels around it. Avoid using the mode filter on RGB565 images. It will
+# cause artifacts on image edges...
+
+import sensor, image, time
+
+sensor.reset() # Initialize the camera sensor.
+sensor.set_pixformat(sensor.GRAYSCALE) # or sensor.RGB565
+sensor.set_framesize(sensor.QQVGA) # or sensor.QVGA (or others)
+sensor.skip_frames(time = 2000) # Let new settings take effect.
+clock = time.clock() # Tracks FPS.
+
+while(True):
+    clock.tick() # Track elapsed milliseconds between snapshots().
+    img = sensor.snapshot() # Take a picture and return the image.
+
+    # The only argument to the mode filter is the kernel size, which can be
+    # either 0, 1, or 2 for a 1x1, 3x3, or 5x5 kernel respectively.
+    img.mode(1)
+
+    print(clock.fps()) # Note: Your OpenMV Cam runs about half as fast while
+    # connected to your computer. The FPS should increase once disconnected.
diff --git a/scripts/examples/Arduino/Nicla-Vision/04-Image-Filters/negative.py b/scripts/examples/Arduino/Nicla-Vision/04-Image-Filters/negative.py
new file mode 100644
index 000000000..36186cba9
--- /dev/null
+++ b/scripts/examples/Arduino/Nicla-Vision/04-Image-Filters/negative.py
@@ -0,0 +1,19 @@
+# Negative Example
+#
+# This example shows off negating the image. This is not a particularly
+# useful method but it can come in handy once in a while.
+
+import sensor, image, time
+
+sensor.reset() # Initialize the camera sensor.
+sensor.set_pixformat(sensor.RGB565) # or sensor.GRAYSCALE
+sensor.set_framesize(sensor.QVGA) # or sensor.QQVGA (or others)
+sensor.skip_frames(time = 2000) # Let new settings take effect.
+clock = time.clock() # Tracks FPS.
+
+while(True):
+    clock.tick() # Track elapsed milliseconds between snapshots().
+    img = sensor.snapshot().negate()
+
+    print(clock.fps()) # Note: Your OpenMV Cam runs about half as fast while
+    # connected to your computer. The FPS should increase once disconnected.
diff --git a/scripts/examples/Arduino/Nicla-Vision/04-Image-Filters/perspective_and_rotation_correction.py b/scripts/examples/Arduino/Nicla-Vision/04-Image-Filters/perspective_and_rotation_correction.py
new file mode 100644
index 000000000..b89117d40
--- /dev/null
+++ b/scripts/examples/Arduino/Nicla-Vision/04-Image-Filters/perspective_and_rotation_correction.py
@@ -0,0 +1,71 @@
+# Perspective and Rotation Correction
+#
+# This example shows off how to use rotation_corr() to both correct for
+# perspective distortion and then rotate the corrected image in 3D
+# space afterwards to handle movement.
+
+import sensor, image, time
+
+sensor.reset()
+sensor.set_pixformat(sensor.RGB565)
+sensor.set_framesize(sensor.QVGA)
+sensor.skip_frames(time = 2000)
+clock = time.clock()
+
+# The image will be warped such that the following points become the new:
+#
+#   (0,   0)
+#   (w-1, 0)
+#   (w-1, h-1)
+#   (0,   h-1)
+#
+# Try setting the points below to the corners of a quadrilateral
+# (in clockwise order) in the field-of-view. You can get points
+# on the image by clicking and dragging on the frame buffer and
+# recording the values shown in the histogram widget.
+
+w = sensor.width()
+h = sensor.height()
+
+TARGET_POINTS = [(0,   0),   # (x, y) CHANGE ME!
+                 (w-1, 0),   # (x, y) CHANGE ME!
+                 (w-1, h-1), # (x, y) CHANGE ME!
+                 (0,   h-1)] # (x, y) CHANGE ME!
+
+# Degrees per frame to rotate by...
+X_ROTATION_DEGREE_RATE = 5
+Y_ROTATION_DEGREE_RATE = 0.5
+Z_ROTATION_DEGREE_RATE = 0
+X_OFFSET = 0
+Y_OFFSET = 0
+
+ZOOM_AMOUNT = 1 # Lower zooms out - Higher zooms in.
+FOV_WINDOW = 25 # Between 0 and 180. Represents the field-of-view of the scene
+                # window when rotating the image in 3D space. Values closer to
+                # zero result in lines becoming straighter as the window
+                # moves away from the image being rotated in 3D space. A larger
+                # value moves the window closer to the image in 3D space, which
+                # results in more perspective distortion and sometimes
+                # the image in 3D intersecting the scene window.
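+                # (Illustrative values only: FOV_WINDOW = 10 gives a nearly
+                # flat, orthographic-looking projection, while FOV_WINDOW = 80
+                # exaggerates the perspective effect as the image rotates.)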
+
+x_rotation_counter = 0
+y_rotation_counter = 0
+z_rotation_counter = 0
+
+while(True):
+    clock.tick()
+
+    img = sensor.snapshot().rotation_corr(x_rotation = x_rotation_counter, \
+                                          y_rotation = y_rotation_counter, \
+                                          z_rotation = z_rotation_counter, \
+                                          x_translation = X_OFFSET, \
+                                          y_translation = Y_OFFSET, \
+                                          zoom = ZOOM_AMOUNT, \
+                                          fov = FOV_WINDOW, \
+                                          corners = TARGET_POINTS)
+
+    x_rotation_counter += X_ROTATION_DEGREE_RATE
+    y_rotation_counter += Y_ROTATION_DEGREE_RATE
+    z_rotation_counter += Z_ROTATION_DEGREE_RATE
+
+    print(clock.fps())
diff --git a/scripts/examples/Arduino/Nicla-Vision/04-Image-Filters/perspective_correction.py b/scripts/examples/Arduino/Nicla-Vision/04-Image-Filters/perspective_correction.py
new file mode 100644
index 000000000..a8f2aa58d
--- /dev/null
+++ b/scripts/examples/Arduino/Nicla-Vision/04-Image-Filters/perspective_correction.py
@@ -0,0 +1,39 @@
+# Perspective Correction
+#
+# This example shows off how to use rotation_corr() to fix perspective
+# issues related to how your OpenMV Cam is mounted.
+
+import sensor, image, time
+
+sensor.reset()
+sensor.set_pixformat(sensor.RGB565)
+sensor.set_framesize(sensor.QVGA)
+sensor.skip_frames(time = 2000)
+clock = time.clock()
+
+# The image will be warped such that the following points become the new:
+#
+#   (0,   0)
+#   (w-1, 0)
+#   (w-1, h-1)
+#   (0,   h-1)
+#
+# Try setting the points below to the corners of a quadrilateral
+# (in clockwise order) in the field-of-view. You can get points
+# on the image by clicking and dragging on the frame buffer and
+# recording the values shown in the histogram widget.
+
+w = sensor.width()
+h = sensor.height()
+
+TARGET_POINTS = [(0,   0),   # (x, y) CHANGE ME!
+                 (w-1, 0),   # (x, y) CHANGE ME!
+                 (w-1, h-1), # (x, y) CHANGE ME!
+                 (0,   h-1)] # (x, y) CHANGE ME!
+
+while(True):
+    clock.tick()
+
+    img = sensor.snapshot().rotation_corr(corners = TARGET_POINTS)
+
+    print(clock.fps())
diff --git a/scripts/examples/Arduino/Nicla-Vision/04-Image-Filters/rotation_correction.py b/scripts/examples/Arduino/Nicla-Vision/04-Image-Filters/rotation_correction.py
new file mode 100644
index 000000000..b95e41d78
--- /dev/null
+++ b/scripts/examples/Arduino/Nicla-Vision/04-Image-Filters/rotation_correction.py
@@ -0,0 +1,49 @@
+# Rotation Correction
+#
+# This example shows off how to use rotation_corr() to play with the scene
+# window your OpenMV Cam sees.
+
+import sensor, image, time
+
+# Degrees per frame to rotate by...
+X_ROTATION_DEGREE_RATE = 5
+Y_ROTATION_DEGREE_RATE = 0.5
+Z_ROTATION_DEGREE_RATE = 0
+X_OFFSET = 0
+Y_OFFSET = 0
+
+ZOOM_AMOUNT = 1 # Lower zooms out - Higher zooms in.
+FOV_WINDOW = 60 # Between 0 and 180. Represents the field-of-view of the scene
+                # window when rotating the image in 3D space. Values closer to
+                # zero result in lines becoming straighter as the window
+                # moves away from the image being rotated in 3D space. A larger
+                # value moves the window closer to the image in 3D space, which
+                # results in more perspective distortion and sometimes
+                # the image in 3D intersecting the scene window.
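+
+# Roughly speaking, x_rotation tilts the view up/down, y_rotation pans it
+# left/right, and z_rotation spins the image in the plane of the screen.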
+
+sensor.reset()
+sensor.set_pixformat(sensor.RGB565)
+sensor.set_framesize(sensor.QVGA)
+sensor.skip_frames(time = 2000)
+clock = time.clock()
+
+x_rotation_counter = 0
+y_rotation_counter = 0
+z_rotation_counter = 0
+
+while(True):
+    clock.tick()
+
+    img = sensor.snapshot().rotation_corr(x_rotation = x_rotation_counter, \
+                                          y_rotation = y_rotation_counter, \
+                                          z_rotation = z_rotation_counter, \
+                                          x_translation = X_OFFSET, \
+                                          y_translation = Y_OFFSET, \
+                                          zoom = ZOOM_AMOUNT, \
+                                          fov = FOV_WINDOW)
+
+    x_rotation_counter += X_ROTATION_DEGREE_RATE
+    y_rotation_counter += Y_ROTATION_DEGREE_RATE
+    z_rotation_counter += Z_ROTATION_DEGREE_RATE
+
+    print(clock.fps())
diff --git a/scripts/examples/Arduino/Nicla-Vision/04-Image-Filters/sharpen_filter.py b/scripts/examples/Arduino/Nicla-Vision/04-Image-Filters/sharpen_filter.py
new file mode 100644
index 000000000..0f541e203
--- /dev/null
+++ b/scripts/examples/Arduino/Nicla-Vision/04-Image-Filters/sharpen_filter.py
@@ -0,0 +1,21 @@
+# Sharpen Filter Example
+#
+# This example shows off using the laplacian filter to sharpen images.
+
+import sensor, image, time
+
+sensor.reset() # Initialize the camera sensor.
+sensor.set_pixformat(sensor.GRAYSCALE) # or sensor.RGB565
+sensor.set_framesize(sensor.QQVGA) # or sensor.QVGA (or others)
+sensor.skip_frames(time = 2000) # Let new settings take effect.
+clock = time.clock() # Tracks FPS.
+
+while(True):
+    clock.tick() # Track elapsed milliseconds between snapshots().
+    img = sensor.snapshot() # Take a picture and return the image.
+
+    # Run the kernel on every pixel of the image.
+    img.laplacian(1, sharpen=True)
+
+    print(clock.fps()) # Note: Your OpenMV Cam runs about half as fast while
+    # connected to your computer. The FPS should increase once disconnected.
diff --git a/scripts/examples/Arduino/Nicla-Vision/04-Image-Filters/ulab.py b/scripts/examples/Arduino/Nicla-Vision/04-Image-Filters/ulab.py
new file mode 100644
index 000000000..b7cfa4927
--- /dev/null
+++ b/scripts/examples/Arduino/Nicla-Vision/04-Image-Filters/ulab.py
@@ -0,0 +1,19 @@
+# Ulab is a numpy-like module for micropython, meant to simplify and speed up common
+# mathematical operations on arrays. This basic example shows mean/std on an image.
+#
+# NOTE: ndarrays cause the heap to be fragmented easily. If you run out of memory,
+# there's not much that can be done about it; lowering the resolution might help.
+
+import sensor, image, time
+from ulab import numpy as np
+
+sensor.reset() # Reset and initialize the sensor.
+sensor.set_pixformat(sensor.GRAYSCALE) # Set pixel format to GRAYSCALE (or RGB565)
+sensor.set_framesize(sensor.QQVGA) # Set frame size to QQVGA (160x120)
+clock = time.clock() # Create a clock object to track the FPS.
+
+while (True):
+    img = sensor.snapshot() # Take a picture and return the image.
+    a = np.array(img, dtype=np.uint8)
+    print("mean: %d std:%d"%(np.mean(a), np.std(a)))
+
diff --git a/scripts/examples/Arduino/Nicla-Vision/04-Image-Filters/unsharp_filter.py b/scripts/examples/Arduino/Nicla-Vision/04-Image-Filters/unsharp_filter.py
new file mode 100644
index 000000000..eb8eb2270
--- /dev/null
+++ b/scripts/examples/Arduino/Nicla-Vision/04-Image-Filters/unsharp_filter.py
@@ -0,0 +1,21 @@
+# Unsharp Filter Example
+#
+# This example shows off using the gaussian filter to apply an unsharp mask to images.
+
+import sensor, image, time
+
+sensor.reset() # Initialize the camera sensor.
+sensor.set_pixformat(sensor.GRAYSCALE) # or sensor.RGB565
+sensor.set_framesize(sensor.QQVGA) # or sensor.QVGA (or others)
+sensor.skip_frames(time = 2000) # Let new settings take effect.
+clock = time.clock() # Tracks FPS.
+
+while(True):
+    clock.tick() # Track elapsed milliseconds between snapshots().
+    img = sensor.snapshot() # Take a picture and return the image.
+
+    # Run the kernel on every pixel of the image.
+    img.gaussian(1, unsharp=True)
+
+    print(clock.fps()) # Note: Your OpenMV Cam runs about half as fast while
+    # connected to your computer. The FPS should increase once disconnected.
diff --git a/scripts/examples/Arduino/Nicla-Vision/04-Image-Filters/vflip_hmirror_transpose.py b/scripts/examples/Arduino/Nicla-Vision/04-Image-Filters/vflip_hmirror_transpose.py
new file mode 100644
index 000000000..bf43c4e73
--- /dev/null
+++ b/scripts/examples/Arduino/Nicla-Vision/04-Image-Filters/vflip_hmirror_transpose.py
@@ -0,0 +1,33 @@
+# Vertical Flip - Horizontal Mirror - Transpose
+#
+# This example shows off how to vertically flip, horizontally mirror, or
+# transpose an image. Note that:
+#
+# vflip=False, hmirror=False, transpose=False -> 0 degree rotation
+# vflip=True,  hmirror=False, transpose=True  -> 90 degree rotation
+# vflip=True,  hmirror=True,  transpose=False -> 180 degree rotation
+# vflip=False, hmirror=True,  transpose=True  -> 270 degree rotation
+
+import sensor, image, time, pyb
+
+sensor.reset()
+sensor.set_pixformat(sensor.RGB565)
+sensor.set_framesize(sensor.QVGA)
+sensor.skip_frames(time = 2000)
+clock = time.clock()
+
+mills = pyb.millis()
+counter = 0
+
+while(True):
+    clock.tick()
+
+    img = sensor.snapshot().replace(vflip=(counter//2)%2,
+                                    hmirror=(counter//4)%2,
+                                    transpose=(counter//8)%2)
+
+    if (pyb.millis() > (mills + 1000)):
+        mills = pyb.millis()
+        counter += 1
+
+    print(clock.fps())
diff --git a/scripts/examples/Arduino/Nicla-Vision/05-Snapshot/emboss_snapshot.py b/scripts/examples/Arduino/Nicla-Vision/05-Snapshot/emboss_snapshot.py
new file mode 100644
index 000000000..69bc105c6
--- /dev/null
+++ b/scripts/examples/Arduino/Nicla-Vision/05-Snapshot/emboss_snapshot.py
@@ -0,0 +1,33 @@
+# Emboss Snapshot Example
+#
+# Note: You will need an SD card to run this example.
+#
+# You can use your OpenMV Cam to save modified image files.
+
+import sensor, image, pyb
+
+RED_LED_PIN = 1
+BLUE_LED_PIN = 3
+
+sensor.reset() # Initialize the camera sensor.
+sensor.set_pixformat(sensor.RGB565) # or sensor.GRAYSCALE
+sensor.set_framesize(sensor.QVGA) # or sensor.QQVGA (or others)
+sensor.skip_frames(time = 2000) # Let new settings take effect.
+
+pyb.LED(RED_LED_PIN).on()
+sensor.skip_frames(time = 2000) # Give the user time to get ready.
+
+pyb.LED(RED_LED_PIN).off()
+pyb.LED(BLUE_LED_PIN).on()
+
+print("You're on camera!")
+img = sensor.snapshot()
+
+img.morph(1, [+2, +1, +0,\
+              +1, +1, -1,\
+              +0, -1, -2]) # Emboss the image.
+
+img.save("example.jpg") # or "example.bmp" (or others)
+
+pyb.LED(BLUE_LED_PIN).off()
+print("Done! Reset the camera to see the saved image.")
diff --git a/scripts/examples/Arduino/Nicla-Vision/05-Snapshot/snapshot.py b/scripts/examples/Arduino/Nicla-Vision/05-Snapshot/snapshot.py
new file mode 100644
index 000000000..8eb621e6d
--- /dev/null
+++ b/scripts/examples/Arduino/Nicla-Vision/05-Snapshot/snapshot.py
@@ -0,0 +1,27 @@
+# Snapshot Example
+#
+# Note: You will need an SD card to run this example.
+#
+# You can use your OpenMV Cam to save image files.
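+#
+# save() infers the file format from the file extension, so, for example,
+# sensor.snapshot().save("example.bmp") would write an uncompressed bitmap
+# instead of a JPEG.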
+
+import sensor, image, pyb
+
+RED_LED_PIN = 1
+BLUE_LED_PIN = 3
+
+sensor.reset() # Initialize the camera sensor.
+sensor.set_pixformat(sensor.RGB565) # or sensor.GRAYSCALE
+sensor.set_framesize(sensor.QVGA) # or sensor.QQVGA (or others)
+sensor.skip_frames(time = 2000) # Let new settings take effect.
+
+pyb.LED(RED_LED_PIN).on()
+sensor.skip_frames(time = 2000) # Give the user time to get ready.
+
+pyb.LED(RED_LED_PIN).off()
+pyb.LED(BLUE_LED_PIN).on()
+
+print("You're on camera!")
+sensor.snapshot().save("example.jpg") # or "example.bmp" (or others)
+
+pyb.LED(BLUE_LED_PIN).off()
+print("Done! Reset the camera to see the saved image.")
diff --git a/scripts/examples/Arduino/Nicla-Vision/05-Snapshot/snapshot_on_face_detection.py b/scripts/examples/Arduino/Nicla-Vision/05-Snapshot/snapshot_on_face_detection.py
new file mode 100644
index 000000000..a716df263
--- /dev/null
+++ b/scripts/examples/Arduino/Nicla-Vision/05-Snapshot/snapshot_on_face_detection.py
@@ -0,0 +1,51 @@
+# Snapshot on Face Detection Example
+#
+# Note: You will need an SD card to run this example.
+#
+# This example demonstrates using face tracking on your OpenMV Cam to take a
+# picture.
+
+import sensor, image, pyb
+
+RED_LED_PIN = 1
+BLUE_LED_PIN = 3
+
+sensor.reset() # Initialize the camera sensor.
+sensor.set_pixformat(sensor.GRAYSCALE)
+sensor.set_framesize(sensor.HQVGA) # or sensor.QQVGA (or others)
+sensor.skip_frames(time = 2000) # Let new settings take effect.
+
+# Load up a face detection HaarCascade. This is an object that your OpenMV Cam
+# can use to detect faces using the find_features() method below. Your OpenMV
+# Cam has the frontalface HaarCascade built-in. By default, all the stages of the
+# HaarCascade are loaded. However, you can adjust the number of stages to speed
+# up processing at the expense of accuracy. The frontalface HaarCascade has 25
+# stages.
+face_cascade = image.HaarCascade("frontalface", stages=25)
+
+while(True):
+
+    pyb.LED(RED_LED_PIN).on()
+    print("About to start detecting faces...")
+    sensor.skip_frames(time = 2000) # Give the user time to get ready.
+
+    pyb.LED(RED_LED_PIN).off()
+    print("Now detecting faces!")
+    pyb.LED(BLUE_LED_PIN).on()
+
+    diff = 10 # We'll say we detected a face after 10 frames.
+    while(diff):
+        img = sensor.snapshot()
+        # Threshold can be between 0.0 and 1.0. A higher threshold results in a
+        # higher detection rate with more false positives. The scale value
+        # controls the matching scale allowing you to detect smaller faces.
+        faces = img.find_features(face_cascade, threshold=0.5, scale_factor=1.5)
+
+        if faces:
+            diff -= 1
+            for r in faces:
+                img.draw_rectangle(r)
+
+    pyb.LED(BLUE_LED_PIN).off()
+    print("Face detected! Saving image...")
+    sensor.snapshot().save("snapshot-%d.jpg" % pyb.rng()) # Save Pic.
diff --git a/scripts/examples/Arduino/Nicla-Vision/05-Snapshot/snapshot_on_movement.py b/scripts/examples/Arduino/Nicla-Vision/05-Snapshot/snapshot_on_movement.py
new file mode 100644
index 000000000..4e6c67956
--- /dev/null
+++ b/scripts/examples/Arduino/Nicla-Vision/05-Snapshot/snapshot_on_movement.py
@@ -0,0 +1,44 @@
+# Snapshot on Movement Example
+#
+# Note: You will need an SD card to run this example.
+#
+# This example demonstrates using frame differencing with your OpenMV Cam to do
+# motion detection. After motion is detected your OpenMV Cam will take a picture.
+
+import sensor, image, pyb, os
+
+RED_LED_PIN = 1
+BLUE_LED_PIN = 3
+
+sensor.reset() # Initialize the camera sensor.
+sensor.set_pixformat(sensor.RGB565) # or sensor.GRAYSCALE
+sensor.set_framesize(sensor.QVGA) # or sensor.QQVGA (or others)
+sensor.skip_frames(time = 2000) # Let new settings take effect.
+
+if not "temp" in os.listdir(): os.mkdir("temp") # Make a temp directory
+
+while(True):
+
+    pyb.LED(RED_LED_PIN).on()
+    print("About to save background image...")
+    sensor.skip_frames(time = 2000) # Give the user time to get ready.
+
+    pyb.LED(RED_LED_PIN).off()
+    sensor.snapshot().save("temp/bg.bmp")
+    print("Saved background image - Now detecting motion!")
+    pyb.LED(BLUE_LED_PIN).on()
+
+    diff = 10 # We'll say we detected motion after 10 frames of motion.
+    while(diff):
+        img = sensor.snapshot()
+        img.difference("temp/bg.bmp")
+        stats = img.statistics()
+        # stats[5] is the max of the lighting color channel. The code below
+        # triggers when the lighting max for the whole image goes above 20.
+        # The lighting difference maximum should be zero normally.
+        if (stats[5] > 20):
+            diff -= 1
+
+    pyb.LED(BLUE_LED_PIN).off()
+    print("Movement detected! Saving image...")
+    sensor.snapshot().save("temp/snapshot-%d.jpg" % pyb.rng()) # Save Pic.
diff --git a/scripts/examples/Arduino/Nicla-Vision/05-Snapshot/time_lapse_photos.py b/scripts/examples/Arduino/Nicla-Vision/05-Snapshot/time_lapse_photos.py
new file mode 100644
index 000000000..be6c370dd
--- /dev/null
+++ b/scripts/examples/Arduino/Nicla-Vision/05-Snapshot/time_lapse_photos.py
@@ -0,0 +1,67 @@
+# Time Lapse Photos (Credit nedhorning)
+#
+# This example shows off how to take time lapse photos using your OpenMV
+# Cam and using the RTC module along with a timer interrupt to achieve
+# very low power operation.
+#
+# Note that if the USB is still plugged in when the camera is taking
+# pictures it will run the bootloader each time. Please power the camera
+# from something other than USB so the bootloader doesn't run.
+
+import pyb, machine, sensor, image, os
+
+# Create and init RTC object. This will allow us to set the current time for
+# the RTC and let us set an interrupt to wake up later on.
+rtc = pyb.RTC()
+newFile = False
+
+try:
+    os.stat('time.txt')
+except OSError: # If the log file doesn't exist then set the RTC and set newFile to True
+    # datetime format: year, month, day, weekday (Monday=1, Sunday=7),
+    # hours (24 hour clock), minutes, seconds, subseconds (counts down from 255 to 0)
+    rtc.datetime((2018, 3, 9, 5, 13, 0, 0, 0))
+    newFile = True
+
+# Extract the date and time from the RTC object.
+dateTime = rtc.datetime()
+year = str(dateTime[0])
+month = '%02d' % dateTime[1]
+day = '%02d' % dateTime[2]
+hour = '%02d' % dateTime[4]
+minute = '%02d' % dateTime[5]
+second = '%02d' % dateTime[6]
+subSecond = str(dateTime[7])
+
+newName='I'+year+month+day+hour+minute+second # Image file name based on RTC
+
+# Enable RTC interrupts every 10 seconds. The camera will RESET after wakeup from deepsleep mode.
+rtc.wakeup(10000)
+
+BLUE_LED_PIN = 3
+
+sensor.reset() # Initialize the camera sensor.
+sensor.set_pixformat(sensor.GRAYSCALE)
+sensor.set_framesize(sensor.VGA)
+sensor.skip_frames(time = 1000) # Let new settings take effect.
+
+# Let folks know we are about to take a picture.
+pyb.LED(BLUE_LED_PIN).on()
+
+if(newFile): # If log file does not exist then create it.
+    with open('time.txt', 'a') as timeFile: # Write text file to keep track of date, time and image number.
+        timeFile.write('Date and time format: year, month, day, hours, minutes, seconds, subseconds' + '\n')
+        timeFile.write(newName + ',' + year + ',' + month + ',' + day + ',' + hour + ',' + minute + ',' + second + ',' + subSecond + '\n')
+else:
+    with open('time.txt', 'a') as timeFile: # Append date, time and image number to the text file.
+        timeFile.write(newName + ',' + year + ',' + month + ',' + day + ',' + hour + ',' + minute + ',' + second + ',' + subSecond + '\n')
+
+if not "images" in os.listdir(): os.mkdir("images") # Make an images directory
+
+# Take photo and save to SD card
+img = sensor.snapshot()
+img.save('images/' + newName, quality=90)
+pyb.LED(BLUE_LED_PIN).off()
+
+# Enter Deepsleep Mode (i.e. the OpenMV Cam effectively turns itself off except for the RTC).
+machine.deepsleep()
diff --git a/scripts/examples/Arduino/Nicla-Vision/06-Video-Recording/gif.py b/scripts/examples/Arduino/Nicla-Vision/06-Video-Recording/gif.py
new file mode 100644
index 000000000..35f0933d7
--- /dev/null
+++ b/scripts/examples/Arduino/Nicla-Vision/06-Video-Recording/gif.py
@@ -0,0 +1,37 @@
+# GIF Video Recording Example
+#
+# Note: You will need an SD card to run this example.
+#
+# You can use your OpenMV Cam to record gif files. You can either feed the
+# recorder object RGB565 frames or Grayscale frames. Use photo editing software
+# like GIMP to compress and optimize the Gif before uploading it to the web.
+
+import sensor, image, time, gif, pyb
+
+RED_LED_PIN = 1
+BLUE_LED_PIN = 3
+
+sensor.reset() # Initialize the camera sensor.
+sensor.set_pixformat(sensor.RGB565) # or sensor.GRAYSCALE
+sensor.set_framesize(sensor.QQVGA) # or sensor.QVGA (or others)
+sensor.skip_frames(time = 2000) # Let new settings take effect.
+clock = time.clock() # Tracks FPS.
+
+pyb.LED(RED_LED_PIN).on()
+sensor.skip_frames(time = 2000) # Give the user time to get ready.
+
+pyb.LED(RED_LED_PIN).off()
+pyb.LED(BLUE_LED_PIN).on()
+
+g = gif.Gif("example.gif", loop=True)
+
+print("You're on camera!")
+for i in range(100):
+    clock.tick()
+    # clock.avg() returns the milliseconds between frames - gif delay is in
+    g.add_frame(sensor.snapshot(), delay=int(clock.avg()/10)) # centiseconds.
+    print(clock.fps())
+
+g.close()
+pyb.LED(BLUE_LED_PIN).off()
+print("Done! Reset the camera to see the saved recording.")
diff --git a/scripts/examples/Arduino/Nicla-Vision/06-Video-Recording/gif_on_face_detection.py b/scripts/examples/Arduino/Nicla-Vision/06-Video-Recording/gif_on_face_detection.py
new file mode 100644
index 000000000..0732ca1c8
--- /dev/null
+++ b/scripts/examples/Arduino/Nicla-Vision/06-Video-Recording/gif_on_face_detection.py
@@ -0,0 +1,65 @@
+# GIF Video Recording on Face Detection Example
+#
+# Note: You will need an SD card to run this example.
+#
+# You can use your OpenMV Cam to record gif files. You can either feed the
+# recorder object RGB565 frames or Grayscale frames. Use photo editing software
+# like GIMP to compress and optimize the Gif before uploading it to the web.
+#
+# This example demonstrates using face tracking on your OpenMV Cam to take a
+# gif.
+
+import sensor, image, time, gif, pyb
+
+RED_LED_PIN = 1
+BLUE_LED_PIN = 3
+
+sensor.reset() # Initialize the camera sensor.
+sensor.set_pixformat(sensor.GRAYSCALE) # or sensor.RGB565
+sensor.set_framesize(sensor.QQVGA) # or sensor.HQVGA (or others)
+sensor.skip_frames(time = 2000) # Let new settings take effect.
+
+# Load up a face detection HaarCascade. This is an object that your OpenMV Cam
+# can use to detect faces using the find_features() method below. Your OpenMV
+# Cam has a frontalface HaarCascade built-in. By default, all the stages of the
+# HaarCascade are loaded. However, you can adjust the number of stages to speed
+# up processing at the expense of accuracy. The frontalface HaarCascade has 25
+# stages.
+face_cascade = image.HaarCascade("frontalface", stages=25)
+
+while(True):
+
+    pyb.LED(RED_LED_PIN).on()
+    print("About to start detecting faces...")
+    sensor.skip_frames(time = 2000) # Give the user time to get ready.
+
+    pyb.LED(RED_LED_PIN).off()
+    print("Now detecting faces!")
+    pyb.LED(BLUE_LED_PIN).on()
+
+    diff = 10 # We'll say we detected a face after 10 frames.
+    while(diff):
+        img = sensor.snapshot()
+        # Threshold can be between 0.0 and 1.0. A higher threshold results in a
+        # higher detection rate with more false positives. The scale value
+        # controls the matching scale allowing you to detect smaller faces.
+        faces = img.find_features(face_cascade, threshold=0.5, scale_factor=1.5)
+
+        if faces:
+            diff -= 1
+            for r in faces:
+                img.draw_rectangle(r)
+
+    g = gif.Gif("example-%d.gif" % pyb.rng(), loop=True)
+
+    clock = time.clock() # Tracks FPS.
+    print("You're on camera!")
+    for i in range(100):
+        clock.tick()
+        # clock.avg() returns the milliseconds between frames - gif delay is in
+        g.add_frame(sensor.snapshot(), delay=int(clock.avg()/10)) # centiseconds.
+        print(clock.fps())
+
+    g.close()
+    pyb.LED(BLUE_LED_PIN).off()
+    print("Restarting...")
diff --git a/scripts/examples/Arduino/Nicla-Vision/06-Video-Recording/gif_on_movement.py b/scripts/examples/Arduino/Nicla-Vision/06-Video-Recording/gif_on_movement.py
new file mode 100644
index 000000000..81e013053
--- /dev/null
+++ b/scripts/examples/Arduino/Nicla-Vision/06-Video-Recording/gif_on_movement.py
@@ -0,0 +1,58 @@
+# GIF Video Recording on Movement Example
+#
+# Note: You will need an SD card to run this example.
+#
+# You can use your OpenMV Cam to record gif files. You can either feed the
+# recorder object RGB565 frames or Grayscale frames. Use photo editing software
+# like GIMP to compress and optimize the Gif before uploading it to the web.
+#
+# This example demonstrates using frame differencing with your OpenMV Cam to do
+# motion detection. After motion is detected your OpenMV Cam will take video.
+
+import sensor, image, time, gif, pyb, os
+
+RED_LED_PIN = 1
+BLUE_LED_PIN = 3
+
+sensor.reset() # Initialize the camera sensor.
+sensor.set_pixformat(sensor.RGB565) # or sensor.GRAYSCALE
+sensor.set_framesize(sensor.QQVGA) # or sensor.QVGA (or others)
+sensor.skip_frames(time = 2000) # Let new settings take effect.
+
+if not "temp" in os.listdir(): os.mkdir("temp") # Make a temp directory
+
+while(True):
+
+    pyb.LED(RED_LED_PIN).on()
+    print("About to save background image...")
+    sensor.skip_frames(time = 2000) # Give the user time to get ready.
+
+    pyb.LED(RED_LED_PIN).off()
+    sensor.snapshot().save("temp/bg.bmp")
+    print("Saved background image - Now detecting motion!")
+    pyb.LED(BLUE_LED_PIN).on()
+
+    diff = 10 # We'll say we detected motion after 10 frames of motion.
+    while(diff):
+        img = sensor.snapshot()
+        img.difference("temp/bg.bmp")
+        stats = img.statistics()
+        # stats[5] is the max of the lighting color channel. The code below
+        # triggers when the lighting max for the whole image goes above 20.
+        # The lighting difference maximum should be zero normally.
+        if (stats[5] > 20):
+            diff -= 1
+
+    g = gif.Gif("example-%d.gif" % pyb.rng(), loop=True)
+
+    clock = time.clock() # Tracks FPS.
+    print("You're on camera!")
+    for i in range(100):
+        clock.tick()
+        # clock.avg() returns the milliseconds between frames - gif delay is in
+        g.add_frame(sensor.snapshot(), delay=int(clock.avg()/10)) # centiseconds.
+        print(clock.fps())
+
+    g.close()
+    pyb.LED(BLUE_LED_PIN).off()
+    print("Restarting...")
diff --git a/scripts/examples/Arduino/Nicla-Vision/06-Video-Recording/imageio_memory.py b/scripts/examples/Arduino/Nicla-Vision/06-Video-Recording/imageio_memory.py
new file mode 100644
index 000000000..d64341be4
--- /dev/null
+++ b/scripts/examples/Arduino/Nicla-Vision/06-Video-Recording/imageio_memory.py
@@ -0,0 +1,33 @@
+# Image Memory Stream I/O Example
+#
+# This example shows how to use the ImageIO stream to record frames in memory and play them back.
+# Note: While this should work on any board, the board should have SDRAM for this to be of any use.
+import sensor, image, time
+
+# Number of frames to pre-allocate and record
+N_FRAMES = 500
+
+sensor.reset()
+sensor.set_pixformat(sensor.RGB565)
+sensor.set_framesize(sensor.QVGA)
+
+# This frame size must match the image size passed to ImageIO
+sensor.set_windowing((120, 120))
+sensor.skip_frames(time = 2000)
+
+clock = time.clock()
+
+# Write to memory stream
+stream = image.ImageIO((120, 120, sensor.RGB565), N_FRAMES)
+
+for i in range(0, N_FRAMES):
+    clock.tick()
+    stream.write(sensor.snapshot())
+    print(clock.fps())
+
+while (True):
+    # Rewind stream and play back
+    stream.seek(0)
+    for i in range(0, N_FRAMES):
+        img = stream.read(copy_to_fb=True, pause=True)
+        # Do machine vision algorithms on the image here.
diff --git a/scripts/examples/Arduino/Nicla-Vision/06-Video-Recording/imageio_read.py b/scripts/examples/Arduino/Nicla-Vision/06-Video-Recording/imageio_read.py
new file mode 100644
index 000000000..430c2fe30
--- /dev/null
+++ b/scripts/examples/Arduino/Nicla-Vision/06-Video-Recording/imageio_read.py
@@ -0,0 +1,32 @@
+# Image Reader Example
+#
+# USE THIS EXAMPLE WITH A uSD CARD!
+#
+# This example shows how to use the Image Reader object to replay snapshots of what your
+# OpenMV Cam saw saved by the Image Writer object for testing machine vision algorithms.
+
+# Altered to allow full speed reading from the SD card for extraction of sequences to the network etc.
+# Set the new pause parameter to False.
+
+import sensor, image, time
+
+snapshot_source = False # Set to True once finished to pull data from the sensor.
+
+sensor.reset()
+sensor.set_pixformat(sensor.RGB565)
+sensor.set_framesize(sensor.QQVGA)
+sensor.skip_frames(time = 2000)
+clock = time.clock()
+
+stream = None
+if snapshot_source == False:
+    stream = image.ImageIO("/stream.bin", "r")
+
+while(True):
+    clock.tick()
+    if snapshot_source:
+        img = sensor.snapshot()
+    else:
+        img = stream.read(copy_to_fb=True, loop=True, pause=True)
+    # Do machine vision algorithms on the image here.
+    print(clock.fps())
diff --git a/scripts/examples/Arduino/Nicla-Vision/06-Video-Recording/imageio_write.py b/scripts/examples/Arduino/Nicla-Vision/06-Video-Recording/imageio_write.py
new file mode 100644
index 000000000..aa3f5bff0
--- /dev/null
+++ b/scripts/examples/Arduino/Nicla-Vision/06-Video-Recording/imageio_write.py
@@ -0,0 +1,36 @@
+# Image Writer Example
+#
+# USE THIS EXAMPLE WITH A uSD CARD! Reset the camera after recording to see the file.
+#
+# This example shows how to use the Image Writer object to record snapshots of what your
+# OpenMV Cam sees for later analysis using the Image Reader object. Images written to disk
+# by the Image Writer object are stored in a simple file format readable by your OpenMV Cam.
+
+import sensor, image, pyb, time
+
+record_time = 10000 # 10 seconds in milliseconds
+
+sensor.reset()
+sensor.set_pixformat(sensor.RGB565)
+sensor.set_framesize(sensor.QQVGA)
+sensor.skip_frames(time = 2000)
+clock = time.clock()
+
+stream = image.ImageIO("/stream.bin", "w")
+
+# Red LED on means we are capturing frames.
+pyb.LED(1).on()
+
+start = pyb.millis()
+while pyb.elapsed_millis(start) < record_time:
+    clock.tick()
+    img = sensor.snapshot()
+    # Modify the image if you feel like here...
+    stream.write(img)
+    print(clock.fps())
+
+stream.close()
+
+# Blue LED on means we are done.
+pyb.LED(1).off()
+pyb.LED(3).on()
diff --git a/scripts/examples/Arduino/Nicla-Vision/06-Video-Recording/mjpeg.py b/scripts/examples/Arduino/Nicla-Vision/06-Video-Recording/mjpeg.py
new file mode 100644
index 000000000..1e3f732e4
--- /dev/null
+++ b/scripts/examples/Arduino/Nicla-Vision/06-Video-Recording/mjpeg.py
@@ -0,0 +1,37 @@
+# MJPEG Video Recording Example
+#
+# Note: You will need an SD card to run this demo.
+#
+# You can use your OpenMV Cam to record mjpeg files. You can either feed the
+# recorder object JPEG frames or RGB565/Grayscale frames. Once you've finished
+# recording an Mjpeg file you can use VLC to play it. If you are on Ubuntu then
+# the built-in video player will work too.
+
+import sensor, image, time, mjpeg, pyb
+
+RED_LED_PIN = 1
+BLUE_LED_PIN = 3
+
+sensor.reset() # Initialize the camera sensor.
+sensor.set_pixformat(sensor.RGB565) # or sensor.GRAYSCALE
+sensor.set_framesize(sensor.QVGA) # or sensor.QQVGA (or others)
+sensor.skip_frames(time = 2000) # Let new settings take effect.
+clock = time.clock() # Tracks FPS.
+
+pyb.LED(RED_LED_PIN).on()
+sensor.skip_frames(time = 2000) # Give the user time to get ready.
+
+pyb.LED(RED_LED_PIN).off()
+pyb.LED(BLUE_LED_PIN).on()
+
+m = mjpeg.Mjpeg("example.mjpeg")
+
+print("You're on camera!")
+for i in range(200):
+    clock.tick()
+    m.add_frame(sensor.snapshot())
+    print(clock.fps())
+
+m.close(clock.fps())
+pyb.LED(BLUE_LED_PIN).off()
+print("Done! Reset the camera to see the saved recording.")
diff --git a/scripts/examples/Arduino/Nicla-Vision/06-Video-Recording/mjpeg_on_face_detection.py b/scripts/examples/Arduino/Nicla-Vision/06-Video-Recording/mjpeg_on_face_detection.py
new file mode 100644
index 000000000..fd567a4f6
--- /dev/null
+++ b/scripts/examples/Arduino/Nicla-Vision/06-Video-Recording/mjpeg_on_face_detection.py
@@ -0,0 +1,65 @@
+# MJPEG Video Recording on Face Detection Example
+#
+# Note: You will need an SD card to run this example.
+#
+# You can use your OpenMV Cam to record mjpeg files. You can either feed the
+# recorder object JPEG frames or RGB565/Grayscale frames. Once you've finished
+# recording an Mjpeg file you can use VLC to play it. If you are on Ubuntu then
+# the built-in video player will work too.
+#
+# This example demonstrates using face tracking on your OpenMV Cam to take an
+# mjpeg.
+
+import sensor, image, time, mjpeg, pyb
+
+RED_LED_PIN = 1
+BLUE_LED_PIN = 3
+
+sensor.reset() # Initialize the camera sensor.
+sensor.set_pixformat(sensor.GRAYSCALE) # or sensor.RGB565
+sensor.set_framesize(sensor.QQVGA) # or sensor.HQVGA (or others)
+sensor.skip_frames(time = 2000) # Let new settings take effect.
+
+# Load up a face detection HaarCascade. This is an object that your OpenMV Cam
+# can use to detect faces using the find_features() method below. Your OpenMV
+# Cam has a frontalface HaarCascade built-in. By default, all the stages of the
+# HaarCascade are loaded. However, you can adjust the number of stages to speed
+# up processing at the expense of accuracy. The frontalface HaarCascade has 25
+# stages.
+face_cascade = image.HaarCascade("frontalface", stages=25)
+
+while(True):
+
+    pyb.LED(RED_LED_PIN).on()
+    print("About to start detecting faces...")
+    sensor.skip_frames(time = 2000) # Give the user time to get ready.
+
+    pyb.LED(RED_LED_PIN).off()
+    print("Now detecting faces!")
+    pyb.LED(BLUE_LED_PIN).on()
+
+    diff = 10 # We'll say we detected a face after 10 frames.
+    while(diff):
+        img = sensor.snapshot()
+        # Threshold can be between 0.0 and 1.0. A higher threshold results in a
+        # higher detection rate with more false positives. The scale value
+        # controls the matching scale allowing you to detect smaller faces.
+        faces = img.find_features(face_cascade, threshold=0.5, scale_factor=1.5)
+
+        if faces:
+            diff -= 1
+            for r in faces:
+                img.draw_rectangle(r)
+
+    m = mjpeg.Mjpeg("example-%d.mjpeg" % pyb.rng())
+
+    clock = time.clock() # Tracks FPS.
+    print("You're on camera!")
+    for i in range(200):
+        clock.tick()
+        m.add_frame(sensor.snapshot())
+        print(clock.fps())
+
+    m.close(clock.fps())
+    pyb.LED(BLUE_LED_PIN).off()
+    print("Restarting...")
diff --git a/scripts/examples/Arduino/Nicla-Vision/06-Video-Recording/mjpeg_on_movement.py b/scripts/examples/Arduino/Nicla-Vision/06-Video-Recording/mjpeg_on_movement.py
new file mode 100644
index 000000000..f74b0a8ae
--- /dev/null
+++ b/scripts/examples/Arduino/Nicla-Vision/06-Video-Recording/mjpeg_on_movement.py
@@ -0,0 +1,60 @@
+# MJPEG Video Recording on Movement Example
+#
+# Note: You will need an SD card to run this example.
+#
+# You can use your OpenMV Cam to record mjpeg files. You can either feed the
+# recorder object JPEG frames or RGB565/Grayscale frames. Once you've finished
+# recording an Mjpeg file you can use VLC to play it. If you are on Ubuntu then
+# the built-in video player will work too.
+#
+# This example demonstrates using frame differencing with your OpenMV Cam to do
+# motion detection. After motion is detected your OpenMV Cam will take video.
+
+import sensor, image, time, mjpeg, pyb, os
+
+RED_LED_PIN = 1
+BLUE_LED_PIN = 3
+
+sensor.reset() # Initialize the camera sensor.
+sensor.set_pixformat(sensor.RGB565) # or sensor.GRAYSCALE
+sensor.set_framesize(sensor.QVGA) # or sensor.QQVGA (or others)
+sensor.skip_frames(time = 2000) # Let new settings take effect.
+
+if not "temp" in os.listdir(): os.mkdir("temp") # Make a temp directory
+
+while(True):
+
+    pyb.LED(RED_LED_PIN).on()
+    print("About to save background image...")
+    sensor.skip_frames(time = 2000) # Give the user time to get ready.
+
+    pyb.LED(RED_LED_PIN).off()
+    sensor.snapshot().save("temp/bg.bmp")
+    print("Saved background image - Now detecting motion!")
+    pyb.LED(BLUE_LED_PIN).on()
+
+    diff = 10 # We'll say we detected motion after 10 frames of motion.
+    while(diff):
+        img = sensor.snapshot()
+        img.difference("temp/bg.bmp")
+        stats = img.statistics()
+        # stats[5] is the max of the lighting color channel. The code below
+        # triggers when the lighting max for the whole image goes above 20.
+        # The lighting difference maximum should be zero normally.
+        if (stats[5] > 20):
+            diff -= 1
+
+    m = mjpeg.Mjpeg("example-%d.mjpeg" % pyb.rng())
+
+    clock = time.clock() # Tracks FPS.
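+    # Record 200 frames below. m.close() is given the measured FPS, which is
+    # stored in the MJPEG file so players know how fast to play it back.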
+    print("You're on camera!")
+    for i in range(200):
+        clock.tick()
+        m.add_frame(sensor.snapshot())
+        print(clock.fps())
+
+    m.close(clock.fps())
+    pyb.LED(BLUE_LED_PIN).off()
+    print("Restarting...")
diff --git a/scripts/examples/Arduino/Nicla-Vision/07-Face-Detection/face_detection.py b/scripts/examples/Arduino/Nicla-Vision/07-Face-Detection/face_detection.py
new file mode 100644
index 000000000..7648ea0c7
--- /dev/null
+++ b/scripts/examples/Arduino/Nicla-Vision/07-Face-Detection/face_detection.py
@@ -0,0 +1,46 @@
+# Face Detection Example
+#
+# This example shows off the built-in face detection feature of the OpenMV Cam.
+#
+# Face detection works by using the Haar Cascade feature detector on an image. A
+# Haar Cascade is a series of simple area contrast checks. For the built-in
+# frontalface detector there are 25 stages of checks with each stage having
+# hundreds of checks apiece. Haar Cascades run fast because later stages are
+# only evaluated if previous stages pass. Additionally, your OpenMV Cam uses
+# a data structure called the integral image to quickly execute each area
+# contrast check in constant time (the reason for feature detection being
+# grayscale only is because of the space requirement for the integral image).
+
+import sensor, time, image
+
+# Reset sensor
+sensor.reset()
+sensor.set_framesize(sensor.QVGA)
+sensor.set_pixformat(sensor.GRAYSCALE)
+
+# Load Haar Cascade
+# By default this will use all stages; fewer stages is faster but less accurate.
+face_cascade = image.HaarCascade("frontalface", stages=25)
+print(face_cascade)
+
+# FPS clock
+clock = time.clock()
+
+while (True):
+    clock.tick()
+
+    # Capture snapshot
+    img = sensor.snapshot()
+
+    # Find objects.
+    # Note: Lower scale factor scales-down the image more and detects smaller objects.
+    # Higher threshold results in a higher detection rate, with more false positives.
+    objects = img.find_features(face_cascade, threshold=0.75, scale_factor=1.25)
+
+    # Draw objects
+    for r in objects:
+        img.draw_rectangle(r)
+
+    # Print FPS.
+    # Note: Actual FPS is higher, streaming the FB makes it slower.
+    print(clock.fps())
diff --git a/scripts/examples/Arduino/Nicla-Vision/07-Face-Detection/face_recognition.py b/scripts/examples/Arduino/Nicla-Vision/07-Face-Detection/face_recognition.py
new file mode 100644
index 000000000..8a514664a
--- /dev/null
+++ b/scripts/examples/Arduino/Nicla-Vision/07-Face-Detection/face_recognition.py
@@ -0,0 +1,29 @@
+# Face recognition with LBP descriptors.
+# See Timo Ahonen's "Face Recognition with Local Binary Patterns".
+#
+# Before running the example:
+# 1) Download the AT&T faces database http://www.cl.cam.ac.uk/Research/DTG/attarchive/pub/data/att_faces.zip
+# 2) Extract and copy the orl_faces directory to the SD card root.
+#
+# NOTE: This is just a PoC implementation of the paper mentioned above, it does not work well in real life conditions.
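+# The script builds an LBP descriptor from one image of subject s2 and then
+# prints the average descriptor distance to images 2-10 of every subject.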
+
+import sensor, time, image
+
+SUB = "s2"
+NUM_SUBJECTS = 5
+NUM_SUBJECTS_IMGS = 10
+
+img = image.Image("orl_faces/%s/1.pgm"%(SUB)).mask_ellipse()
+d0 = img.find_lbp((0, 0, img.width(), img.height()))
+img = None
+
+print("")
+for s in range(1, NUM_SUBJECTS+1):
+    dist = 0
+    for i in range(2, NUM_SUBJECTS_IMGS+1):
+        img = image.Image("orl_faces/s%d/%d.pgm"%(s, i)).mask_ellipse()
+        d1 = img.find_lbp((0, 0, img.width(), img.height()))
+        dist += image.match_descriptor(d0, d1)
+    print("Average dist for subject %d: %d"%(s, dist/(NUM_SUBJECTS_IMGS-1))) # images 2-10 -> 9 comparisons
diff --git a/scripts/examples/Arduino/Nicla-Vision/07-Face-Detection/face_tracking.py b/scripts/examples/Arduino/Nicla-Vision/07-Face-Detection/face_tracking.py
new file mode 100644
index 000000000..c3bc6e481
--- /dev/null
+++ b/scripts/examples/Arduino/Nicla-Vision/07-Face-Detection/face_tracking.py
@@ -0,0 +1,63 @@
+# Face Tracking Example
+#
+# This example shows off using the keypoints feature of your OpenMV Cam to track
+# a face after it has been detected by a Haar Cascade. The first part of this
+# script finds a face in the image using the frontalface Haar Cascade.
+# After that the script uses the keypoints feature to automatically learn your
+# face and track it. Keypoints can be used to automatically track anything.
+import sensor, time, image
+
+# Reset sensor
+sensor.reset()
+sensor.set_pixformat(sensor.GRAYSCALE)
+sensor.set_framesize(sensor.QVGA)
+sensor.skip_frames(time = 2000)
+
+# Load Haar Cascade
+# By default this will use all stages; fewer stages is faster but less accurate.
+face_cascade = image.HaarCascade("frontalface", stages=25)
+print(face_cascade)
+
+# First set of keypoints
+kpts1 = None
+
+# Find a face!
+while (kpts1 == None):
+    img = sensor.snapshot()
+    img.draw_string(0, 0, "Looking for a face...")
+    # Find faces
+    objects = img.find_features(face_cascade, threshold=0.5, scale_factor=1.25)
+    if objects:
+        # Expand the ROI by 31 pixels in every direction
+        face = (objects[0][0]-31, objects[0][1]-31, objects[0][2]+31*2, objects[0][3]+31*2)
+        # Extract keypoints using the detected face size as the ROI
+        kpts1 = img.find_keypoints(threshold=10, scale_factor=1.1, max_keypoints=100, roi=face)
+        # Draw a rectangle around the first face
+        img.draw_rectangle(objects[0])
+
+# Draw keypoints
+print(kpts1)
+img.draw_keypoints(kpts1, size=24)
+img = sensor.snapshot()
+time.sleep_ms(2000)
+
+# FPS clock
+clock = time.clock()
+
+while (True):
+    clock.tick()
+    img = sensor.snapshot()
+    # Extract keypoints from the whole frame
+    kpts2 = img.find_keypoints(threshold=10, scale_factor=1.1, max_keypoints=100, normalized=True)
+
+    if (kpts2):
+        # Match the first set of keypoints with the second one
+        c = image.match_descriptor(kpts1, kpts2, threshold=85)
+        match = c[6] # c[6] contains the number of matches.
+        if (match > 5):
+            img.draw_rectangle(c[2:6])
+            img.draw_cross(c[0], c[1], size=10)
+            print(kpts2, "matched:%d dt:%d"%(match, c[7]))
+
+    # Draw FPS
+    img.draw_string(0, 0, "FPS:%.2f"%(clock.fps()))
diff --git a/scripts/examples/Arduino/Nicla-Vision/09-Feature-Detection/edges.py b/scripts/examples/Arduino/Nicla-Vision/09-Feature-Detection/edges.py
new file mode 100644
index 000000000..6faa6651f
--- /dev/null
+++ b/scripts/examples/Arduino/Nicla-Vision/09-Feature-Detection/edges.py
@@ -0,0 +1,22 @@
+# Edge detection with Canny:
+#
+# This example demonstrates the Canny edge detector.
+import sensor, image, time
+
+sensor.reset() # Initialize the camera sensor.
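+# Note: the threshold tuple passed to find_edges() below is the (low, high)
+# hysteresis threshold pair used by the Canny algorithm.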
+sensor.set_pixformat(sensor.GRAYSCALE) # or sensor.RGB565
+sensor.set_framesize(sensor.QQVGA) # or sensor.QVGA (or others)
+sensor.skip_frames(time = 2000) # Let new settings take effect.
+
+clock = time.clock() # Tracks FPS.
+while(True):
+    clock.tick() # Track elapsed milliseconds between snapshots().
+    img = sensor.snapshot() # Take a picture and return the image.
+    # Use Canny edge detector
+    img.find_edges(image.EDGE_CANNY, threshold=(50, 80))
+    # Faster simpler edge detection
+    #img.find_edges(image.EDGE_SIMPLE, threshold=(100, 255))
+    print(clock.fps()) # Note: Your OpenMV Cam runs about half as fast while
+    # connected to your computer. The FPS should increase once disconnected.
diff --git a/scripts/examples/Arduino/Nicla-Vision/09-Feature-Detection/find_circles.py b/scripts/examples/Arduino/Nicla-Vision/09-Feature-Detection/find_circles.py
new file mode 100644
index 000000000..a208b4a5b
--- /dev/null
+++ b/scripts/examples/Arduino/Nicla-Vision/09-Feature-Detection/find_circles.py
@@ -0,0 +1,39 @@
+# Find Circles Example
+#
+# This example shows off how to find circles in the image using the Hough
+# Transform. https://en.wikipedia.org/wiki/Circle_Hough_Transform
+#
+# Note that the find_circles() method will only find circles which are completely
+# inside of the image. Circles which go outside of the image/roi are ignored...
+
+import sensor, image, time
+
+sensor.reset()
+sensor.set_pixformat(sensor.RGB565) # grayscale is faster
+sensor.set_framesize(sensor.QQVGA)
+sensor.skip_frames(time = 2000)
+clock = time.clock()
+
+while(True):
+    clock.tick()
+    img = sensor.snapshot().lens_corr(1.8)
+
+    # Circle objects have four values: x, y, r (radius), and magnitude. The
+    # magnitude is the strength of the detection of the circle. Higher is
+    # better...
+
+    # `threshold` controls how many circles are found. Increase its value
+    # to decrease the number of circles detected...
+
+    # `x_margin`, `y_margin`, and `r_margin` control the merging of similar
+    # circles in the x, y, and r (radius) directions.
+
+    # r_min, r_max, and r_step control which circle radii are tested.
+    # Shrinking the number of tested radii yields a big performance boost.
+
+    for c in img.find_circles(threshold = 2000, x_margin = 10, y_margin = 10, r_margin = 10,
+                              r_min = 2, r_max = 100, r_step = 2):
+        img.draw_circle(c.x(), c.y(), c.r(), color = (255, 0, 0))
+        print(c)
+
+    print("FPS %f" % clock.fps())
diff --git a/scripts/examples/Arduino/Nicla-Vision/09-Feature-Detection/find_line_segments.py b/scripts/examples/Arduino/Nicla-Vision/09-Feature-Detection/find_line_segments.py
new file mode 100644
index 000000000..4aa42cf17
--- /dev/null
+++ b/scripts/examples/Arduino/Nicla-Vision/09-Feature-Detection/find_line_segments.py
@@ -0,0 +1,39 @@
+# Find Line Segments Example
+#
+# This example shows off how to find line segments in the image. For each line
+# segment found in the image, a line object is returned which includes the line's rotation.
+
+# find_line_segments() finds finite length lines (but is slow).
+# Use find_lines() to find infinite lines (which is fast).
+
+enable_lens_corr = False # turn on for straighter lines...
+
+import sensor, image, time
+
+sensor.reset()
+sensor.set_pixformat(sensor.RGB565) # grayscale is faster
+sensor.set_framesize(sensor.QQVGA)
+sensor.skip_frames(time = 2000)
+clock = time.clock()
+
+# All lines also have `x1()`, `y1()`, `x2()`, and `y2()` methods to get their end-points
+# and a `line()` method to get all the above as one 4 value tuple for `draw_line()`.
+
+while(True):
+    clock.tick()
+    img = sensor.snapshot()
+    if enable_lens_corr: img.lens_corr(1.8) # for 2.8mm lens...
+
+    # `merge_distance` controls the merging of nearby lines. At 0 (the default), no
+    # merging is done. At 1, any line 1 pixel away from another is merged... and so
+    # on as you increase this value. You may wish to merge lines as line segment
+    # detection produces a lot of line segment results.
+
+    # `max_theta_diff` controls the maximum amount of rotation difference between
+    # any two lines about to be merged. The default setting allows for 15 degrees.
+
+    for l in img.find_line_segments(merge_distance = 0, max_theta_diff = 5):
+        img.draw_line(l.line(), color = (255, 0, 0))
+        # print(l)
+
+    print("FPS %f" % clock.fps())
diff --git a/scripts/examples/Arduino/Nicla-Vision/09-Feature-Detection/find_lines.py b/scripts/examples/Arduino/Nicla-Vision/09-Feature-Detection/find_lines.py
new file mode 100644
index 000000000..6c45fcfbc
--- /dev/null
+++ b/scripts/examples/Arduino/Nicla-Vision/09-Feature-Detection/find_lines.py
@@ -0,0 +1,59 @@
+# Find Lines Example
+#
+# This example shows off how to find lines in the image. For each line found
+# in the image, a line object is returned which includes the line's rotation.
+
+# Note: Line detection is done by using the Hough Transform:
+# http://en.wikipedia.org/wiki/Hough_transform
+# Please read about it above for more information on what `theta` and `rho` are.
+
+# find_lines() finds infinite length lines. Use find_line_segments() to find non-infinite lines.
+
+enable_lens_corr = False # turn on for straighter lines...
+
+import sensor, image, time
+
+sensor.reset()
+sensor.set_pixformat(sensor.RGB565) # grayscale is faster
+sensor.set_framesize(sensor.QQVGA)
+sensor.skip_frames(time = 2000)
+clock = time.clock()
+
+# All line objects have a `theta()` method to get their rotation angle in degrees.
+# You can filter lines based on their rotation angle.
+
+min_degree = 0
+max_degree = 179
+
+# All lines also have `x1()`, `y1()`, `x2()`, and `y2()` methods to get their end-points
+# and a `line()` method to get all the above as one 4 value tuple for `draw_line()`.
+
+while(True):
+    clock.tick()
+    img = sensor.snapshot()
+    if enable_lens_corr: img.lens_corr(1.8) # for 2.8mm lens...
+
+    # `threshold` controls how many lines in the image are found. Only lines with
+    # edge difference magnitude sums greater than `threshold` are detected...
+
+    # More about `threshold` - each pixel in the image contributes a magnitude value
+    # to a line. The sum of all contributions is the magnitude for that line. Then
+    # when lines are merged their magnitudes are added together. Note that `threshold`
+    # filters out lines with low magnitudes before merging. To see the magnitude of
+    # un-merged lines set `theta_margin` and `rho_margin` to 0...
+
+    # `theta_margin` and `rho_margin` control merging similar lines. If two lines'
+    # theta and rho value differences are less than the margins then they are merged.
+
+    for l in img.find_lines(threshold = 1000, theta_margin = 25, rho_margin = 25):
+        if (min_degree <= l.theta()) and (l.theta() <= max_degree):
+            img.draw_line(l.line(), color = (255, 0, 0))
+            # print(l)
+
+    print("FPS %f" % clock.fps())
+
+# About negative rho values:
+#
+# A [theta+0:-rho] tuple is the same as [theta+180:+rho].
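+# For example, a line reported as theta=30, rho=-40 describes the same line
+# as theta=210, rho=+40.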
diff --git a/scripts/examples/Arduino/Nicla-Vision/09-Feature-Detection/find_rects.py b/scripts/examples/Arduino/Nicla-Vision/09-Feature-Detection/find_rects.py
new file mode 100644
index 000000000..5fafba626
--- /dev/null
+++ b/scripts/examples/Arduino/Nicla-Vision/09-Feature-Detection/find_rects.py
@@ -0,0 +1,31 @@
+# Find Rects Example
+#
+# This example shows off how to find rectangles in the image using the quad threshold
+# detection code from our April Tags code. The quad threshold detection algorithm
+# detects rectangles in an extremely robust way and is much better than Hough
+# Transform based methods. For example, it can still detect rectangles even when lens
+# distortion causes those rectangles to look bent. Rounded rectangles are no problem!
+# (But, given this, the code will also detect small-radius circles too)...
+
+import sensor, image, time
+
+sensor.reset()
+sensor.set_pixformat(sensor.RGB565) # grayscale is faster (160x120 max on OpenMV-M7)
+sensor.set_framesize(sensor.QQVGA)
+sensor.skip_frames(time = 2000)
+clock = time.clock()
+
+while(True):
+    clock.tick()
+    img = sensor.snapshot()
+
+    # `threshold` below should be set to a high enough value to filter out noise
+    # rectangles detected in the image which have low edge magnitudes. Rectangles
+    # have larger edge magnitudes the larger and more contrasty they are...
+
+    for r in img.find_rects(threshold = 10000):
+        img.draw_rectangle(r.rect(), color = (255, 0, 0))
+        for p in r.corners(): img.draw_circle(p[0], p[1], 5, color = (0, 255, 0))
+        print(r)
+
+    print("FPS %f" % clock.fps())
diff --git a/scripts/examples/Arduino/Nicla-Vision/09-Feature-Detection/hog.py b/scripts/examples/Arduino/Nicla-Vision/09-Feature-Detection/hog.py
new file mode 100644
index 000000000..32fa2c5ab
--- /dev/null
+++ b/scripts/examples/Arduino/Nicla-Vision/09-Feature-Detection/hog.py
@@ -0,0 +1,25 @@
+# Histogram of Oriented Gradients (HoG) Example
+#
+# This example demonstrates HoG visualization.
+#
+# Note: Due to JPEG artifacts, the HoG visualization looks blurry. To see the
+# image without JPEG artifacts, uncomment the lines that save the image to uSD.
+
+import sensor, image, time
+
+sensor.reset()
+sensor.set_framesize(sensor.QVGA)
+sensor.set_pixformat(sensor.GRAYSCALE)
+sensor.skip_frames(time = 2000)
+
+clock = time.clock() # Tracks FPS.
+while (True):
+    clock.tick()
+    img = sensor.snapshot()
+    img.find_hog()
+
+    # Uncomment to save raw FB to file and exit the loop
+    #img.save("/hog.pgm")
+    #break
+
+    print(clock.fps())
diff --git a/scripts/examples/Arduino/Nicla-Vision/09-Feature-Detection/keypoints.py b/scripts/examples/Arduino/Nicla-Vision/09-Feature-Detection/keypoints.py
new file mode 100644
index 000000000..1d2ef1be6
--- /dev/null
+++ b/scripts/examples/Arduino/Nicla-Vision/09-Feature-Detection/keypoints.py
@@ -0,0 +1,53 @@
+# Object tracking with keypoints example.
+# Show the camera an object and then run the script. A set of keypoints will be extracted
+# once and then tracked in the following frames. If you want a new set of keypoints re-run
+# the script. NOTE: see the docs for arguments to tune find_keypoints and match_keypoints.
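+# On the first frame a descriptor (kpts1) is extracted; every later frame is
+# matched against it and the match's bounding rectangle and center are drawn.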
+import sensor, time, image
+
+# Reset sensor
+sensor.reset()
+sensor.set_pixformat(sensor.GRAYSCALE)
+sensor.set_framesize(sensor.QVGA)
+sensor.skip_frames(time = 2000)
+
+def draw_keypoints(img, kpts):
+    if kpts:
+        print(kpts)
+        img.draw_keypoints(kpts)
+        img = sensor.snapshot()
+        time.sleep_ms(1000)
+
+kpts1 = None
+# NOTE: uncomment to load a keypoints descriptor from file
+#kpts1 = image.load_descriptor("/desc.orb")
+#img = sensor.snapshot()
+#draw_keypoints(img, kpts1)
+
+clock = time.clock()
+while (True):
+    clock.tick()
+    img = sensor.snapshot()
+    if (kpts1 == None):
+        # NOTE: By default find_keypoints returns multi-scale keypoints extracted from an image pyramid.
+        kpts1 = img.find_keypoints(max_keypoints=150, threshold=10, scale_factor=1.2)
+        draw_keypoints(img, kpts1)
+    else:
+        # NOTE: When extracting keypoints to match the first descriptor, we use normalized=True to extract
+        # keypoints from the first scale only, which will match one of the scales in the first descriptor.
+        kpts2 = img.find_keypoints(max_keypoints=150, threshold=10, normalized=True)
+        if (kpts2):
+            match = image.match_descriptor(kpts1, kpts2, threshold=85)
+            if (match.count() > 10):
+                # If we have at least n "good matches"
+                # Draw bounding rectangle and cross.
+                img.draw_rectangle(match.rect())
+                img.draw_cross(match.cx(), match.cy(), size=10)
+
+            print(kpts2, "matched:%d dt:%d"%(match.count(), match.theta()))
+            # NOTE: uncomment if you want to draw the keypoints
+            #img.draw_keypoints(kpts2, size=KEYPOINTS_SIZE, matched=True)
+
+    # Draw FPS
+    img.draw_string(0, 0, "FPS:%.2f"%(clock.fps()))
diff --git a/scripts/examples/Arduino/Nicla-Vision/09-Feature-Detection/keypoints_save.py b/scripts/examples/Arduino/Nicla-Vision/09-Feature-Detection/keypoints_save.py
new file mode 100644
index 000000000..f5436f79d
--- /dev/null
+++ b/scripts/examples/Arduino/Nicla-Vision/09-Feature-Detection/keypoints_save.py
@@ -0,0 +1,30 @@
+# Keypoints descriptor example.
+# This example shows how to save a keypoints descriptor to file. Show the camera an object
+# and then run the script. The script will extract and save a keypoints descriptor and the image.
+# You can use the keypoints_editor.py util to remove unwanted keypoints.
+#
+# NOTE: Please reset the camera after running this script to see the new file.
+import sensor, time, image
+
+# Reset sensor
+sensor.reset()
+sensor.set_framesize(sensor.QVGA)
+sensor.set_pixformat(sensor.GRAYSCALE)
+sensor.skip_frames(time = 2000)
+
+FILE_NAME = "desc"
+img = sensor.snapshot()
+# NOTE: See the docs for other arguments
+# NOTE: By default find_keypoints returns multi-scale keypoints extracted from an image pyramid.
+kpts = img.find_keypoints(max_keypoints=150, threshold=10, scale_factor=1.2)
+
+if (kpts == None):
+    raise(Exception("Couldn't find any keypoints!"))
+
+image.save_descriptor(kpts, "/%s.orb"%(FILE_NAME))
+img.save("/%s.pgm"%(FILE_NAME))
+
+img.draw_keypoints(kpts)
+sensor.snapshot()
+time.sleep_ms(1000)
+raise(Exception("Done! Please reset the camera"))
diff --git a/scripts/examples/Arduino/Nicla-Vision/09-Feature-Detection/lbp.py b/scripts/examples/Arduino/Nicla-Vision/09-Feature-Detection/lbp.py
new file mode 100644
index 000000000..3894285c5
--- /dev/null
+++ b/scripts/examples/Arduino/Nicla-Vision/09-Feature-Detection/lbp.py
@@ -0,0 +1,48 @@
+# Local Binary Patterns (LBP) Example
+#
+# This example shows off how to use the local binary pattern feature descriptor
+# on your OpenMV Cam. LBP descriptors work like Freak feature descriptors.
+#
+# WARNING: LBP support needs to be reworked! As of right now this feature needs
+# a lot of work to be made into something useful. This script will remain to show
+# that the functionality exists, but, in its current state, it is inadequate.
+
+import sensor, time, image
+
+# Reset sensor
+sensor.reset()
+sensor.set_framesize(sensor.HQVGA)
+sensor.set_pixformat(sensor.GRAYSCALE)
+
+# Load Haar Cascade
+# By default this will use all stages; fewer stages is faster but less accurate.
+face_cascade = image.HaarCascade("frontalface", stages=25)
+print(face_cascade)
+
+# Skip a few frames to allow the sensor to settle down.
+# Note: This takes more time when executed from the IDE.
+for i in range(0, 30):
+    img = sensor.snapshot()
+    img.draw_string(0, 0, "Please wait...")
+
+d0 = None
+#d0 = image.load_descriptor("/desc.lbp")
+clock = time.clock()
+
+while (True):
+    clock.tick()
+    img = sensor.snapshot()
+
+    objects = img.find_features(face_cascade, threshold=0.5, scale_factor=1.25)
+    if objects:
+        face = objects[0]
+        d1 = img.find_lbp(face)
+        if (d0 == None):
+            d0 = d1
+        else:
+            dist = image.match_descriptor(d0, d1)
+            img.draw_string(0, 10, "Match %d%%"%(dist))
+
+        img.draw_rectangle(face)
+    # Draw FPS
+    img.draw_string(0, 0, "FPS:%.2f"%(clock.fps()))
diff --git a/scripts/examples/Arduino/Nicla-Vision/09-Feature-Detection/linear_regression_fast.py b/scripts/examples/Arduino/Nicla-Vision/09-Feature-Detection/linear_regression_fast.py
new file mode 100644
index 000000000..f200e4ace
--- /dev/null
+++ b/scripts/examples/Arduino/Nicla-Vision/09-Feature-Detection/linear_regression_fast.py
@@ -0,0 +1,43 @@
+# Fast Linear Regression Example
+#
+# This example shows off how to use the get_regression() method on your OpenMV Cam
+# to get the linear regression of a ROI. Using this method you can easily build
+# a robot which can track lines which all point in the same general direction
+# but are not actually connected. Use find_blobs() on lines that are nicely
+# connected for better filtering options and control.
+#
+# This is called the fast linear regression because we use the least-squares
+# method to fit the line. However, this method is NOT GOOD FOR ANY images that
+# have a lot (or really any) outlier points which corrupt the line fit...
+
+THRESHOLD = (0, 100) # Grayscale threshold for dark things...
+BINARY_VISIBLE = True # Does binary first so you can see what the linear regression
+                      # is being run on... might lower FPS though.
+
+import sensor, image, time
+
+sensor.reset()
+sensor.set_pixformat(sensor.GRAYSCALE)
+sensor.set_framesize(sensor.QQVGA)
+sensor.skip_frames(time = 2000)
+clock = time.clock()
+
+while(True):
+    clock.tick()
+    img = sensor.snapshot().binary([THRESHOLD]) if BINARY_VISIBLE else sensor.snapshot()
+
+    # Returns a line object similar to line objects returned by find_lines() and
+    # find_line_segments(). You have x1(), y1(), x2(), y2(), length(),
+    # theta() (rotation in degrees), rho(), and magnitude().
+    #
+    # magnitude() represents how well the linear regression worked. It goes from
+    # (0, INF] where 0 is returned for a circle. The more linear the
+    # scene is the higher the magnitude.
+    line = img.get_regression([(255,255) if BINARY_VISIBLE else THRESHOLD])
+
+    if (line): img.draw_line(line.line(), color = 127)
+    print("FPS %f, mag = %s" % (clock.fps(), str(line.magnitude()) if (line) else "N/A"))
+
+# About negative rho values:
+#
+# A [theta+0:-rho] tuple is the same as [theta+180:+rho].
diff --git a/scripts/examples/Arduino/Nicla-Vision/09-Feature-Detection/linear_regression_robust.py b/scripts/examples/Arduino/Nicla-Vision/09-Feature-Detection/linear_regression_robust.py
new file mode 100644
index 000000000..9f24c618d
--- /dev/null
+++ b/scripts/examples/Arduino/Nicla-Vision/09-Feature-Detection/linear_regression_robust.py
@@ -0,0 +1,45 @@
+# Robust Linear Regression Example
+#
+# This example shows off how to use the get_regression() method on your OpenMV Cam
+# to get the linear regression of a ROI. Using this method you can easily build
+# a robot which can track lines which all point in the same general direction
+# but are not actually connected. Use find_blobs() on lines that are nicely
+# connected for better filtering options and control.
+#
+# We're using the robust=True argument for get_regression() in this script which
+# computes the linear regression using a much more robust algorithm... but potentially
+# much slower. The robust algorithm runs in O(N^2) time on the image. So, YOU NEED
+# TO LIMIT THE NUMBER OF PIXELS the robust algorithm works on or it can actually
+# take seconds for the algorithm to give you a result... THRESHOLD VERY CAREFULLY!
+
+THRESHOLD = (0, 100) # Grayscale threshold for dark things...
+BINARY_VISIBLE = True # Does binary first so you can see what the linear regression
+                      # is being run on... might lower FPS though.
+
+import sensor, image, time
+
+sensor.reset()
+sensor.set_pixformat(sensor.GRAYSCALE)
+sensor.set_framesize(sensor.QQQVGA) # 80x60 (4,800 pixels) - O(N^2) max = 23,040,000.
+sensor.skip_frames(time = 2000)     # WARNING: If you use QQVGA it may take seconds
+clock = time.clock()                # to process a frame sometimes.
+
+while(True):
+    clock.tick()
+    img = sensor.snapshot().binary([THRESHOLD]) if BINARY_VISIBLE else sensor.snapshot()
+
+    # Returns a line object similar to line objects returned by find_lines() and
+    # find_line_segments(). You have x1(), y1(), x2(), y2(), length(),
+    # theta() (rotation in degrees), rho(), and magnitude().
+    #
+    # magnitude() represents how well the linear regression worked. It means something
+    # different for the robust linear regression. In general, the larger the value the
+    # better...
+    line = img.get_regression([(255,255) if BINARY_VISIBLE else THRESHOLD], robust = True)
+
+    if (line): img.draw_line(line.line(), color = 127)
+    print("FPS %f, mag = %s" % (clock.fps(), str(line.magnitude()) if (line) else "N/A"))
+
+# About negative rho values:
+#
+# A [theta+0:-rho] tuple is the same as [theta+180:+rho].
diff --git a/scripts/examples/Arduino/Nicla-Vision/09-Feature-Detection/template_matching.py b/scripts/examples/Arduino/Nicla-Vision/09-Feature-Detection/template_matching.py
new file mode 100644
index 000000000..e33f760bf
--- /dev/null
+++ b/scripts/examples/Arduino/Nicla-Vision/09-Feature-Detection/template_matching.py
@@ -0,0 +1,46 @@
+# Template Matching Example - Normalized Cross Correlation (NCC)
+#
+# This example shows off how to use the NCC feature of your OpenMV Cam to match
+# image patches to parts of an image... except for extremely controlled environments
+# NCC is not all that useful.
+#
+# WARNING: NCC support needs to be reworked! As of right now this feature needs
+# a lot of work to be made into something useful. This script will remain to show
+# that the functionality exists, but, in its current state, it is inadequate.
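+# Note: /template.pgm must already exist on the SD card. One way to create it
+# is to save a small grayscale snapshot crop with img.save("/template.pgm").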
+
+import time, sensor, image
+from image import SEARCH_EX, SEARCH_DS
+
+# Reset sensor
+sensor.reset()
+# Max resolution for template matching with SEARCH_EX is QQVGA
+sensor.set_framesize(sensor.QQVGA)
+# You can set windowing to reduce the search image.
+#sensor.set_windowing(((640-80)//2, (480-60)//2, 80, 60))
+sensor.set_pixformat(sensor.GRAYSCALE)
+
+# Load template.
+# Template should be a small (e.g. 32x32 pixels) grayscale image.
+template = image.Image("/template.pgm")
+
+clock = time.clock()
+
+# Run template matching
+while (True):
+    clock.tick()
+    img = sensor.snapshot()
+
+    # find_template(template, threshold, [roi, step, search])
+    # ROI: The region of interest tuple (x, y, w, h).
+    # Step: The loop step used (y+=step, x+=step); use a bigger step to make it faster.
+    # Search is either image.SEARCH_EX for exhaustive search or image.SEARCH_DS for diamond search
+    #
+    # Note1: ROI has to be smaller than the image and bigger than the template.
+    # Note2: In diamond search, step and ROI are both ignored.
+    r = img.find_template(template, 0.70, step=4, search=SEARCH_EX) #, roi=(10, 0, 60, 60))
+    if r:
+        img.draw_rectangle(r)
+
+    print(clock.fps())
diff --git a/scripts/examples/Arduino/Nicla-Vision/16-Codes/find_barcodes.py b/scripts/examples/Arduino/Nicla-Vision/16-Codes/find_barcodes.py
new file mode 100644
index 000000000..241b5560a
--- /dev/null
+++ b/scripts/examples/Arduino/Nicla-Vision/16-Codes/find_barcodes.py
@@ -0,0 +1,65 @@
+# Barcode Example
+#
+# This example shows off how easy it is to detect barcodes using the
+# OpenMV Cam M7. Barcode detection does not work on the M4 Camera.
+
+import sensor, image, time, math
+
+sensor.reset()
+sensor.set_pixformat(sensor.GRAYSCALE)
+sensor.set_framesize(sensor.VGA)
+sensor.set_windowing((640, 80)) # V Res of 80 == less work (40 for 2X the speed).
+sensor.skip_frames(time = 2000)
+clock = time.clock()
+
+# Barcode detection can run at the full 640x480 resolution of your OpenMV Cam's
+# OV7725 camera module. Barcode detection will also work in RGB565 mode but at
+# a lower resolution. That said, barcode detection requires a higher resolution
+# to work well so it should always be run at 640x480 in grayscale...
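+
+# barcode_name() below maps the barcode type constants returned by
+# find_barcodes() to human-readable names.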
+def barcode_name(code):
+    if(code.type() == image.EAN2):
+        return "EAN2"
+    if(code.type() == image.EAN5):
+        return "EAN5"
+    if(code.type() == image.EAN8):
+        return "EAN8"
+    if(code.type() == image.UPCE):
+        return "UPCE"
+    if(code.type() == image.ISBN10):
+        return "ISBN10"
+    if(code.type() == image.UPCA):
+        return "UPCA"
+    if(code.type() == image.EAN13):
+        return "EAN13"
+    if(code.type() == image.ISBN13):
+        return "ISBN13"
+    if(code.type() == image.I25):
+        return "I25"
+    if(code.type() == image.DATABAR):
+        return "DATABAR"
+    if(code.type() == image.DATABAR_EXP):
+        return "DATABAR_EXP"
+    if(code.type() == image.CODABAR):
+        return "CODABAR"
+    if(code.type() == image.CODE39):
+        return "CODE39"
+    if(code.type() == image.PDF417):
+        return "PDF417"
+    if(code.type() == image.CODE93):
+        return "CODE93"
+    if(code.type() == image.CODE128):
+        return "CODE128"
+
+while(True):
+    clock.tick()
+    img = sensor.snapshot()
+    codes = img.find_barcodes()
+    for code in codes:
+        img.draw_rectangle(code.rect())
+        print_args = (barcode_name(code), code.payload(), (180 * code.rotation()) / math.pi, code.quality(), clock.fps())
+        print("Barcode %s, Payload \"%s\", rotation %f (degrees), quality %d, FPS %f" % print_args)
+    if not codes:
+        print("FPS %f" % clock.fps())
diff --git a/scripts/examples/Arduino/Nicla-Vision/16-Codes/find_datamatrices.py b/scripts/examples/Arduino/Nicla-Vision/16-Codes/find_datamatrices.py
new file mode 100644
index 000000000..5a6cd8714
--- /dev/null
+++ b/scripts/examples/Arduino/Nicla-Vision/16-Codes/find_datamatrices.py
@@ -0,0 +1,25 @@
+# Find Data Matrices Example
+#
+# This example shows off how easy it is to detect Data Matrices using the
+# OpenMV Cam M7. Data Matrix detection does not work on the M4 Camera.
+
+import sensor, image, time, math
+
+sensor.reset()
+sensor.set_pixformat(sensor.RGB565)
+sensor.set_framesize(sensor.QVGA)
+sensor.skip_frames(time = 2000)
+clock = time.clock()
+
+while(True):
+    clock.tick()
+    img = sensor.snapshot()
+    img.lens_corr(1.8) # strength of 1.8 is good for the 2.8mm lens.
+
+    matrices = img.find_datamatrices()
+    for matrix in matrices:
+        img.draw_rectangle(matrix.rect(), color = (255, 0, 0))
+        print_args = (matrix.rows(), matrix.columns(), matrix.payload(), (180 * matrix.rotation()) / math.pi, clock.fps())
+        print("Matrix [%d:%d], Payload \"%s\", rotation %f (degrees), FPS %f" % print_args)
+    if not matrices:
+        print("FPS %f" % clock.fps())
diff --git a/scripts/examples/Arduino/Nicla-Vision/16-Codes/find_datamatrices_w_lens_zoom.py b/scripts/examples/Arduino/Nicla-Vision/16-Codes/find_datamatrices_w_lens_zoom.py
new file mode 100644
index 000000000..bce6bc8d3
--- /dev/null
+++ b/scripts/examples/Arduino/Nicla-Vision/16-Codes/find_datamatrices_w_lens_zoom.py
@@ -0,0 +1,28 @@
+# Find Data Matrices w/ Lens Zoom Example
+#
+# This example shows off how easy it is to detect Data Matrices using the
+# OpenMV Cam M7. Data Matrix detection does not work on the M4 Camera.
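+#
+# Windowing the VGA frame down to its center 320x240 crops the sensor output,
+# which acts like a 2x optical zoom without scaling the image.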
+
+import sensor, image, time, math
+
+sensor.reset()
+sensor.set_pixformat(sensor.RGB565)
+sensor.set_framesize(sensor.VGA)
+sensor.set_windowing((320, 240)) # 2x Zoom
+sensor.skip_frames(time = 2000)
+clock = time.clock()
+
+while(True):
+    clock.tick()
+    img = sensor.snapshot()
+
+    matrices = img.find_datamatrices()
+    for matrix in matrices:
+        img.draw_rectangle(matrix.rect(), color = (255, 0, 0))
+        print_args = (matrix.rows(), matrix.columns(), matrix.payload(), (180 * matrix.rotation()) / math.pi, clock.fps())
+        print("Matrix [%d:%d], Payload \"%s\", rotation %f (degrees), FPS %f" % print_args)
+    if not matrices:
+        print("FPS %f" % clock.fps())
diff --git a/scripts/examples/Arduino/Nicla-Vision/16-Codes/qrcodes_with_lens_corr.py b/scripts/examples/Arduino/Nicla-Vision/16-Codes/qrcodes_with_lens_corr.py
new file mode 100644
index 000000000..55899e509
--- /dev/null
+++ b/scripts/examples/Arduino/Nicla-Vision/16-Codes/qrcodes_with_lens_corr.py
@@ -0,0 +1,21 @@
+# QRCode Example
+#
+# This example shows the power of the OpenMV Cam to detect QR Codes
+# using lens correction (see the qrcodes_with_lens_zoom.py script for higher performance).
+
+import sensor, image, time
+
+sensor.reset()
+sensor.set_pixformat(sensor.RGB565)
+sensor.set_framesize(sensor.QVGA)
+sensor.skip_frames(time = 2000)
+clock = time.clock()
+
+while(True):
+    clock.tick()
+    img = sensor.snapshot()
+    img.lens_corr(1.8) # strength of 1.8 is good for the 2.8mm lens.
+    for code in img.find_qrcodes():
+        img.draw_rectangle(code.rect(), color = (255, 0, 0))
+        print(code)
+    print(clock.fps())
diff --git a/scripts/examples/Arduino/Nicla-Vision/16-Codes/qrcodes_with_lens_zoom.py b/scripts/examples/Arduino/Nicla-Vision/16-Codes/qrcodes_with_lens_zoom.py
new file mode 100644
index 000000000..5fbb51d2a
--- /dev/null
+++ b/scripts/examples/Arduino/Nicla-Vision/16-Codes/qrcodes_with_lens_zoom.py
@@ -0,0 +1,21 @@
+# QRCode Example
+#
+# This example shows the power of the OpenMV Cam to detect QR Codes
+# without needing lens correction.
+
+import sensor, image, time
+
+sensor.reset()
+sensor.set_pixformat(sensor.GRAYSCALE)
+sensor.set_framesize(sensor.VGA)
+sensor.set_windowing((240, 240)) # look at center 240x240 pixels of the VGA resolution.
+sensor.skip_frames(time = 2000)
+clock = time.clock()
+
+while(True):
+    clock.tick()
+    img = sensor.snapshot()
+    for code in img.find_qrcodes():
+        img.draw_rectangle(code.rect(), color = 127)
+        print(code)
+    print(clock.fps())
diff --git a/scripts/examples/Arduino/Nicla-Vision/19-Low-Power/deep_sleep.py b/scripts/examples/Arduino/Nicla-Vision/19-Low-Power/deep_sleep.py
new file mode 100644
index 000000000..5dd595604
--- /dev/null
+++ b/scripts/examples/Arduino/Nicla-Vision/19-Low-Power/deep_sleep.py
@@ -0,0 +1,27 @@
+# Deep Sleep Mode Example
+# This example demonstrates the low-power deep sleep mode plus sensor shutdown.
+# Note the camera will reset after wake-up from deep sleep. To find out if the cause of reset
+# is deep sleep, call the machine.reset_cause() function and test for machine.DEEPSLEEP_RESET
+import pyb, machine, sensor
+
+# Create and init RTC object.
+rtc = pyb.RTC()
+
+# datetime format: (year, month, day, weekday, hours, minutes, seconds, subseconds)
+rtc.datetime((2014, 5, 1, 4, 13, 0, 0, 0))
+
+# Print RTC info.
+print(rtc.datetime())
+
+sensor.reset()
+
+# Shutdown the sensor (pulls PWDN high).
+sensor.shutdown(True)
+
+# Enable RTC interrupts every 30 seconds.
+# Note the camera will RESET after wakeup from Deepsleep Mode.
+rtc.wakeup(30000)
+
+# Enter Deepsleep Mode.
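+# (The camera effectively turns itself off except for the RTC and will reset on wake-up.)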
+machine.deepsleep()
diff --git a/scripts/examples/Arduino/Nicla-Vision/19-Low-Power/extint_wakeup.py b/scripts/examples/Arduino/Nicla-Vision/19-Low-Power/extint_wakeup.py
new file mode 100644
index 000000000..d94a8118d
--- /dev/null
+++ b/scripts/examples/Arduino/Nicla-Vision/19-Low-Power/extint_wakeup.py
@@ -0,0 +1,21 @@
+# ExtInt Wake-Up from Stop Mode Example
+# This example demonstrates using external interrupts to wake up from low-power mode.
+
+import time, pyb, machine
+from pyb import Pin, ExtInt
+
+def callback(line):
+    pass
+
+led = pyb.LED(3)
+pin = Pin("GPIO0", Pin.IN, Pin.PULL_UP)
+ext = ExtInt(pin, ExtInt.IRQ_FALLING, Pin.PULL_UP, callback)
+
+# Enter Stop Mode. Note the IDE will disconnect.
+machine.sleep()
+
+while (True):
+    led.on()
+    time.sleep_ms(100)
+    led.off()
+    time.sleep_ms(100)
diff --git a/scripts/examples/Arduino/Nicla-Vision/19-Low-Power/stop_mode.py b/scripts/examples/Arduino/Nicla-Vision/19-Low-Power/stop_mode.py
new file mode 100644
index 000000000..7c3c4abdc
--- /dev/null
+++ b/scripts/examples/Arduino/Nicla-Vision/19-Low-Power/stop_mode.py
@@ -0,0 +1,19 @@
+# Stop Mode Example
+# This example demonstrates using the low-power Stop Mode.
+
+import time, pyb, machine
+
+# Create and init RTC object.
+rtc = pyb.RTC()
+# datetime format: (year, month, day, weekday, hours, minutes, seconds, subseconds)
+rtc.datetime((2014, 5, 1, 4, 13, 0, 0, 0))
+
+# Print RTC info.
+print(rtc.datetime())
+
+# Enable RTC interrupts every 5 seconds.
+rtc.wakeup(5000)
+
+# Enter Stop Mode.
+# Note the IDE will disconnect.
+machine.sleep()
diff --git a/scripts/examples/Arduino/Nicla-Vision/20-Frame-Differencing/in_memory_basic_frame_differencing.py b/scripts/examples/Arduino/Nicla-Vision/20-Frame-Differencing/in_memory_basic_frame_differencing.py
new file mode 100644
index 000000000..138955f86
--- /dev/null
+++ b/scripts/examples/Arduino/Nicla-Vision/20-Frame-Differencing/in_memory_basic_frame_differencing.py
@@ -0,0 +1,46 @@
+# In Memory Basic Frame Differencing Example
+#
+# This example demonstrates using frame differencing with your OpenMV Cam. It's
+# called basic frame differencing because there's no background image update.
+# So, as time passes the background image may change, resulting in issues.
+
+import sensor, image, pyb, os, time
+
+TRIGGER_THRESHOLD = 5
+
+sensor.reset() # Initialize the camera sensor.
+sensor.set_pixformat(sensor.RGB565) # or sensor.GRAYSCALE
+sensor.set_framesize(sensor.QVGA) # or sensor.QQVGA (or others)
+sensor.skip_frames(time = 2000) # Let new settings take effect.
+clock = time.clock() # Tracks FPS.
+
+# Take from the main frame buffer's RAM to allocate a second frame buffer.
+# There's a lot more RAM in the frame buffer than in the MicroPython heap.
+# However, after doing this you have a lot less RAM for some algorithms...
+# So, be aware that it's a lot easier to run into RAM issues now. However,
+# frame differencing doesn't use a lot of the extra space in the frame buffer.
+# But, things like AprilTags do and won't work if you do this...
+extra_fb = sensor.alloc_extra_fb(sensor.width(), sensor.height(), sensor.RGB565)
+
+print("About to save background image...")
+sensor.skip_frames(time = 2000) # Give the user time to get ready.
+extra_fb.replace(sensor.snapshot())
+print("Saved background image - Now frame differencing!")
+
+while(True):
+    clock.tick() # Track elapsed milliseconds between snapshots().
+    img = sensor.snapshot() # Take a picture and return the image.
+
+    # Replace the image with the "abs(NEW-OLD)" frame difference.
+    img.difference(extra_fb)
+
+    hist = img.get_histogram()
+    # The code below compares the 99th percentile value (e.g. the
+    # non-outlier max value) against the 90th percentile value (e.g. a
+    # non-max value). The difference between the two grows as more
+    # pixels in the difference image change.
+    diff = hist.get_percentile(0.99).l_value() - hist.get_percentile(0.90).l_value()
+    triggered = diff > TRIGGER_THRESHOLD
+
+    print(clock.fps(), triggered) # Note: Your OpenMV Cam runs about half as fast while
+    # connected to your computer. The FPS should increase once disconnected.
diff --git a/scripts/examples/Arduino/Nicla-Vision/20-Frame-Differencing/in_memory_structural_similarity.py b/scripts/examples/Arduino/Nicla-Vision/20-Frame-Differencing/in_memory_structural_similarity.py
new file mode 100644
index 000000000..53a32c4ff
--- /dev/null
+++ b/scripts/examples/Arduino/Nicla-Vision/20-Frame-Differencing/in_memory_structural_similarity.py
@@ -0,0 +1,38 @@
+# Structural Similarity (SSIM) Example
+#
+# This example shows off how to use the SSIM algorithm on your OpenMV Cam
+# to detect differences between two images. The SSIM algorithm compares
+# 8x8 blocks of pixels between two images to determine a similarity
+# score.
+
+import sensor, image, pyb, os, time
+
+# The image has likely changed if the sim.min() is lower than this.
+MIN_TRIGGER_THRESHOLD = -0.4
+
+sensor.reset() # Initialize the camera sensor.
+sensor.set_pixformat(sensor.RGB565) # or sensor.GRAYSCALE
+sensor.set_framesize(sensor.QVGA) # or sensor.QQVGA (or others)
+sensor.skip_frames(time = 2000) # Let new settings take effect.
+clock = time.clock() # Tracks FPS.
+
+# Take from the main frame buffer's RAM to allocate a second frame buffer.
+# There's a lot more RAM in the frame buffer than in the MicroPython heap.
+# However, after doing this you have a lot less RAM for some algorithms...
+# So, be aware that it's a lot easier to run into RAM issues now. However,
+# frame differencing doesn't use a lot of the extra space in the frame buffer.
+# But, things like AprilTags do and won't work if you do this...
+extra_fb = sensor.alloc_extra_fb(sensor.width(), sensor.height(), sensor.RGB565)
+
+print("About to save background image...")
+sensor.skip_frames(time = 2000) # Give the user time to get ready.
+extra_fb.replace(sensor.snapshot())
+print("Saved background image!")
+
+while(True):
+    clock.tick() # Track elapsed milliseconds between snapshots().
+    img = sensor.snapshot() # Take a picture and return the image.
+    sim = img.get_similarity(extra_fb)
+    change = "- Change -" if sim.min() < MIN_TRIGGER_THRESHOLD else "- No Change -"
+
+    print(clock.fps(), change, sim)
diff --git a/scripts/examples/Arduino/Nicla-Vision/20-Frame-Differencing/on_disk_basic_frame_differencing.py b/scripts/examples/Arduino/Nicla-Vision/20-Frame-Differencing/on_disk_basic_frame_differencing.py
new file mode 100644
index 000000000..a168a0c50
--- /dev/null
+++ b/scripts/examples/Arduino/Nicla-Vision/20-Frame-Differencing/on_disk_basic_frame_differencing.py
@@ -0,0 +1,42 @@
+# Basic Frame Differencing Example
+#
+# Note: You will need an SD card to run this example.
+#
+# This example demonstrates using frame differencing with your OpenMV Cam. It's
+# called basic frame differencing because there's no background image update.
+# So, as time passes the background image may change, resulting in issues.
+
+import sensor, image, pyb, os, time
+
+TRIGGER_THRESHOLD = 5
+
+sensor.reset() # Initialize the camera sensor.
+sensor.set_pixformat(sensor.RGB565) # or sensor.GRAYSCALE
+sensor.set_framesize(sensor.QVGA) # or sensor.QQVGA (or others)
+sensor.skip_frames(time = 2000) # Let new settings take effect.
+clock = time.clock() # Tracks FPS.
+
+if "temp" not in os.listdir(): os.mkdir("temp") # Make a temp directory
+
+print("About to save background image...")
+sensor.skip_frames(time = 2000) # Give the user time to get ready.
+sensor.snapshot().save("temp/bg.bmp")
+print("Saved background image - Now frame differencing!")
+
+while(True):
+    clock.tick() # Track elapsed milliseconds between snapshots().
+    img = sensor.snapshot() # Take a picture and return the image.
+
+    # Replace the image with the "abs(NEW-OLD)" frame difference.
+    img.difference("temp/bg.bmp")
+
+    hist = img.get_histogram()
+    # The code below compares the 99th percentile value (e.g. the
+    # non-outlier max value) against the 90th percentile value (e.g. a
+    # non-max value). The difference between the two grows as more
+    # pixels in the difference image change.
+    diff = hist.get_percentile(0.99).l_value() - hist.get_percentile(0.90).l_value()
+    triggered = diff > TRIGGER_THRESHOLD
+
+    print(clock.fps(), triggered) # Note: Your OpenMV Cam runs about half as fast while
+    # connected to your computer. The FPS should increase once disconnected.
diff --git a/scripts/examples/Arduino/Nicla-Vision/20-Frame-Differencing/on_disk_structural_similarity.py b/scripts/examples/Arduino/Nicla-Vision/20-Frame-Differencing/on_disk_structural_similarity.py
new file mode 100644
index 000000000..bff55ecc7
--- /dev/null
+++ b/scripts/examples/Arduino/Nicla-Vision/20-Frame-Differencing/on_disk_structural_similarity.py
@@ -0,0 +1,34 @@
+# Structural Similarity (SSIM) Example
+#
+# Note: You will need an SD card to run this example.
+#
+# This example shows off how to use the SSIM algorithm on your OpenMV Cam
+# to detect differences between two images. The SSIM algorithm compares
+# 8x8 blocks of pixels between two images to determine a similarity
+# score.
+
+import sensor, image, pyb, os, time
+
+# The image has likely changed if the sim.min() is lower than this.
+MIN_TRIGGER_THRESHOLD = -0.4
+
+sensor.reset() # Initialize the camera sensor.
+sensor.set_pixformat(sensor.RGB565) # or sensor.GRAYSCALE
+sensor.set_framesize(sensor.QVGA) # or sensor.QQVGA (or others)
+sensor.skip_frames(time = 2000) # Let new settings take effect.
+clock = time.clock() # Tracks FPS.
+
+if "temp" not in os.listdir(): os.mkdir("temp") # Make a temp directory
+
+print("About to save background image...")
+sensor.skip_frames(time = 2000) # Give the user time to get ready.
+sensor.snapshot().save("temp/bg.bmp")
+print("Saved background image!")
+
+while(True):
+    clock.tick() # Track elapsed milliseconds between snapshots().
+    img = sensor.snapshot() # Take a picture and return the image.
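+    # The saved background never updates, so gradual scene changes (e.g.
+    # lighting) will eventually read as a permanent change. A minimal sketch
+    # of a periodic refresh (the frame_count counter is illustrative and not
+    # part of this script):
+    #
+    #   frame_count += 1
+    #   if frame_count % 100 == 0:
+    #       img.save("temp/bg.bmp") # adopt the current view as the background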
+    sim = img.get_similarity("temp/bg.bmp")
+    change = "- Change -" if sim.min() < MIN_TRIGGER_THRESHOLD else "- No Change -"
+
+    print(clock.fps(), change, sim)
diff --git a/scripts/examples/Arduino/Nicla-Vision/22-Optical-Flow/absolute-rotation-scale.py b/scripts/examples/Arduino/Nicla-Vision/22-Optical-Flow/absolute-rotation-scale.py
new file mode 100644
index 000000000..1dafa06ee
--- /dev/null
+++ b/scripts/examples/Arduino/Nicla-Vision/22-Optical-Flow/absolute-rotation-scale.py
@@ -0,0 +1,67 @@
+# Absolute Optical Flow Rotation/Scale
+#
+# This example shows off using your OpenMV Cam to measure
+# rotation/scale by comparing the current and a previous
+# image against each other. Note that only rotation/scale is
+# handled - not X and Y translation in this mode.
+
+# To run this demo effectively please mount your OpenMV Cam on a steady
+# base and SLOWLY rotate the camera around the lens and move the camera
+# forward/backwards to see the numbers change.
+# I.e. Z direction changes only.
+
+import sensor, image, time, math
+
+# NOTE!!! You have to use a small power of 2 resolution when using
+# find_displacement(). This is because the algorithm is powered by
+# something called phase correlation which does the image comparison
+# using FFTs. A non-power of 2 resolution requires padding to a power
+# of 2 which reduces the usefulness of the algorithm results. Please
+# use a resolution like B64X64 or B64X32 (2x faster).
+
+# Your OpenMV Cam supports power of 2 resolutions of 64x32, 64x64,
+# 128x64, and 128x128. If you want a resolution of 32x32 you can create
+# it by doing "img.pool(2, 2)" on a 64x64 image.
+
+sensor.reset() # Reset and initialize the sensor.
+sensor.set_pixformat(sensor.RGB565) # Set pixel format to RGB565 (or GRAYSCALE)
+sensor.set_framesize(sensor.B64X64) # Set frame size to 64x64... (or 64x32)...
+sensor.skip_frames(time = 2000) # Wait for settings to take effect.
+clock = time.clock() # Create a clock object to track the FPS.
+
+# Take from the main frame buffer's RAM to allocate a second frame buffer.
+# There's a lot more RAM in the frame buffer than in the MicroPython heap.
+# However, after doing this you have a lot less RAM for some algorithms...
+# So, be aware that it's a lot easier to run into RAM issues now.
+extra_fb = sensor.alloc_extra_fb(sensor.width(), sensor.height(), sensor.RGB565)
+extra_fb.replace(sensor.snapshot())
+
+while(True):
+    clock.tick() # Track elapsed milliseconds between snapshots().
+    img = sensor.snapshot() # Take a picture and return the image.
+
+    # This algorithm is hard to test without a perfect jig... So, here's a cheat to see that it works.
+    # Put in a z_rotation value below and you should see the r output be equal to that.
+    if(0):
+        expected_rotation = 20.0
+        img.rotation_corr(z_rotation=expected_rotation)
+
+    # This algorithm is hard to test without a perfect jig... So, here's a cheat to see that it works.
+    # Put in a zoom value below and you should see the z output be equal to that.
+    if(0):
+        expected_zoom = 0.8
+        img.rotation_corr(zoom=expected_zoom)
+
+    # For this example we never update the old image to measure absolute change.
+    displacement = extra_fb.find_displacement(img, logpolar=True)
+
+    # Offset results are noisy without filtering so we drop some accuracy.
+    rotation_change = int(math.degrees(displacement.rotation()) * 5) / 5.0
+    zoom_amount = displacement.scale()
+
+    if(displacement.response() > 0.1): # Below 0.1 or so (YMMV) and the results are just noise.
+ print("{0:+f}r {1:+f}z {2} {3} FPS".format(rotation_change, zoom_amount, \ + displacement.response(), + clock.fps())) + else: + print(clock.fps()) diff --git a/scripts/examples/Arduino/Nicla-Vision/22-Optical-Flow/absolute-translation.py b/scripts/examples/Arduino/Nicla-Vision/22-Optical-Flow/absolute-translation.py new file mode 100644 index 000000000..f4dd2e49f --- /dev/null +++ b/scripts/examples/Arduino/Nicla-Vision/22-Optical-Flow/absolute-translation.py @@ -0,0 +1,55 @@ +# Absolute Optical Flow Translation +# +# This example shows off using your OpenMV Cam to measure translation +# in the X and Y direction by comparing the current and a previous +# image against each other. Note that only X and Y translation is +# handled - not rotation/scale in this mode. + +# To run this demo effectively please mount your OpenMV Cam on a steady +# base and SLOWLY translate it to the left, right, up, and down and +# watch the numbers change. Note that you can see displacement numbers +# up +- half of the hoizontal and vertical resolution. + +import sensor, image, time + +# NOTE!!! You have to use a small power of 2 resolution when using +# find_displacement(). This is because the algorithm is powered by +# something called phase correlation which does the image comparison +# using FFTs. A non-power of 2 resolution requires padding to a power +# of 2 which reduces the usefulness of the algorithm results. Please +# use a resolution like B64X64 or B64X32 (2x faster). + +# Your OpenMV Cam supports power of 2 resolutions of 64x32, 64x64, +# 128x64, and 128x128. If you want a resolution of 32x32 you can create +# it by doing "img.pool(2, 2)" on a 64x64 image. + +sensor.reset() # Reset and initialize the sensor. +sensor.set_pixformat(sensor.RGB565) # Set pixel format to RGB565 (or GRAYSCALE) +sensor.set_framesize(sensor.B64X64) # Set frame size to 64x64... (or 64x32)... +sensor.skip_frames(time = 2000) # Wait for settings take effect. +clock = time.clock() # Create a clock object to track the FPS. + +# Take from the main frame buffer's RAM to allocate a second frame buffer. +# There's a lot more RAM in the frame buffer than in the MicroPython heap. +# However, after doing this you have a lot less RAM for some algorithms... +# So, be aware that it's a lot easier to get out of RAM issues now. +extra_fb = sensor.alloc_extra_fb(sensor.width(), sensor.height(), sensor.RGB565) +extra_fb.replace(sensor.snapshot()) + +while(True): + clock.tick() # Track elapsed milliseconds between snapshots(). + img = sensor.snapshot() # Take a picture and return the image. + + # For this example we never update the old image to measure absolute change. + displacement = extra_fb.find_displacement(img) + + # Offset results are noisy without filtering so we drop some accuracy. + sub_pixel_x = int(displacement.x_translation() * 5) / 5.0 + sub_pixel_y = int(displacement.y_translation() * 5) / 5.0 + + if(displacement.response() > 0.1): # Below 0.1 or so (YMMV) and the results are just noise. 
+ print("{0:+f}x {1:+f}y {2} {3} FPS".format(sub_pixel_x, sub_pixel_y, + displacement.response(), + clock.fps())) + else: + print(clock.fps()) diff --git a/scripts/examples/Arduino/Nicla-Vision/22-Optical-Flow/differential-rotation-scale.py b/scripts/examples/Arduino/Nicla-Vision/22-Optical-Flow/differential-rotation-scale.py new file mode 100644 index 000000000..8e1b54c64 --- /dev/null +++ b/scripts/examples/Arduino/Nicla-Vision/22-Optical-Flow/differential-rotation-scale.py @@ -0,0 +1,67 @@ +# Differential Optical Flow Rotation/Scale +# +# This example shows off using your OpenMV Cam to measure +# rotation/scale by comparing the current and the previous +# image against each other. Note that only rotation/scale is +# handled - not X and Y translation in this mode. + +# To run this demo effectively please mount your OpenMV Cam on a steady +# base and SLOWLY rotate the camera around the lens and move the camera +# forward/backwards to see the numbers change. +# I.e. Z direction changes only. + +import sensor, image, time, math + +# NOTE!!! You have to use a small power of 2 resolution when using +# find_displacement(). This is because the algorithm is powered by +# something called phase correlation which does the image comparison +# using FFTs. A non-power of 2 resolution requires padding to a power +# of 2 which reduces the usefulness of the algorithm results. Please +# use a resolution like B64X64 or B64X32 (2x faster). + +# Your OpenMV Cam supports power of 2 resolutions of 64x32, 64x64, +# 128x64, and 128x128. If you want a resolution of 32x32 you can create +# it by doing "img.pool(2, 2)" on a 64x64 image. + +sensor.reset() # Reset and initialize the sensor. +sensor.set_pixformat(sensor.RGB565) # Set pixel format to RGB565 (or GRAYSCALE) +sensor.set_framesize(sensor.B64X64) # Set frame size to 64x64... (or 64x32)... +sensor.skip_frames(time = 2000) # Wait for settings take effect. +clock = time.clock() # Create a clock object to track the FPS. + +# Take from the main frame buffer's RAM to allocate a second frame buffer. +# There's a lot more RAM in the frame buffer than in the MicroPython heap. +# However, after doing this you have a lot less RAM for some algorithms... +# So, be aware that it's a lot easier to get out of RAM issues now. +extra_fb = sensor.alloc_extra_fb(sensor.width(), sensor.height(), sensor.RGB565) +extra_fb.replace(sensor.snapshot()) + +while(True): + clock.tick() # Track elapsed milliseconds between snapshots(). + img = sensor.snapshot() # Take a picture and return the image. + + # This algorithm is hard to test without a perfect jig... So, here's a cheat to see it works. + # Put in a z_rotation value below and you should see the r output be equal to that. + if(0): + expected_rotation = 20.0 + extra_fb.rotation_corr(z_rotation=(-expected_rotation)) + + # This algorithm is hard to test without a perfect jig... So, here's a cheat to see it works. + # Put in a zoom value below and you should see the z output be equal to that. + if(0): + expected_zoom = 0.8 + extra_fb.rotation_corr(zoom=(2.00-expected_zoom)) + + displacement = extra_fb.find_displacement(img, logpolar=True) + extra_fb.replace(img) + + # Offset results are noisy without filtering so we drop some accuracy. + rotation_change = int(math.degrees(displacement.rotation()) * 5) / 5.0 + zoom_amount = displacement.scale() + + if(displacement.response() > 0.1): # Below 0.1 or so (YMMV) and the results are just noise. 
+ print("{0:+f}r {1:+f}z {2} {3} FPS".format(rotation_change, zoom_amount, \ + displacement.response(), + clock.fps())) + else: + print(clock.fps()) diff --git a/scripts/examples/Arduino/Nicla-Vision/22-Optical-Flow/differential-translation.py b/scripts/examples/Arduino/Nicla-Vision/22-Optical-Flow/differential-translation.py new file mode 100644 index 000000000..04416cf09 --- /dev/null +++ b/scripts/examples/Arduino/Nicla-Vision/22-Optical-Flow/differential-translation.py @@ -0,0 +1,55 @@ +# Differential Optical Flow Translation +# +# This example shows off using your OpenMV Cam to measure translation +# in the X and Y direction by comparing the current and the previous +# image against each other. Note that only X and Y translation is +# handled - not rotation/scale in this mode. + +# To run this demo effectively please mount your OpenMV Cam on a steady +# base and QUICKLY translate it to the left, right, up, and down and +# watch the numbers change. Note that you can see displacement numbers +# up +- half of the hoizontal and vertical resolution. + +import sensor, image, time + +# NOTE!!! You have to use a small power of 2 resolution when using +# find_displacement(). This is because the algorithm is powered by +# something called phase correlation which does the image comparison +# using FFTs. A non-power of 2 resolution requires padding to a power +# of 2 which reduces the usefulness of the algorithm results. Please +# use a resolution like B64X64 or B64X32 (2x faster). + +# Your OpenMV Cam supports power of 2 resolutions of 64x32, 64x64, +# 128x64, and 128x128. If you want a resolution of 32x32 you can create +# it by doing "img.pool(2, 2)" on a 64x64 image. + +sensor.reset() # Reset and initialize the sensor. +sensor.set_pixformat(sensor.RGB565) # Set pixel format to RGB565 (or GRAYSCALE) +sensor.set_framesize(sensor.B64X64) # Set frame size to 64x64... (or 64x32)... +sensor.skip_frames(time = 2000) # Wait for settings take effect. +clock = time.clock() # Create a clock object to track the FPS. + +# Take from the main frame buffer's RAM to allocate a second frame buffer. +# There's a lot more RAM in the frame buffer than in the MicroPython heap. +# However, after doing this you have a lot less RAM for some algorithms... +# So, be aware that it's a lot easier to get out of RAM issues now. +extra_fb = sensor.alloc_extra_fb(sensor.width(), sensor.height(), sensor.RGB565) +extra_fb.replace(sensor.snapshot()) + +while(True): + clock.tick() # Track elapsed milliseconds between snapshots(). + img = sensor.snapshot() # Take a picture and return the image. + + displacement = extra_fb.find_displacement(img) + extra_fb.replace(img) + + # Offset results are noisy without filtering so we drop some accuracy. + sub_pixel_x = int(displacement.x_translation() * 5) / 5.0 + sub_pixel_y = int(displacement.y_translation() * 5) / 5.0 + + if(displacement.response() > 0.1): # Below 0.1 or so (YMMV) and the results are just noise. 
+ print("{0:+f}x {1:+f}y {2} {3} FPS".format(sub_pixel_x, sub_pixel_y, + displacement.response(), + clock.fps())) + else: + print(clock.fps()) diff --git a/scripts/examples/Arduino/Nicla-Vision/22-Optical-Flow/image-patches-absolute-rotation-scale.py b/scripts/examples/Arduino/Nicla-Vision/22-Optical-Flow/image-patches-absolute-rotation-scale.py new file mode 100644 index 000000000..414a105e6 --- /dev/null +++ b/scripts/examples/Arduino/Nicla-Vision/22-Optical-Flow/image-patches-absolute-rotation-scale.py @@ -0,0 +1,73 @@ +# Image Patches Absolute Optical Flow Rotation/Scale +# +# This example shows off using your OpenMV Cam to measure +# rotation/scale by comparing the current and a previous +# image against each other. Note that only rotation/scale is +# handled - not X and Y translation in this mode. +# +# However, this examples goes beyond doing optical flow on the whole +# image at once. Instead it breaks up the process by working on groups +# of pixels in the image. This gives you a "new" image of results. +# +# NOTE that surfaces need to have some type of "edge" on them for the +# algorithm to work. A featureless surface produces crazy results. + +# NOTE: Unless you have a very nice test rig this example is hard to see usefulness of... + +BLOCK_W = 16 # pow2 +BLOCK_H = 16 # pow2 + +# To run this demo effectively please mount your OpenMV Cam on a steady +# base and SLOWLY rotate the camera around the lens and move the camera +# forward/backwards to see the numbers change. +# I.e. Z direction changes only. + +import sensor, image, time, math + +# NOTE!!! You have to use a small power of 2 resolution when using +# find_displacement(). This is because the algorithm is powered by +# something called phase correlation which does the image comparison +# using FFTs. A non-power of 2 resolution requires padding to a power +# of 2 which reduces the usefulness of the algorithm results. Please +# use a resolution like B128X128 or B128X64 (2x faster). + +# Your OpenMV Cam supports power of 2 resolutions of 64x32, 64x64, +# 128x64, and 128x128. If you want a resolution of 32x32 you can create +# it by doing "img.pool(2, 2)" on a 64x64 image. + +sensor.reset() # Reset and initialize the sensor. +sensor.set_pixformat(sensor.GRAYSCALE) # Set pixel format to GRAYSCALE (or RGB565) +sensor.set_framesize(sensor.B128X128) # Set frame size to 128x128... (or 128x64)... +sensor.skip_frames(time = 2000) # Wait for settings take effect. +clock = time.clock() # Create a clock object to track the FPS. + +# Take from the main frame buffer's RAM to allocate a second frame buffer. +# There's a lot more RAM in the frame buffer than in the MicroPython heap. +# However, after doing this you have a lot less RAM for some algorithms... +# So, be aware that it's a lot easier to get out of RAM issues now. +extra_fb = sensor.alloc_extra_fb(sensor.width(), sensor.height(), sensor.GRAYSCALE) +extra_fb.replace(sensor.snapshot()) + +while(True): + clock.tick() # Track elapsed milliseconds between snapshots(). + img = sensor.snapshot() # Take a picture and return the image. + + for y in range(0, sensor.height(), BLOCK_H): + for x in range(0, sensor.width(), BLOCK_W): + # For this example we never update the old image to measure absolute change. + displacement = extra_fb.find_displacement(img, logpolar=True, \ + roi = (x, y, BLOCK_W, BLOCK_H), template_roi = (x, y, BLOCK_W, BLOCK_H)) + + # Below 0.1 or so (YMMV) and the results are just noise. 
+            if(displacement.response() > 0.1):
+                rotation_change = displacement.rotation()
+                zoom_amount = displacement.scale()
+                pixel_x = x + (BLOCK_W//2) + int(math.sin(rotation_change) * zoom_amount * (BLOCK_W//4))
+                pixel_y = y + (BLOCK_H//2) + int(math.cos(rotation_change) * zoom_amount * (BLOCK_H//4))
+                img.draw_line((x + BLOCK_W//2, y + BLOCK_H//2, pixel_x, pixel_y), \
+                              color = 255)
+            else:
+                img.draw_line((x + BLOCK_W//2, y + BLOCK_H//2, x + BLOCK_W//2, y + BLOCK_H//2), \
+                              color = 0)
+
+    print(clock.fps())
diff --git a/scripts/examples/Arduino/Nicla-Vision/22-Optical-Flow/image-patches-absolute-translation.py b/scripts/examples/Arduino/Nicla-Vision/22-Optical-Flow/image-patches-absolute-translation.py
new file mode 100644
index 000000000..0bfae8ca6
--- /dev/null
+++ b/scripts/examples/Arduino/Nicla-Vision/22-Optical-Flow/image-patches-absolute-translation.py
@@ -0,0 +1,69 @@
+# Image Patches Absolute Optical Flow Translation
+#
+# This example shows off using your OpenMV Cam to measure translation
+# in the X and Y direction by comparing the current and a previous
+# image against each other. Note that only X and Y translation is
+# handled - not rotation/scale in this mode.
+#
+# However, this example goes beyond doing optical flow on the whole
+# image at once. Instead it breaks up the process by working on groups
+# of pixels in the image. This gives you a "new" image of results.
+#
+# NOTE that surfaces need to have some type of "edge" on them for the
+# algorithm to work. A featureless surface produces crazy results.
+
+BLOCK_W = 16 # pow2
+BLOCK_H = 16 # pow2
+
+# To run this demo effectively please mount your OpenMV Cam on a steady
+# base and SLOWLY translate it to the left, right, up, and down and
+# watch the numbers change. Note that you can see displacement numbers
+# up to +/- half of the horizontal and vertical resolution.
+
+import sensor, image, time
+
+# NOTE!!! You have to use a small power of 2 resolution when using
+# find_displacement(). This is because the algorithm is powered by
+# something called phase correlation which does the image comparison
+# using FFTs. A non-power of 2 resolution requires padding to a power
+# of 2 which reduces the usefulness of the algorithm results. Please
+# use a resolution like B128X128 or B128X64 (2x faster).
+
+# Your OpenMV Cam supports power of 2 resolutions of 64x32, 64x64,
+# 128x64, and 128x128. If you want a resolution of 32x32 you can create
+# it by doing "img.pool(2, 2)" on a 64x64 image.
+
+sensor.reset() # Reset and initialize the sensor.
+sensor.set_pixformat(sensor.GRAYSCALE) # Set pixel format to GRAYSCALE (or RGB565)
+sensor.set_framesize(sensor.B128X128) # Set frame size to 128x128... (or 128x64)...
+sensor.skip_frames(time = 2000) # Wait for settings to take effect.
+clock = time.clock() # Create a clock object to track the FPS.
+
+# Take from the main frame buffer's RAM to allocate a second frame buffer.
+# There's a lot more RAM in the frame buffer than in the MicroPython heap.
+# However, after doing this you have a lot less RAM for some algorithms...
+# So, be aware that it's a lot easier to run into RAM issues now.
+extra_fb = sensor.alloc_extra_fb(sensor.width(), sensor.height(), sensor.GRAYSCALE)
+extra_fb.replace(sensor.snapshot())
+
+while(True):
+    clock.tick() # Track elapsed milliseconds between snapshots().
+    img = sensor.snapshot() # Take a picture and return the image.
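+    # As the NOTE above mentions, a smaller power-of-2 input can be made by
+    # pooling. A minimal sketch (halving 128x128 to 64x64; extra_fb and the
+    # block loops below would need the same treatment):
+    #
+    #   img = img.pool(2, 2)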
+
+    for y in range(0, sensor.height(), BLOCK_H):
+        for x in range(0, sensor.width(), BLOCK_W):
+            # For this example we never update the old image to measure absolute change.
+            displacement = extra_fb.find_displacement(img, \
+                roi = (x, y, BLOCK_W, BLOCK_H), template_roi = (x, y, BLOCK_W, BLOCK_H))
+
+            # Below 0.1 or so (YMMV) and the results are just noise.
+            if(displacement.response() > 0.1):
+                pixel_x = x + (BLOCK_W//2) + int(displacement.x_translation())
+                pixel_y = y + (BLOCK_H//2) + int(displacement.y_translation())
+                img.draw_line((x + BLOCK_W//2, y + BLOCK_H//2, pixel_x, pixel_y), \
+                              color = 255)
+            else:
+                img.draw_line((x + BLOCK_W//2, y + BLOCK_H//2, x + BLOCK_W//2, y + BLOCK_H//2), \
+                              color = 0)
+
+    print(clock.fps())
diff --git a/scripts/examples/Arduino/Nicla-Vision/22-Optical-Flow/image-patches-differential-rotation-scale.py b/scripts/examples/Arduino/Nicla-Vision/22-Optical-Flow/image-patches-differential-rotation-scale.py
new file mode 100644
index 000000000..bb1bc2eea
--- /dev/null
+++ b/scripts/examples/Arduino/Nicla-Vision/22-Optical-Flow/image-patches-differential-rotation-scale.py
@@ -0,0 +1,73 @@
+# Image Patches Differential Optical Flow Rotation/Scale
+#
+# This example shows off using your OpenMV Cam to measure
+# rotation/scale by comparing the current and the previous
+# image against each other. Note that only rotation/scale is
+# handled - not X and Y translation in this mode.
+#
+# However, this example goes beyond doing optical flow on the whole
+# image at once. Instead it breaks up the process by working on groups
+# of pixels in the image. This gives you a "new" image of results.
+#
+# NOTE that surfaces need to have some type of "edge" on them for the
+# algorithm to work. A featureless surface produces crazy results.
+
+# NOTE: Unless you have a very nice test rig the usefulness of this example is hard to see...
+
+BLOCK_W = 16 # pow2
+BLOCK_H = 16 # pow2
+
+# To run this demo effectively please mount your OpenMV Cam on a steady
+# base and SLOWLY rotate the camera around the lens and move the camera
+# forward/backwards to see the numbers change.
+# I.e. Z direction changes only.
+
+import sensor, image, time, math
+
+# NOTE!!! You have to use a small power of 2 resolution when using
+# find_displacement(). This is because the algorithm is powered by
+# something called phase correlation which does the image comparison
+# using FFTs. A non-power of 2 resolution requires padding to a power
+# of 2 which reduces the usefulness of the algorithm results. Please
+# use a resolution like B128X128 or B128X64 (2x faster).
+
+# Your OpenMV Cam supports power of 2 resolutions of 64x32, 64x64,
+# 128x64, and 128x128. If you want a resolution of 32x32 you can create
+# it by doing "img.pool(2, 2)" on a 64x64 image.
+
+sensor.reset() # Reset and initialize the sensor.
+sensor.set_pixformat(sensor.GRAYSCALE) # Set pixel format to GRAYSCALE (or RGB565)
+sensor.set_framesize(sensor.B128X128) # Set frame size to 128x128... (or 128x64)...
+sensor.skip_frames(time = 2000) # Wait for settings to take effect.
+clock = time.clock() # Create a clock object to track the FPS.
+
+# Take from the main frame buffer's RAM to allocate a second frame buffer.
+# There's a lot more RAM in the frame buffer than in the MicroPython heap.
+# However, after doing this you have a lot less RAM for some algorithms...
+# So, be aware that it's a lot easier to run into RAM issues now.
+extra_fb = sensor.alloc_extra_fb(sensor.width(), sensor.height(), sensor.GRAYSCALE)
+extra_fb.replace(sensor.snapshot())
+
+while(True):
+    clock.tick() # Track elapsed milliseconds between snapshots().
+    img = sensor.snapshot() # Take a picture and return the image.
+
+    for y in range(0, sensor.height(), BLOCK_H):
+        for x in range(0, sensor.width(), BLOCK_W):
+            displacement = extra_fb.find_displacement(img, logpolar=True, \
+                roi = (x, y, BLOCK_W, BLOCK_H), template_roi = (x, y, BLOCK_W, BLOCK_H))
+
+            # Below 0.1 or so (YMMV) and the results are just noise.
+            if(displacement.response() > 0.1):
+                rotation_change = displacement.rotation()
+                zoom_amount = 1.0 + displacement.scale()
+                pixel_x = x + (BLOCK_W//2) + int(math.sin(rotation_change) * zoom_amount * (BLOCK_W//4))
+                pixel_y = y + (BLOCK_H//2) + int(math.cos(rotation_change) * zoom_amount * (BLOCK_H//4))
+                img.draw_line((x + BLOCK_W//2, y + BLOCK_H//2, pixel_x, pixel_y), \
+                              color = 255)
+            else:
+                img.draw_line((x + BLOCK_W//2, y + BLOCK_H//2, x + BLOCK_W//2, y + BLOCK_H//2), \
+                              color = 0)
+    extra_fb.replace(img)
+
+    print(clock.fps())
diff --git a/scripts/examples/Arduino/Nicla-Vision/22-Optical-Flow/image-patches-differential-translation.py b/scripts/examples/Arduino/Nicla-Vision/22-Optical-Flow/image-patches-differential-translation.py
new file mode 100644
index 000000000..0a87c42f3
--- /dev/null
+++ b/scripts/examples/Arduino/Nicla-Vision/22-Optical-Flow/image-patches-differential-translation.py
@@ -0,0 +1,69 @@
+# Image Patches Differential Optical Flow Translation
+#
+# This example shows off using your OpenMV Cam to measure translation
+# in the X and Y direction by comparing the current and the previous
+# image against each other. Note that only X and Y translation is
+# handled - not rotation/scale in this mode.
+#
+# However, this example goes beyond doing optical flow on the whole
+# image at once. Instead it breaks up the process by working on groups
+# of pixels in the image. This gives you a "new" image of results.
+#
+# NOTE that surfaces need to have some type of "edge" on them for the
+# algorithm to work. A featureless surface produces crazy results.
+
+BLOCK_W = 16 # pow2
+BLOCK_H = 16 # pow2
+
+# To run this demo effectively please mount your OpenMV Cam on a steady
+# base and SLOWLY translate it to the left, right, up, and down and
+# watch the numbers change. Note that you can see displacement numbers
+# up to +/- half of the horizontal and vertical resolution.
+
+import sensor, image, time
+
+# NOTE!!! You have to use a small power of 2 resolution when using
+# find_displacement(). This is because the algorithm is powered by
+# something called phase correlation which does the image comparison
+# using FFTs. A non-power of 2 resolution requires padding to a power
+# of 2 which reduces the usefulness of the algorithm results. Please
+# use a resolution like B128X128 or B128X64 (2x faster).
+
+# Your OpenMV Cam supports power of 2 resolutions of 64x32, 64x64,
+# 128x64, and 128x128. If you want a resolution of 32x32 you can create
+# it by doing "img.pool(2, 2)" on a 64x64 image.
+
+sensor.reset() # Reset and initialize the sensor.
+sensor.set_pixformat(sensor.GRAYSCALE) # Set pixel format to GRAYSCALE (or RGB565)
+sensor.set_framesize(sensor.B128X128) # Set frame size to 128x128... (or 128x64)...
+sensor.skip_frames(time = 2000) # Wait for settings to take effect.
+clock = time.clock() # Create a clock object to track the FPS.
+
+# Take from the main frame buffer's RAM to allocate a second frame buffer.
+# There's a lot more RAM in the frame buffer than in the MicroPython heap.
+# However, after doing this you have a lot less RAM for some algorithms...
+# So, be aware that it's a lot easier to run into RAM issues now.
+extra_fb = sensor.alloc_extra_fb(sensor.width(), sensor.height(), sensor.GRAYSCALE)
+extra_fb.replace(sensor.snapshot())
+
+while(True):
+    clock.tick() # Track elapsed milliseconds between snapshots().
+    img = sensor.snapshot() # Take a picture and return the image.
+
+    for y in range(0, sensor.height(), BLOCK_H):
+        for x in range(0, sensor.width(), BLOCK_W):
+            displacement = extra_fb.find_displacement(img, \
+                roi = (x, y, BLOCK_W, BLOCK_H), template_roi = (x, y, BLOCK_W, BLOCK_H))
+
+            # Below 0.1 or so (YMMV) and the results are just noise.
+            if(displacement.response() > 0.1):
+                pixel_x = x + (BLOCK_W//2) + int(displacement.x_translation())
+                pixel_y = y + (BLOCK_H//2) + int(displacement.y_translation())
+                img.draw_line((x + BLOCK_W//2, y + BLOCK_H//2, pixel_x, pixel_y), \
+                              color = 255)
+            else:
+                img.draw_line((x + BLOCK_W//2, y + BLOCK_H//2, x + BLOCK_W//2, y + BLOCK_H//2), \
+                              color = 0)
+    extra_fb.replace(img)
+
+    print(clock.fps())
diff --git a/scripts/examples/Arduino/Nicla-Vision/25-Machine-Learning/nn_stm32cubeai.py b/scripts/examples/Arduino/Nicla-Vision/25-Machine-Learning/nn_stm32cubeai.py
new file mode 100644
index 000000000..a1e260969
--- /dev/null
+++ b/scripts/examples/Arduino/Nicla-Vision/25-Machine-Learning/nn_stm32cubeai.py
@@ -0,0 +1,34 @@
+# STM32 CUBE.AI on OpenMV MNIST Example
+# See https://github.com/openmv/openmv/blob/master/src/stm32cubeai/README.MD
+
+import sensor, image, time, nn_st
+
+sensor.reset() # Reset and initialize the sensor.
+sensor.set_pixformat(sensor.GRAYSCALE) # Set pixel format to Grayscale
+sensor.set_framesize(sensor.QQQVGA) # Set frame size to 80x60
+sensor.skip_frames(time = 2000) # Wait for settings to take effect.
+clock = time.clock() # Create a clock object to track the FPS.
+
+# [CUBE.AI] Initialize the network
+net = nn_st.loadnnst('network')
+
+nn_input_sz = 28 # The NN input is 28x28
+
+while(True):
+    clock.tick() # Update the FPS clock.
+    img = sensor.snapshot() # Take a picture and return the image.
+
+    # Crop in the middle (avoids vignetting)
+    img.crop((img.width()//2-nn_input_sz//2,
+              img.height()//2-nn_input_sz//2,
+              nn_input_sz,
+              nn_input_sz))
+
+    # Binarize the image
+    img.midpoint(2, bias=0.5, threshold=True, offset=5, invert=True)
+
+    # [CUBE.AI] Run the inference
+    out = net.predict(img)
+    print('Network argmax output: {}'.format( out.index(max(out)) ))
+    img.draw_string(0, 0, str(out.index(max(out))))
+    print('FPS {}'.format(clock.fps())) # Note: OpenMV Cam runs about half as fast when connected
diff --git a/scripts/examples/Arduino/Nicla-Vision/25-Machine-Learning/tf_face_collection.py b/scripts/examples/Arduino/Nicla-Vision/25-Machine-Learning/tf_face_collection.py
new file mode 100644
index 000000000..7ae684ded
--- /dev/null
+++ b/scripts/examples/Arduino/Nicla-Vision/25-Machine-Learning/tf_face_collection.py
@@ -0,0 +1,31 @@
+# Face Collection
+#
+# Use this script to gather face images for building a TensorFlow dataset. This script automatically
+# zooms in on the largest face in the field of view, which you can then save using the data set editor.
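+#
+# Outside the IDE's dataset workflow, each zoomed face could also be written
+# straight to the SD card. A minimal sketch (the filename scheme below is
+# illustrative, not part of this script):
+#
+#   frame_number += 1
+#   sensor.get_fb().save("face_%d.bmp" % frame_number)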
+
+import sensor, image, time
+
+sensor.reset()
+sensor.set_pixformat(sensor.GRAYSCALE)
+sensor.set_framesize(sensor.QVGA)
+sensor.skip_frames(time = 2000)
+
+clock = time.clock()
+
+largest_face = None
+largest_face_timeout = 0
+
+while(True):
+    clock.tick()
+
+    faces = sensor.snapshot().gamma_corr(contrast=1.5).find_features(image.HaarCascade("frontalface"))
+
+    if faces:
+        largest_face = max(faces, key = lambda f: f[2] * f[3])
+        largest_face_timeout = 20
+
+    if largest_face_timeout > 0:
+        sensor.get_fb().crop(roi=largest_face)
+        largest_face_timeout -= 1
+
+    print(clock.fps())
diff --git a/scripts/examples/Arduino/Nicla-Vision/25-Machine-Learning/tf_face_recognition.py b/scripts/examples/Arduino/Nicla-Vision/25-Machine-Learning/tf_face_recognition.py
new file mode 100644
index 000000000..b31bc16b9
--- /dev/null
+++ b/scripts/examples/Arduino/Nicla-Vision/25-Machine-Learning/tf_face_recognition.py
@@ -0,0 +1,41 @@
+# Face Recognition
+#
+# Use this script to run a TensorFlow lite image classifier on faces detected within an image.
+# The classifier is free to do facial recognition, expression detection, or whatever.
+
+import sensor, image, time, tf
+
+sensor.reset()
+sensor.set_pixformat(sensor.GRAYSCALE)
+sensor.set_framesize(sensor.QVGA)
+sensor.skip_frames(time = 2000)
+
+clock = time.clock()
+
+net = tf.load("trained.tflite", load_to_fb=True)
+labels = [l.rstrip('\n') for l in open("labels.txt")]
+
+while(True):
+    clock.tick()
+
+    # Take a picture and brighten things up for the frontal face detector.
+    img = sensor.snapshot().gamma_corr(contrast=1.5)
+
+    # Returns a list of rects (x, y, w, h) where faces are.
+    faces = img.find_features(image.HaarCascade("frontalface"))
+
+    for f in faces:
+
+        # Classify a face and get the class scores list
+        scores = net.classify(img, roi=f)[0].output()
+
+        # Find the highest class score and lookup the label for that
+        label = labels[scores.index(max(scores))]
+
+        # Draw a box around the face
+        img.draw_rectangle(f)
+
+        # Draw the label above the face
+        img.draw_string(f[0]+3, f[1]-1, label, mono_space=False)
+
+    print(clock.fps())
diff --git a/scripts/examples/Arduino/Nicla-Vision/25-Machine-Learning/tf_mobilenet_search_just_center.py b/scripts/examples/Arduino/Nicla-Vision/25-Machine-Learning/tf_mobilenet_search_just_center.py
new file mode 100644
index 000000000..e8f12f924
--- /dev/null
+++ b/scripts/examples/Arduino/Nicla-Vision/25-Machine-Learning/tf_mobilenet_search_just_center.py
@@ -0,0 +1,66 @@
+# TensorFlow Lite Mobilenet V1 Example
+#
+# Google's Mobilenet V1 detects 1000 classes of objects.
+#
+# WARNING: Mobilenet is trained on ImageNet and isn't meant to classify anything
+# in the real world. It's just designed to score well on the ImageNet dataset.
+# This example just shows off running mobilenet on the OpenMV Cam. However, the
+# default model is not really usable for anything. You have to use transfer
+# learning to apply the model to a target problem by re-training the model.
+#
+# NOTE: This example only works on the OpenMV Cam H7 Plus (that has SDRAM) and better!
+# To get the models please see the CNN Network library in OpenMV IDE under
+# Tools -> Machine Vision. The labels are there too.
+# You should insert a microSD card into your camera and copy-paste the mobilenet_labels.txt
+# file and your chosen model into the root folder for this script to work.
+#
+# In this example we slide the detector window over the image and get a list
+# of activations. Note that using a CNN with a sliding window is extremely
+# computationally expensive, so for an exhaustive search do not expect the CNN to be real-time.
+
+import sensor, image, time, os, tf
+
+sensor.reset() # Reset and initialize the sensor.
+sensor.set_pixformat(sensor.GRAYSCALE) # Set pixel format to GRAYSCALE (or RGB565)
+sensor.set_framesize(sensor.QVGA) # Set frame size to QVGA (320x240)
+sensor.set_windowing((240, 240)) # Set 240x240 window.
+sensor.skip_frames(time=2000) # Let the camera adjust.
+
+mobilenet_version = "1" # 1
+mobilenet_width = "0.5" # 1.0, 0.75, 0.50, 0.25
+mobilenet_resolution = "128" # 224, 192, 160, 128
+
+mobilenet = "mobilenet_v%s_%s_%s_quant.tflite" % (mobilenet_version, mobilenet_width, mobilenet_resolution)
+labels = [line.rstrip('\n') for line in open("mobilenet_labels.txt")]
+
+clock = time.clock()
+while(True):
+    clock.tick()
+
+    img = sensor.snapshot()
+
+    # net.classify() will run the network on an roi in the image (or on the whole image if the roi is not
+    # specified). A classification score output vector will be generated for each location. At each scale the
+    # detection window is moved around in the ROI using x_overlap (0-1) and y_overlap (0-1) as a guide.
+    # If you set the overlap to 0.5 then each detection window will overlap the previous one by 50%. Note
+    # the computational work load goes WAY up the more overlap. Finally, for multi-scale matching after
+    # sliding the network around in the x/y dimensions the detection window will shrink by scale_mul (0-1)
+    # down to min_scale (0-1). For example, if scale_mul is 0.5 the detection window will shrink by 50%.
+    # Note that at a lower scale there's even more area to search if x_overlap and y_overlap are small...
+
+    # Setting x_overlap=-1 forces the window to stay centered in the ROI in the x direction always. If
+    # y_overlap is not -1 the method will search in all vertical positions.
+
+    # Setting y_overlap=-1 forces the window to stay centered in the ROI in the y direction always. If
+    # x_overlap is not -1 the method will search in all horizontal positions.
+
+    # default settings just do one detection... change them to search the image...
+    for obj in tf.classify(mobilenet, img, min_scale=1.0, scale_mul=0.5, x_overlap=-1, y_overlap=-1):
+        print("**********\nTop 5 Detections at [x=%d,y=%d,w=%d,h=%d]" % obj.rect())
+        img.draw_rectangle(obj.rect())
+        # This combines the labels and confidence values into a list of tuples
+        # and then sorts that list by the confidence values.
+        sorted_list = sorted(zip(labels, obj.output()), key = lambda x: x[1], reverse = True)
+        for i in range(5):
+            print("%s = %f" % (sorted_list[i][0], sorted_list[i][1]))
+    print(clock.fps(), "fps")
diff --git a/scripts/examples/Arduino/Nicla-Vision/25-Machine-Learning/tf_mobilenet_search_whole_window.py b/scripts/examples/Arduino/Nicla-Vision/25-Machine-Learning/tf_mobilenet_search_whole_window.py
new file mode 100644
index 000000000..da7869c31
--- /dev/null
+++ b/scripts/examples/Arduino/Nicla-Vision/25-Machine-Learning/tf_mobilenet_search_whole_window.py
@@ -0,0 +1,60 @@
+# TensorFlow Lite Mobilenet V1 Example
+#
+# Google's Mobilenet V1 detects 1000 classes of objects.
+#
+# WARNING: Mobilenet is trained on ImageNet and isn't meant to classify anything
+# in the real world. It's just designed to score well on the ImageNet dataset.
+# This example just shows off running mobilenet on the OpenMV Cam. However, the
+# default model is not really usable for anything. You have to use transfer
+# learning to apply the model to a target problem by re-training the model.
+#
+# NOTE: This example only works on the OpenMV Cam H7 Plus (that has SDRAM) and better!
+# To get the models please see the CNN Network library in OpenMV IDE under
+# Tools -> Machine Vision. The labels are there too.
+# You should insert a microSD card into your camera and copy-paste the mobilenet_labels.txt
+# file and your chosen model into the root folder for this script to work.
+#
+# In this example we slide the detector window over the image and get a list
+# of activations. Note that using a CNN with a sliding window is extremely
+# computationally expensive, so for an exhaustive search do not expect the CNN to be real-time.
+
+import sensor, image, time, os, tf
+
+sensor.reset() # Reset and initialize the sensor.
+sensor.set_pixformat(sensor.GRAYSCALE) # Set pixel format to GRAYSCALE (or RGB565)
+sensor.set_framesize(sensor.QVGA) # Set frame size to QVGA (320x240)
+sensor.set_windowing((240, 240)) # Set 240x240 window.
+sensor.skip_frames(time=2000) # Let the camera adjust.
+
+mobilenet_version = "1" # 1
+mobilenet_width = "0.5" # 1.0, 0.75, 0.50, 0.25
+mobilenet_resolution = "128" # 224, 192, 160, 128
+
+mobilenet = "mobilenet_v%s_%s_%s_quant.tflite" % (mobilenet_version, mobilenet_width, mobilenet_resolution)
+labels = [line.rstrip('\n') for line in open("mobilenet_labels.txt")]
+
+clock = time.clock()
+while(True):
+    clock.tick()
+
+    img = sensor.snapshot()
+
+    # net.classify() will run the network on an roi in the image (or on the whole image if the roi is not
+    # specified). A classification score output vector will be generated for each location. At each scale the
+    # detection window is moved around in the ROI using x_overlap (0-1) and y_overlap (0-1) as a guide.
+    # If you set the overlap to 0.5 then each detection window will overlap the previous one by 50%. Note
+    # the computational work load goes WAY up the more overlap. Finally, for multi-scale matching after
+    # sliding the network around in the x/y dimensions the detection window will shrink by scale_mul (0-1)
+    # down to min_scale (0-1). For example, if scale_mul is 0.5 the detection window will shrink by 50%.
+    # Note that at a lower scale there's even more area to search if x_overlap and y_overlap are small...
+
+    # default settings just do one detection... change them to search the image...
+    for obj in tf.classify(mobilenet, img, min_scale=1.0, scale_mul=0.5, x_overlap=0.0, y_overlap=0.0):
+        print("**********\nTop 5 Detections at [x=%d,y=%d,w=%d,h=%d]" % obj.rect())
+        img.draw_rectangle(obj.rect())
+        # This combines the labels and confidence values into a list of tuples
+        # and then sorts that list by the confidence values.
+        sorted_list = sorted(zip(labels, obj.output()), key = lambda x: x[1], reverse = True)
+        for i in range(5):
+            print("%s = %f" % (sorted_list[i][0], sorted_list[i][1]))
+    print(clock.fps(), "fps")
diff --git a/scripts/examples/Arduino/Nicla-Vision/25-Machine-Learning/tf_object_detection.py b/scripts/examples/Arduino/Nicla-Vision/25-Machine-Learning/tf_object_detection.py
new file mode 100644
index 000000000..48a2254ba
--- /dev/null
+++ b/scripts/examples/Arduino/Nicla-Vision/25-Machine-Learning/tf_object_detection.py
@@ -0,0 +1,51 @@
+# TensorFlow Lite Object Detection Example
+#
+# This example shows off object detection. Object detection is much more powerful than
+# object classification. It can locate multiple objects in the image.
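+#
+# The labels.txt file loaded below is expected to hold one class name per
+# line, in the same order as the model's output channels, since the code
+# indexes it by class number. For example (illustrative names only):
+#
+#   background
+#   person
+#   car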
+
+import sensor, image, time, os, tf
+
+sensor.reset() # Reset and initialize the sensor.
+sensor.set_pixformat(sensor.RGB565) # Set pixel format to RGB565 (or GRAYSCALE)
+sensor.set_framesize(sensor.QVGA) # Set frame size to QVGA (320x240)
+sensor.set_windowing((240, 240)) # Set 240x240 window.
+sensor.skip_frames(time=2000) # Let the camera adjust.
+
+net = tf.load('', load_to_fb=True) # Set this to the path of your object detection model.
+labels = []
+
+try: # Load labels if they exist
+    labels = [line.rstrip('\n') for line in open("labels.txt")]
+except:
+    pass
+
+colors = [ # Add more colors if you are detecting more than 7 types of classes at once.
+    (255,   0,   0),
+    (  0, 255,   0),
+    (255, 255,   0),
+    (  0,   0, 255),
+    (255,   0, 255),
+    (  0, 255, 255),
+    (255, 255, 255),
+]
+
+clock = time.clock()
+while(True):
+    clock.tick()
+
+    img = sensor.snapshot()
+
+    # detect() segments an object using the provided segmentation model. This produces multiple
+    # grayscale images per object class that we are trying to detect. detect() then runs
+    # find_blobs() internally on the segmented images to find all blob locations and then returns
+    # the bounding boxes of all blobs found per object class. So, detect() returns a list of lists of
+    # classification objects and the respective confidence level.
+
+    for i, detection_list in enumerate(net.detect(img, thresholds=[(128, 255)])):
+        if (i < len(labels)):
+            print("********** %s **********" % labels[i])
+        for d in detection_list:
+            print(d)
+            img.draw_rectangle(d.rect(), color=colors[i])
+
+    print(clock.fps(), "fps", end="\n\n")
diff --git a/scripts/examples/Arduino/Nicla-Vision/25-Machine-Learning/tf_person_detection_search_just_center.py b/scripts/examples/Arduino/Nicla-Vision/25-Machine-Learning/tf_person_detection_search_just_center.py
new file mode 100644
index 000000000..b1531f58b
--- /dev/null
+++ b/scripts/examples/Arduino/Nicla-Vision/25-Machine-Learning/tf_person_detection_search_just_center.py
@@ -0,0 +1,48 @@
+# TensorFlow Lite Person Detection Example
+#
+# Google's Person Detection Model detects if a person is in view.
+#
+# In this example we slide the detector window over the image and get a list
+# of activations. Note that using a CNN with a sliding window is extremely
+# computationally expensive, so for an exhaustive search do not expect the CNN to be real-time.
+
+import sensor, image, time, os, tf
+
+sensor.reset() # Reset and initialize the sensor.
+sensor.set_pixformat(sensor.GRAYSCALE) # Set pixel format to GRAYSCALE (or RGB565)
+sensor.set_framesize(sensor.QVGA) # Set frame size to QVGA (320x240)
+sensor.set_windowing((240, 240)) # Set 240x240 window.
+sensor.skip_frames(time=2000) # Let the camera adjust.
+
+# Load the built-in person detection network (the network is in your OpenMV Cam's firmware).
+labels, net = tf.load_builtin_model('person_detection')
+
+clock = time.clock()
+while(True):
+    clock.tick()
+
+    img = sensor.snapshot()
+
+    # net.classify() will run the network on an roi in the image (or on the whole image if the roi is not
+    # specified). A classification score output vector will be generated for each location. At each scale the
+    # detection window is moved around in the ROI using x_overlap (0-1) and y_overlap (0-1) as a guide.
+    # If you set the overlap to 0.5 then each detection window will overlap the previous one by 50%. Note
+    # the computational work load goes WAY up the more overlap. Finally, for multi-scale matching after
+    # sliding the network around in the x/y dimensions the detection window will shrink by scale_mul (0-1)
+    # down to min_scale (0-1). For example, if scale_mul is 0.5 the detection window will shrink by 50%.
+    # Note that at a lower scale there's even more area to search if x_overlap and y_overlap are small...
+
+    # Setting x_overlap=-1 forces the window to stay centered in the ROI in the x direction always. If
+    # y_overlap is not -1 the method will search in all vertical positions.
+
+    # Setting y_overlap=-1 forces the window to stay centered in the ROI in the y direction always. If
+    # x_overlap is not -1 the method will search in all horizontal positions.
+
+    # default settings just do one detection... change them to search the image...
+    for obj in net.classify(img, min_scale=0.5, scale_mul=0.5, x_overlap=-1, y_overlap=-1):
+        print("**********\nDetections at [x=%d,y=%d,w=%d,h=%d]" % obj.rect())
+        for i in range(len(obj.output())):
+            print("%s = %f" % (labels[i], obj.output()[i]))
+        img.draw_rectangle(obj.rect())
+        img.draw_string(obj.x()+3, obj.y()-1, labels[obj.output().index(max(obj.output()))], mono_space = False)
+    print(clock.fps(), "fps")
diff --git a/scripts/examples/Arduino/Nicla-Vision/25-Machine-Learning/tf_person_detection_search_whole_window.py b/scripts/examples/Arduino/Nicla-Vision/25-Machine-Learning/tf_person_detection_search_whole_window.py
new file mode 100644
index 000000000..ff05e03d6
--- /dev/null
+++ b/scripts/examples/Arduino/Nicla-Vision/25-Machine-Learning/tf_person_detection_search_whole_window.py
@@ -0,0 +1,42 @@
+# TensorFlow Lite Person Detection Example
+#
+# Google's Person Detection Model detects if a person is in view.
+#
+# In this example we slide the detector window over the image and get a list
+# of activations. Note that using a CNN with a sliding window is extremely
+# computationally expensive, so for an exhaustive search do not expect the CNN to be real-time.
+
+import sensor, image, time, os, tf
+
+sensor.reset() # Reset and initialize the sensor.
+sensor.set_pixformat(sensor.GRAYSCALE) # Set pixel format to GRAYSCALE (or RGB565)
+sensor.set_framesize(sensor.QVGA) # Set frame size to QVGA (320x240)
+sensor.set_windowing((240, 240)) # Set 240x240 window.
+sensor.skip_frames(time=2000) # Let the camera adjust.
+
+# Load the built-in person detection network (the network is in your OpenMV Cam's firmware).
+labels, net = tf.load_builtin_model('person_detection')
+
+clock = time.clock()
+while(True):
+    clock.tick()
+
+    img = sensor.snapshot()
+
+    # net.classify() will run the network on an roi in the image (or on the whole image if the roi is not
+    # specified). A classification score output vector will be generated for each location. At each scale the
+    # detection window is moved around in the ROI using x_overlap (0-1) and y_overlap (0-1) as a guide.
+    # If you set the overlap to 0.5 then each detection window will overlap the previous one by 50%. Note
+    # the computational work load goes WAY up the more overlap. Finally, for multi-scale matching after
+    # sliding the network around in the x/y dimensions the detection window will shrink by scale_mul (0-1)
+    # down to min_scale (0-1). For example, if scale_mul is 0.5 the detection window will shrink by 50%.
+    # Note that at a lower scale there's even more area to search if x_overlap and y_overlap are small...
+
+    # default settings just do one detection... change them to search the image...
+    for obj in net.classify(img, min_scale=1.0, scale_mul=0.5, x_overlap=0.0, y_overlap=0.0):
+        print("**********\nDetections at [x=%d,y=%d,w=%d,h=%d]" % obj.rect())
+        for i in range(len(obj.output())):
+            print("%s = %f" % (labels[i], obj.output()[i]))
+        img.draw_rectangle(obj.rect())
+        img.draw_string(obj.x()+3, obj.y()-1, labels[obj.output().index(max(obj.output()))], mono_space = False)
+    print(clock.fps(), "fps")
diff --git a/scripts/examples/Arduino/Nicla-Vision/26-April-Tags/find_apriltags.py b/scripts/examples/Arduino/Nicla-Vision/26-April-Tags/find_apriltags.py
new file mode 100644
index 000000000..bbb274f81
--- /dev/null
+++ b/scripts/examples/Arduino/Nicla-Vision/26-April-Tags/find_apriltags.py
@@ -0,0 +1,55 @@
+# AprilTags Example
+#
+# This example shows the power of the OpenMV Cam to detect April Tags
+# on the OpenMV Cam M7. The M4 versions cannot detect April Tags.
+
+import sensor, image, time, math
+
+sensor.reset()
+sensor.set_pixformat(sensor.RGB565)
+sensor.set_framesize(sensor.QQVGA) # we run out of memory if the resolution is much bigger...
+sensor.skip_frames(time = 2000)
+clock = time.clock()
+
+# Note! Unlike find_qrcodes the find_apriltags method does not need lens correction on the image to work.
+
+# The apriltag code supports up to 6 tag families which can be processed at the same time.
+# Returned tag objects will have their tag family and id within the tag family.
+
+tag_families = 0
+tag_families |= image.TAG16H5 # comment out to disable this family
+tag_families |= image.TAG25H7 # comment out to disable this family
+tag_families |= image.TAG25H9 # comment out to disable this family
+tag_families |= image.TAG36H10 # comment out to disable this family
+tag_families |= image.TAG36H11 # comment out to disable this family (default family)
+tag_families |= image.ARTOOLKIT # comment out to disable this family
+
+# What's the difference between tag families? Well, for example, the TAG16H5 family is effectively
+# a 4x4 square tag. So, this means it can be seen at a longer distance than a TAG36H11 tag which
+# is a 6x6 square tag. However, the lower H value (H5 versus H11) means that the false positive
+# rate for the 4x4 tag is much, much, much, higher than the 6x6 tag. So, unless you have a
+# reason to use the other tag families just use TAG36H11 which is the default family.
+
+def family_name(tag):
+    if(tag.family() == image.TAG16H5):
+        return "TAG16H5"
+    if(tag.family() == image.TAG25H7):
+        return "TAG25H7"
+    if(tag.family() == image.TAG25H9):
+        return "TAG25H9"
+    if(tag.family() == image.TAG36H10):
+        return "TAG36H10"
+    if(tag.family() == image.TAG36H11):
+        return "TAG36H11"
+    if(tag.family() == image.ARTOOLKIT):
+        return "ARTOOLKIT"
+
+while(True):
+    clock.tick()
+    img = sensor.snapshot()
+    for tag in img.find_apriltags(families=tag_families): # defaults to TAG36H11 without "families".
+        img.draw_rectangle(tag.rect(), color = (255, 0, 0))
+        img.draw_cross(tag.cx(), tag.cy(), color = (0, 255, 0))
+        print_args = (family_name(tag), tag.id(), (180 * tag.rotation()) / math.pi)
+        print("Tag Family %s, Tag ID %d, rotation %f (degrees)" % print_args)
+    print(clock.fps())
diff --git a/scripts/examples/Arduino/Nicla-Vision/26-April-Tags/find_apriltags_3d_pose.py b/scripts/examples/Arduino/Nicla-Vision/26-April-Tags/find_apriltags_3d_pose.py
new file mode 100644
index 000000000..c4cb289d6
--- /dev/null
+++ b/scripts/examples/Arduino/Nicla-Vision/26-April-Tags/find_apriltags_3d_pose.py
@@ -0,0 +1,55 @@
+# AprilTags Example
+#
+# This example shows the power of the OpenMV Cam to detect April Tags
+# on the OpenMV Cam M7. The M4 versions cannot detect April Tags.
+
+import sensor, image, time, math
+
+sensor.reset()
+sensor.set_pixformat(sensor.RGB565)
+sensor.set_framesize(sensor.QQVGA) # we run out of memory if the resolution is much bigger...
+sensor.skip_frames(time = 2000)
+clock = time.clock()
+
+# Note! Unlike find_qrcodes the find_apriltags method does not need lens correction on the image to work.
+
+# What's the difference between tag families? Well, for example, the TAG16H5 family is effectively
+# a 4x4 square tag. So, this means it can be seen at a longer distance than a TAG36H11 tag which
+# is a 6x6 square tag. However, the lower H value (H5 versus H11) means that the false positive
+# rate for the 4x4 tag is much, much, much, higher than the 6x6 tag. So, unless you have a
+# reason to use the other tag families just use TAG36H11 which is the default family.
+
+# The AprilTags library outputs the pose information for tags. This is the x/y/z translation and
+# x/y/z rotation. The x/y/z rotation is in radians and can be converted to degrees. As for
+# translation the units are dimensionless and you must apply a conversion function.
+
+# f_x is the x focal length of the camera. It should be equal to the lens focal length in mm
+# divided by the x sensor size in mm times the number of pixels in the image.
+# The below values are for the OV7725 camera with a 2.8 mm lens.
+
+# f_y is the y focal length of the camera. It should be equal to the lens focal length in mm
+# divided by the y sensor size in mm times the number of pixels in the image.
+# The below values are for the OV7725 camera with a 2.8 mm lens.
+
+# c_x is the image x center position in pixels.
+# c_y is the image y center position in pixels.
+
+f_x = (2.8 / 3.984) * 160 # find_apriltags defaults to this if not set
+f_y = (2.8 / 2.952) * 120 # find_apriltags defaults to this if not set
+c_x = 160 * 0.5 # find_apriltags defaults to this if not set (the image.w * 0.5)
+c_y = 120 * 0.5 # find_apriltags defaults to this if not set (the image.h * 0.5)
+
+def degrees(radians):
+    return (180 * radians) / math.pi
+
+while(True):
+    clock.tick()
+    img = sensor.snapshot()
+    for tag in img.find_apriltags(fx=f_x, fy=f_y, cx=c_x, cy=c_y): # defaults to TAG36H11
+        img.draw_rectangle(tag.rect(), color = (255, 0, 0))
+        img.draw_cross(tag.cx(), tag.cy(), color = (0, 255, 0))
+        print_args = (tag.x_translation(), tag.y_translation(), tag.z_translation(), \
+            degrees(tag.x_rotation()), degrees(tag.y_rotation()), degrees(tag.z_rotation()))
+        # Translation units are unknown. Rotation units are in degrees.
+ print("Tx: %f, Ty %f, Tz %f, Rx %f, Ry %f, Rz %f" % print_args) + print(clock.fps()) diff --git a/scripts/examples/Arduino/Nicla-Vision/26-April-Tags/find_apriltags_max_res.py b/scripts/examples/Arduino/Nicla-Vision/26-April-Tags/find_apriltags_max_res.py new file mode 100644 index 000000000..106650770 --- /dev/null +++ b/scripts/examples/Arduino/Nicla-Vision/26-April-Tags/find_apriltags_max_res.py @@ -0,0 +1,56 @@ +# AprilTags Max Res Example +# +# This example shows the power of the OpenMV Cam to detect April Tags +# on the OpenMV Cam M7. The M4 versions cannot detect April Tags. + +import sensor, image, time, math, omv + +sensor.reset() +sensor.set_pixformat(sensor.GRAYSCALE) +sensor.set_framesize(sensor.VGA) +sensor.set_windowing((240, 240)) +sensor.skip_frames(time = 2000) +clock = time.clock() + +# Note! Unlike find_qrcodes the find_apriltags method does not need lens correction on the image to work. + +# The apriltag code supports up to 6 tag families which can be processed at the same time. +# Returned tag objects will have their tag family and id within the tag family. + +tag_families = 0 +tag_families |= image.TAG16H5 # comment out to disable this family +tag_families |= image.TAG25H7 # comment out to disable this family +tag_families |= image.TAG25H9 # comment out to disable this family +tag_families |= image.TAG36H10 # comment out to disable this family +tag_families |= image.TAG36H11 # comment out to disable this family (default family) +tag_families |= image.ARTOOLKIT # comment out to disable this family + +# What's the difference between tag families? Well, for example, the TAG16H5 family is effectively +# a 4x4 square tag. So, this means it can be seen at a longer distance than a TAG36H11 tag which +# is a 6x6 square tag. However, the lower H value (H5 versus H11) means that the false positve +# rate for the 4x4 tag is much, much, much, higher than the 6x6 tag. So, unless you have a +# reason to use the other tags families just use TAG36H11 which is the default family. + +def family_name(tag): + if(tag.family() == image.TAG16H5): + return "TAG16H5" + if(tag.family() == image.TAG25H7): + return "TAG25H7" + if(tag.family() == image.TAG25H9): + return "TAG25H9" + if(tag.family() == image.TAG36H10): + return "TAG36H10" + if(tag.family() == image.TAG36H11): + return "TAG36H11" + if(tag.family() == image.ARTOOLKIT): + return "ARTOOLKIT" + +while(True): + clock.tick() + img = sensor.snapshot() + for tag in img.find_apriltags(families=tag_families): # defaults to TAG36H11 without "families". + img.draw_rectangle(tag.rect(), color = 127) + img.draw_cross(tag.cx(), tag.cy(), color = 127) + print_args = (family_name(tag), tag.id(), (180 * tag.rotation()) / math.pi) + print("Tag Family %s, Tag ID %d, rotation %f (degrees)" % print_args) + print(clock.fps()) diff --git a/scripts/examples/Arduino/Nicla-Vision/26-April-Tags/find_apriltags_w_lens_zoom.py b/scripts/examples/Arduino/Nicla-Vision/26-April-Tags/find_apriltags_w_lens_zoom.py new file mode 100644 index 000000000..b15b05f4c --- /dev/null +++ b/scripts/examples/Arduino/Nicla-Vision/26-April-Tags/find_apriltags_w_lens_zoom.py @@ -0,0 +1,31 @@ +# AprilTags Example +# +# This example shows the power of the OpenMV Cam to detect April Tags +# on the OpenMV Cam M7. The M4 versions cannot detect April Tags. + +import sensor, image, time, math + +sensor.reset() +sensor.set_pixformat(sensor.RGB565) +sensor.set_framesize(sensor.VGA) # we run out of memory if the resolution is much bigger... 
+sensor.set_windowing((160, 120)) # Look at the center 160x120 pixels of the VGA resolution.
+sensor.skip_frames(time=2000)
+clock = time.clock()
+
+# Note! Unlike find_qrcodes, the find_apriltags method does not need lens correction on the image to work.
+
+# What's the difference between tag families? For example, the TAG16H5 family is effectively
+# a 4x4 square tag, so it can be seen from a longer distance than a TAG36H11 tag, which
+# is a 6x6 square tag. However, the lower H value (H5 versus H11) means that the false
+# positive rate for the 4x4 tag is much higher than for the 6x6 tag. So, unless you have a
+# reason to use the other tag families, just use TAG36H11, which is the default family.
+
+while(True):
+    clock.tick()
+    img = sensor.snapshot()
+    for tag in img.find_apriltags(): # defaults to TAG36H11
+        img.draw_rectangle(tag.rect(), color=(255, 0, 0))
+        img.draw_cross(tag.cx(), tag.cy(), color=(0, 255, 0))
+        print_args = (tag.id(), (180 * tag.rotation()) / math.pi)
+        print("Tag Family TAG36H11, Tag ID %d, rotation %f (degrees)" % print_args)
+    print(clock.fps())
diff --git a/scripts/examples/Arduino/Nicla-Vision/26-April-Tags/find_small_apriltags.py b/scripts/examples/Arduino/Nicla-Vision/26-April-Tags/find_small_apriltags.py
new file mode 100644
index 000000000..d89db5046
--- /dev/null
+++ b/scripts/examples/Arduino/Nicla-Vision/26-April-Tags/find_small_apriltags.py
@@ -0,0 +1,67 @@
+# Find Small AprilTags
+#
+# This script shows how to use blob tracking as a pre-filter for finding AprilTags:
+# blob tracking first finds the area where a tag is likely to be, and find_apriltags
+# is then called only on that region of the image.
+
+# Note: this script works well assuming most parts of the image do not pass the
+# thresholding test... otherwise, you don't get a distance benefit.
+
+import sensor, image, time, math, omv
+
+# Set the thresholds to find a white object (i.e. the tag border).
+thresholds = (150, 255)
+
+sensor.reset()
+sensor.set_pixformat(sensor.GRAYSCALE)
+sensor.set_framesize(sensor.QVGA)
+sensor.skip_frames(time=200) # increase this to let the auto methods run for longer
+clock = time.clock()
+
+# The apriltag code supports up to 6 tag families, which can be processed at the same time.
+# Returned tag objects will have their tag family and id within the tag family.
+tag_families = 0
+tag_families |= image.TAG16H5 # comment out to disable this family
+tag_families |= image.TAG25H7 # comment out to disable this family
+tag_families |= image.TAG25H9 # comment out to disable this family
+tag_families |= image.TAG36H10 # comment out to disable this family
+tag_families |= image.TAG36H11 # comment out to disable this family (default family)
+tag_families |= image.ARTOOLKIT # comment out to disable this family
+
+while(True):
+    clock.tick()
+    img = sensor.snapshot()
+
+    # First, we find blobs that may be candidates for tags.
+    box_list = []
+
+    # AprilTags may fail due to not having enough RAM given the image size being passed.
+    tag_list = []
+
+    for blob in img.find_blobs([thresholds], pixels_threshold=100, area_threshold=100, merge=True):
+        # Next, we look for a tag in an ROI that's bigger than the blob.
+        w = min(max(int(blob.w() * 1.2), 10), 160) # Not too small, not too big.
+        h = min(max(int(blob.h() * 1.2), 10), 160) # Not too small, not too big.
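+        # The ROI is grown ~20% beyond the blob (and clamped above) because
+        # find_apriltags needs the tag's full black border, plus a little margin,
+        # inside the ROI; a tight blob bounding box can clip the border.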
+        x = min(max(int(blob.x() + (blob.w() / 4) - (w * 0.1)), 0), img.width() - 1)
+        y = min(max(int(blob.y() + (blob.h() / 4) - (h * 0.1)), 0), img.height() - 1)
+
+        box_list.append((x, y, w, h)) # We'll draw these later.
+
+        # Since we constrain the ROI size, find_apriltags shouldn't run out of RAM.
+        # But if it does, we handle it...
+        try:
+            tag_list.extend(img.find_apriltags(roi=(x, y, w, h), families=tag_families))
+        except MemoryError: # Don't catch all exceptions, otherwise you can't stop the script.
+            pass
+
+    for b in box_list:
+        img.draw_rectangle(b)
+    # Now print out the found tags.
+    for tag in tag_list:
+        img.draw_rectangle(tag.rect())
+        img.draw_cross(tag.cx(), tag.cy())
+        for c in tag.corners():
+            img.draw_circle(c[0], c[1], 5)
+        print("Tag:", tag.cx(), tag.cy(), tag.rotation(), tag.id())
diff --git a/scripts/examples/Arduino/Nicla-Vision/34-Remote-Control/image_transfer_jpg_as_the_remote_device_for_your_computer.py b/scripts/examples/Arduino/Nicla-Vision/34-Remote-Control/image_transfer_jpg_as_the_remote_device_for_your_computer.py
new file mode 100644
index 000000000..6dd962dd7
--- /dev/null
+++ b/scripts/examples/Arduino/Nicla-Vision/34-Remote-Control/image_transfer_jpg_as_the_remote_device_for_your_computer.py
@@ -0,0 +1,87 @@
+# Image Transfer - As The Remote Device
+#
+# This script is meant to talk to "image_transfer_jpg_as_the_controller_device.py" running on your computer.
+#
+# This script shows off how to transfer the frame buffer to your computer as a JPEG image.
+
+import image, network, omv, rpc, sensor, struct
+
+sensor.reset()
+sensor.set_pixformat(sensor.RGB565)
+sensor.set_framesize(sensor.QVGA)
+sensor.skip_frames(time=2000)
+
+# Turn off the frame buffer connection to the IDE from the OpenMV Cam side.
+#
+# This needs to be done when manually compressing JPEG images at higher quality
+# so that the OpenMV Cam does not try to stream them to the IDE using a fallback
+# mechanism if the JPEG image is too large to fit in the IDE JPEG frame buffer on the OpenMV Cam.
+
+omv.disable_fb(True)
+
+# The RPC library above is installed on your OpenMV Cam and provides multiple classes
+# for allowing your OpenMV Cam to be controlled over USB or LAN/WLAN.
+
+################################################################
+# Choose the interface you wish to control your OpenMV Cam over.
+################################################################
+
+# Uncomment the line below to set up your OpenMV Cam for control over a USB VCP.
+#
+interface = rpc.rpc_usb_vcp_slave()
+
+# Uncomment the lines below to set up your OpenMV Cam for control over the LAN.
+#
+# network_if = network.LAN()
+# network_if.active(True)
+# network_if.ifconfig('dhcp')
+#
+# interface = rpc.rpc_network_slave(network_if)
+
+# Uncomment the lines below to set up your OpenMV Cam for control over the WLAN.
+#
+# network_if = network.WLAN(network.STA_IF)
+# network_if.active(True)
+# network_if.connect('your-ssid', 'your-password')
+#
+# interface = rpc.rpc_network_slave(network_if)
+
+################################################################
+# Callbacks
+################################################################
+
+# When called, sets the pixformat and framesize, takes a snapshot,
+# and then returns the size of the compressed JPEG so the controller
+# knows how much image data to read back.
+#
+# data is a pixformat string and framesize string.
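+# For example, the controller side would invoke this callback with something
+# like interface.call("jpeg_image_snapshot", "sensor.RGB565,sensor.QVGA") --
+# the exact call shown here is an assumption; see the controller script.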
+def jpeg_image_snapshot(data):
+    pixformat, framesize = bytes(data).decode().split(",")
+    sensor.set_pixformat(eval(pixformat))
+    sensor.set_framesize(eval(framesize))
+    img = sensor.snapshot().compress(quality=90)
+    return struct.pack("<I", img.size())
diff --git a/scripts/examples/Arduino/Nicla-Vision/40-WiFi/scan.py b/scripts/examples/Arduino/Nicla-Vision/40-WiFi/scan.py
new file mode 100644
index 000000000..8c90522c6
--- /dev/null
+++ b/scripts/examples/Arduino/Nicla-Vision/40-WiFi/scan.py
@@ -0,0 +1,18 @@
+# Scan Example
+#
+# This example shows how to scan for WiFi networks.
+
+import time, network
+
+wlan = network.WLAN(network.STA_IF)
+wlan.deinit()
+wlan.active(True)
+
+print("Scanning...")
+while (True):
+    scan_result = wlan.scan()
+    for ap in scan_result:
+        print("SSID: %s BSSID: %s Channel: %d RSSI: %d Auth: %d"
+              % (ap[0], ":".join(["%X" % i for i in ap[1]]), ap[2], ap[3], ap[4]))
+    print()
+    time.sleep_ms(1000)
diff --git a/scripts/examples/Arduino/Nicla-Vision/40-WiFi/static_ip.py b/scripts/examples/Arduino/Nicla-Vision/40-WiFi/static_ip.py
new file mode 100644
index 000000000..cc040b297
--- /dev/null
+++ b/scripts/examples/Arduino/Nicla-Vision/40-WiFi/static_ip.py
@@ -0,0 +1,33 @@
+# NTP Example using a static IP.
+#
+# This example shows how to get the current time using NTP with a
+# statically-configured network interface.
+
+import network, socket, ustruct, utime
+
+SSID = '' # Network SSID
+KEY = ''  # Network key
+
+# 2208988800 converts NTP time (epoch 1900) to Unix time (epoch 1970);
+# 946684800 then shifts that to the MicroPython embedded epoch (2000).
+TIMESTAMP = 2208988800 + 946684800
+
+# Init the wlan module and connect to the network.
+print("Trying to connect... (This may take a while)...")
+wlan = network.WLAN(network.STA_IF)
+wlan.deinit()
+wlan.active(True)
+# ifconfig must be called before connect()
+wlan.ifconfig(('192.168.1.200', '255.255.255.0', '192.168.1.1', '192.168.1.1'))
+wlan.connect(SSID, KEY, timeout=30000)
+
+# Create a new UDP socket
+client = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
+
+# Get addr info via DNS
+addr = socket.getaddrinfo("pool.ntp.org", 123)[0][4]
+
+# Send an NTP query (0x1b selects version 3, client mode)
+client.sendto('\x1b' + 47 * '\0', addr)
+data, address = client.recvfrom(1024)
+
+# Extract the transmit timestamp (field 10) and print the time
+t = ustruct.unpack(">IIIIIIIIIIII", data)[10] - TIMESTAMP
+print("Year:%d Month:%d Day:%d Time: %d:%d:%d" % (utime.localtime(t)[0:6]))
diff --git a/scripts/examples/Arduino/Nicla-Vision/41-Bluetooth/ble_temperature.py b/scripts/examples/Arduino/Nicla-Vision/41-Bluetooth/ble_temperature.py
new file mode 100644
index 000000000..7a48d0c91
--- /dev/null
+++ b/scripts/examples/Arduino/Nicla-Vision/41-Bluetooth/ble_temperature.py
@@ -0,0 +1,97 @@
+# This example demonstrates a simple temperature sensor peripheral.
+#
+# The sensor's local value updates every second, and it will notify
+# any connected central every 10 seconds.
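+#
+# Values use the standard Temperature characteristic (0x2A6E) encoding: a
+# little-endian sint16 in hundredths of a degree Celsius, so for example
+# 23.45 C is sent as struct.pack("<h", 2345).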
+
+import bluetooth
+import random
+import struct
+import time
+from ble_advertising import advertising_payload
+
+from micropython import const
+
+_IRQ_CENTRAL_CONNECT = const(1)
+_IRQ_CENTRAL_DISCONNECT = const(2)
+_IRQ_GATTS_INDICATE_DONE = const(20)
+
+_FLAG_READ = const(0x0002)
+_FLAG_NOTIFY = const(0x0010)
+_FLAG_INDICATE = const(0x0020)
+
+# org.bluetooth.service.environmental_sensing
+_ENV_SENSE_UUID = bluetooth.UUID(0x181A)
+# org.bluetooth.characteristic.temperature
+_TEMP_CHAR = (
+    bluetooth.UUID(0x2A6E),
+    _FLAG_READ | _FLAG_NOTIFY | _FLAG_INDICATE,
+)
+_ENV_SENSE_SERVICE = (
+    _ENV_SENSE_UUID,
+    (_TEMP_CHAR,),
+)
+
+# org.bluetooth.characteristic.gap.appearance.xml
+_ADV_APPEARANCE_GENERIC_THERMOMETER = const(768)
+
+
+class BLETemperature:
+    def __init__(self, ble, name="PORTENTA_BLE"):
+        self._ble = ble
+        self._ble.active(True)
+        self._ble.irq(self._irq)
+        ((self._handle,),) = self._ble.gatts_register_services((_ENV_SENSE_SERVICE,))
+        self._connections = set()
+        self._payload = advertising_payload(
+            name=name, services=[_ENV_SENSE_UUID], appearance=_ADV_APPEARANCE_GENERIC_THERMOMETER
+        )
+        self._advertise()
+
+    def _irq(self, event, data):
+        # Track connections so we can send notifications.
+        if event == _IRQ_CENTRAL_CONNECT:
+            conn_handle, _, _ = data
+            self._connections.add(conn_handle)
+        elif event == _IRQ_CENTRAL_DISCONNECT:
+            conn_handle, _, _ = data
+            self._connections.remove(conn_handle)
+            # Start advertising again to allow a new connection.
+            self._advertise()
+        elif event == _IRQ_GATTS_INDICATE_DONE:
+            conn_handle, value_handle, status = data
+
+    def set_temperature(self, temp_deg_c, notify=False, indicate=False):
+        # Data is sint16 in degrees Celsius with a resolution of 0.01 degrees Celsius.
+        # Write the local value, ready for a central to read.
+        self._ble.gatts_write(self._handle, struct.pack("