From f985ef0ef76a70edb14a1d0fe8171c680adda50b Mon Sep 17 00:00:00 2001
From: iabdalkader
Date: Tue, 28 Jun 2022 22:45:23 +0200
Subject: [PATCH] scripts/examples: Refactor examples.

---
 .../00-Board-Control/adc_read_ext_channel.py} | 0
 .../00-Board-Control}/adc_read_int_channel.py | 0
 .../00-Board-Control/blinky.py | 14 +
 .../00-Board-Control}/can.py | 0
 .../00-Board-Control}/cpufreq_scaling.py | 0
 .../00-Board-Control}/dac_write.py | 0
 .../00-Board-Control}/dac_write_timed.py | 0
 .../00-Board-Control}/i2c_control.py | 0
 .../00-Board-Control}/led_control.py | 0
 .../00-Board-Control}/pin_control.py | 0
 .../00-Board-Control}/pwm_control.py | 0
 .../00-Board-Control}/rtc.py | 0
 .../00-Board-Control}/servo_control.py | 0
 .../00-Board-Control}/spi_control.py | 0
 .../00-Board-Control}/timer_control.py | 0
 .../00-Board-Control}/timer_tests.py | 0
 .../00-Board-Control}/uart_control.py | 0
 .../00-Board-Control}/usb_hid.py | 0
 .../00-Board-Control}/usb_vcp.py | 0
 .../00-Board-Control}/vsync_gpio_output.py | 0
 .../01-WiFi-Shield}/connect.py | 0
 .../01-WiFi-Shield}/dns.py | 0
 .../01-WiFi-Shield}/fw_update.py | 0
 .../01-WiFi-Shield}/http_client.py | 0
 .../01-WiFi-Shield}/http_client_ssl.py | 0
 .../01-WiFi-Shield}/http_post.py | 0
 .../01-WiFi-Shield}/mjpeg_streamer.py | 0
 .../01-WiFi-Shield}/mjpeg_streamer_ap.py | 0
 .../01-WiFi-Shield}/mjpeg_streamer_fir.py | 0
 .../01-WiFi-Shield}/mqtt_pub.py | 0
 .../01-WiFi-Shield}/mqtt_sub.py | 0
 .../01-WiFi-Shield}/ntp.py | 0
 .../01-WiFi-Shield}/scan.py | 0
 .../01-WiFi-Shield}/static_ip.py | 0
 .../02-LCD-Shield}/lcd.py | 0
 .../03-Servo-Shield}/main.py | 0
 .../03-Servo-Shield}/pca9685.py | 0
 .../03-Servo-Shield}/servo.py | 0
 .../04-Thermopile-Shield}/thermal_camera.py | 0
 .../04-Thermopile-Shield}/thermal_overlay.py | 0
 .../thermal_overlay_lcd.py | 0
 .../05-BLE-Shield}/ble.py | 0
 .../motor-shield-power-driver.py | 0
 .../06-Motor-Shield}/motor-shield-pwm.py | 0
 .../06-Motor-Shield}/motor.py | 0
 .../06-Motor-Shield}/stepper.py | 0
 .../07-IMU-Shield}/imu_read.py | 0
 .../08-Distance-Shield}/distance_read.py | 0
 .../09-TV-Shield}/tv.py | 0
 .../10-Light-Shield}/light.py | 0
 .../11-Low-Power}/deep_sleep.py | 0
 .../11-Low-Power}/extint_wakeup.py | 0
 .../11-Low-Power}/sensor_sleep.py | 0
 .../11-Low-Power}/stop_mode.py | 0
 .../99-Tests/colorbar.py | 0
 .../99-Tests/fps.py | 0
 .../99-Tests/selftest.py | 0
 .../99-Tests/unittests.py | 0
 .../01-Basics => 00-OpenMV-Boards}/main.py | 0
 .../00-Board-Control}/blinky.py | 0
 .../00-Board-Control}/i2c_scanner.py | 0
 .../01-Sensors/apds9960/ambient.py | 0
 .../01-Sensors/apds9960/gesture.py | 0
 .../01-Sensors/apds9960/proximity.py | 0
 .../Nano-33-BLE-Sense/01-Sensors/hts221.py | 0
 .../Nano-33-BLE-Sense/01-Sensors/lps22.py | 0
 .../Nano-33-BLE-Sense/01-Sensors/lsm9ds1.py | 0
 .../Nano-33-BLE-Sense/02-Audio}/audio_fft.py | 0
 .../03-Bluetooth}/ble_blinky.py | 0
 .../03-Bluetooth}/ble_scan.py | 0
 .../03-Bluetooth}/ble_temperature.py | 0
 .../04-Thermal/thermal_camera.py | 0
 .../Nano-RP2040/00-Board-Control}/blinky.py | 0
 .../00-Board-Control}/i2c_scanner.py | 0
 .../Nano-RP2040/01-Sensors/lsm6dsox_basic.py | 0
 .../Nano-RP2040/01-Sensors/lsm6dsox_mlc.py | 0
 .../Nano-RP2040/03-Audio}/audio_fft.py | 0
 .../Nano-RP2040/03-WiFi/ap_mode.py | 0
 .../Nano-RP2040/03-WiFi/http_client.py | 0
 .../Nano-RP2040/03-WiFi/ntp.py | 0
 .../Nano-RP2040/03-WiFi/scan.py | 0
 .../Nano-RP2040/04-Bluetooth}/ble_blinky.py | 0
 .../04-Bluetooth}/ble_temperature.py | 0
 .../04-Bluetooth}/temp_sensor_aioble.py | 0
 .../Nano-RP2040/05-Thermal/thermal_camera.py | 0
 .../00-Board-Control/adc_read_ext_channel.py} | 0
 .../00-Board-Control}/adc_read_int_channel.py | 0
 .../Nicla-Vision/00-Board-Control/blinky.py | 14 +
 .../Nicla-Vision/00-Board-Control}/can.py | 0
 .../00-Board-Control}/cpufreq_scaling.py | 0
 .../00-Board-Control}/i2c_control.py | 0
 .../00-Board-Control}/led_control.py | 0
 .../00-Board-Control}/pin_control.py | 0
 .../00-Board-Control}/pwm_control.py | 0
 .../Nicla-Vision/00-Board-Control}/rtc.py | 0
 .../00-Board-Control}/spi_control.py | 0
 .../00-Board-Control}/timer_control.py | 0
 .../00-Board-Control}/uart_control.py | 0
 .../Nicla-Vision/00-Board-Control}/usb_hid.py | 0
 .../Nicla-Vision/00-Board-Control}/usb_vcp.py | 0
 .../00-Board-Control}/vsync_gpio_output.py | 0
 .../Nicla-Vision/01-Sensors/lsm6dsox_basic.py | 0
 .../Nicla-Vision/01-Sensors/lsm6dsox_mlc.py | 0
 .../Nicla-Vision/01-Sensors/vl53l1x_tof.py | 0
 .../Nicla-Vision/02-Audio}/audio_fft.py | 0
 .../Nicla-Vision/03-WiFi}/connect.py | 0
 .../Nicla-Vision/03-WiFi}/dns.py | 0
 .../Nicla-Vision/03-WiFi}/http_client.py | 0
 .../Nicla-Vision/03-WiFi}/http_client_ssl.py | 0
 .../Nicla-Vision/03-WiFi}/mjpeg_streamer.py | 0
 .../Nicla-Vision/03-WiFi}/mqtt_pub.py | 0
 .../Nicla-Vision/03-WiFi}/mqtt_sub.py | 0
 .../Nicla-Vision/03-WiFi}/ntp.py | 0
 .../Nicla-Vision/03-WiFi}/scan.py | 0
 .../Nicla-Vision/03-WiFi}/static_ip.py | 0
 .../04-Bluetooth}/ble_temperature.py | 0
 .../Nicla-Vision/05-Low-Power}/deep_sleep.py | 0
 .../05-Low-Power}/extint_wakeup.py | 0
 .../Nicla-Vision/05-Low-Power}/stop_mode.py | 0
 .../00-Board-Control/adc_read_ext_channel.py} | 0
 .../00-Board-Control}/adc_read_int_channel.py | 0
 .../Portenta-H7/00-Board-Control/blinky.py | 14 +
 .../Portenta-H7/00-Board-Control}/can.py | 0
 .../00-Board-Control}/cpufreq_scaling.py | 0
 .../00-Board-Control}/dac_write.py | 0
 .../00-Board-Control}/dac_write_timed.py | 0
 .../00-Board-Control}/i2c_control.py | 0
 .../00-Board-Control}/led_control.py | 0
 .../00-Board-Control}/pin_control.py | 0
 .../00-Board-Control}/pwm_control.py | 0
 .../Portenta-H7/00-Board-Control}/rtc.py | 0
 .../00-Board-Control}/servo_control.py | 0
 .../00-Board-Control}/spi_control.py | 0
 .../00-Board-Control}/timer_control.py | 0
 .../00-Board-Control}/timer_tests.py | 0
 .../00-Board-Control}/uart_control.py | 0
 .../Portenta-H7/00-Board-Control}/usb_hid.py | 0
 .../Portenta-H7/00-Board-Control}/usb_vcp.py | 0
 .../00-Board-Control}/vsync_gpio_output.py | 0
 .../Portenta-H7/01-Audio}/audio_fft.py | 0
 .../Portenta-H7/01-Audio}/micro_speech.py | 0
 .../Portenta-H7/02-WiFi}/connect.py | 0
 .../Portenta-H7/02-WiFi}/dns.py | 0
 .../Portenta-H7/02-WiFi}/http_client.py | 0
 .../Portenta-H7/02-WiFi}/http_client_ssl.py | 0
 .../Portenta-H7/02-WiFi}/mjpeg_streamer.py | 0
 .../Portenta-H7/02-WiFi}/mqtt_pub.py | 0
 .../Portenta-H7/02-WiFi}/mqtt_sub.py | 0
 .../Portenta-H7/02-WiFi}/ntp.py | 0
 .../Portenta-H7/02-WiFi}/scan.py | 0
 .../Portenta-H7/02-WiFi}/static_ip.py | 0
 .../03-Bluetooth}/ble_temperature.py | 0
 .../Portenta-H7/04-LoRa}/lora-example.py | 0
 .../05-Ethernet}/eth_cable_test.py | 0
 .../Portenta-H7/05-Ethernet}/http_client.py | 0
 .../05-Ethernet}/http_client_ssl.py | 0
 .../Portenta-H7/05-Ethernet}/peer_to_peer.py | 0
 .../Portenta-H7/06-Low-Power}/deep_sleep.py | 0
 .../06-Low-Power}/extint_wakeup.py | 0
 .../himax_wakeup_on_motion_detection.py | 0
 .../Portenta-H7/06-Low-Power}/sensor_sleep.py | 0
 .../Portenta-H7/06-Low-Power}/stop_mode.py | 0
 .../00-Basics => 02-HelloWorld}/helloworld.py | 0
 .../00-Snapshot}/emboss_snapshot.py | 0
 .../00-Snapshot}/snapshot.py | 0
 .../snapshot_on_face_detection.py | 0
 .../00-Snapshot}/snapshot_on_movement.py | 0
 .../00-Snapshot}/time_lapse_photos.py | 0
 .../01-Video-Recording}/gif.py | 0
 .../gif_on_face_detection.py | 0
 .../01-Video-Recording}/gif_on_movement.py | 0
 .../01-Video-Recording}/imageio_memory.py | 0
 .../01-Video-Recording}/imageio_read.py | 0
 .../01-Video-Recording}/imageio_write.py | 0
 .../01-Video-Recording}/mjpeg.py | 0
 .../mjpeg_on_face_detection.py | 0
 .../01-Video-Recording}/mjpeg_on_movement.py | 0
 .../absolute-rotation-scale.py | 0
 .../02-Optical-Flow}/absolute-translation.py | 0
 .../differential-rotation-scale.py | 0
 .../differential-translation.py | 0
 .../image-patches-absolute-rotation-scale.py | 0
 .../image-patches-absolute-translation.py | 0
 ...age-patches-differential-rotation-scale.py | 0
 .../image-patches-differential-translation.py | 0
 .../03-Event-Cameras}/frogeye2020.py | 0
 .../frogeye2020_with_tracking.py | 0
 .../04-Global-Shutter}/high_fps.py | 0
 .../04-Global-Shutter}/triggered_mode.py | 0
 .../lepton_get_object_high_temp.py | 0
 .../05-FLIR-Lepton}/lepton_get_object_temp.py | 0
 .../lepton_get_object_temp_color.py | 0
 .../lepton_get_object_temp_color_lcd.py | 0
 .../lepton_get_object_temp_lcd.py | 0
 ...lepton_hotspot_grayscale_color_tracking.py | 0
 ...on_hotspot_grayscale_color_tracking_lcd.py | 0
 .../lepton_hotspot_rgb565_color_tracking.py | 0
 ...epton_hotspot_rgb565_color_tracking_lcd.py | 0
 ...t_temp_hotspot_grayscale_color_tracking.py | 0
 ...rget_temp_hotspot_rgb565_color_tracking.py | 0
 .../06-Time-of-Flight}/tof_camera.py | 0
 .../06-Time-of-Flight}/tof_overlay.py | 0
 .../sensor_auto_gain_control.py | 0
 .../sensor_exposure_control.py | 0
 .../sensor_horizontal_mirror.py | 0
 .../sensor_manual_whitebal_control.py | 0
 .../sensor_vertical_flip.py | 0
 .../sesnor_manual_gain_control.py | 0
 .../100_fps_ir_led_tracking.py | 0
 .../08-Readout-Control}/apriltag_tracking.py | 0
 .../00-Drawing}/arrow_drawing.py | 0
 .../00-Drawing}/circle_drawing.py | 0
 .../00-Drawing}/copy2fb.py | 0
 .../00-Drawing}/cross_drawing.py | 0
 .../00-Drawing}/ellipse_drawing.py | 0
 .../00-Drawing}/flood_fill.py | 0
 .../00-Drawing}/image_drawing.py | 0
 .../00-Drawing}/image_drawing_advanced.py | 0
 .../image_drawing_alpha_blending_test.py | 0
 ...ng_alpha_blending_with_color_table_test.py | 0
 .../image_drawing_alpha_table_test.py | 0
 ...awing_alpha_table_with_color_table_test.py | 0
 .../image_drawing_scale_down_test.py | 0
 .../image_drawing_scale_up_test.py | 0
 .../image_drawing_with_custom_palette.py | 0
 .../00-Drawing}/keypoints_drawing.py | 0
 .../00-Drawing}/line_drawing.py | 0
 .../00-Drawing}/rectangle_drawing.py | 0
 .../00-Drawing}/text_drawing.py | 0
 .../adaptive_histogram_equalization.py | 0
 .../01-Image-Filters}/blur_filter.py | 0
 .../01-Image-Filters}/cartoon_filter.py | 0
 .../color_bilateral_filter.py | 0
 .../01-Image-Filters}/color_binary_filter.py | 0
 .../01-Image-Filters}/color_light_removal.py | 0
 .../01-Image-Filters}/edge_filter.py | 0
 .../01-Image-Filters}/erode_and_dilate.py | 0
 .../01-Image-Filters}/gamma_correction.py | 0
 .../grayscale_bilateral_filter.py | 0
 .../grayscale_binary_filter.py | 0
 .../grayscale_light_removal.py | 0
 .../histogram_equalization.py | 0
 .../01-Image-Filters}/kernel_filters.py | 0
 .../01-Image-Filters}/lens_correction.py | 0
 .../01-Image-Filters}/linear_polar.py | 0
 .../01-Image-Filters}/log_polar.py | 0
 .../mean_adaptive_threshold_filter.py | 0
 .../01-Image-Filters}/mean_filter.py | 0
 .../median_adaptive_threshold_filter.py | 0
 .../01-Image-Filters}/median_filter.py | 0
 .../midpoint_adaptive_threshold_filter.py | 0
 .../01-Image-Filters}/midpoint_filter.py | 0
 .../mode_adaptive_threshold_filter.py | 0
 .../01-Image-Filters}/mode_filter.py | 0
 .../01-Image-Filters}/negative.py | 0
 .../perspective_and_rotation_correction.py | 0
 .../perspective_correction.py | 0
 .../01-Image-Filters}/rotation_correction.py | 0
 .../01-Image-Filters}/sharpen_filter.py | 0
 .../01-Image-Filters}/ulab.py | 0
 .../01-Image-Filters}/unsharp_filter.py | 0
 .../vflip_hmirror_transpose.py | 0
 .../automatic_grayscale_color_tracking.py | 0
 .../automatic_rgb565_color_tracking.py | 0
 .../black_grayscale_line_following.py | 0
 .../image_histogram_info.py | 0
 .../image_statistics_info.py | 0
 .../ir_beacon_grayscale_tracking.py | 0
 .../ir_beacon_rgb565_tracking.py | 0
 .../multi_color_blob_tracking.py | 0
 .../multi_color_code_tracking.py | 0
 .../single_color_code_tracking.py | 0
 .../single_color_grayscale_blob_tracking.py | 0
 .../single_color_rgb565_blob_tracking.py | 0
 .../in_memory_advanced_frame_differencing.py | 0
 .../in_memory_basic_frame_differencing.py | 0
 .../in_memory_shadow_removal.py | 0
 .../in_memory_structural_similarity.py | 0
 .../on_disk_advanced_frame_differencing.py | 0
 .../on_disk_basic_frame_differencing.py | 0
 .../on_disk_shadow_removal.py | 0
 .../on_disk_structural_similarity.py | 0
 .../00-TensorFlow}/tf_face_collection.py | 0
 .../00-TensorFlow}/tf_face_recognition.py | 0
 .../tf_mobilenet_search_just_center.py | 0
 .../tf_mobilenet_search_whole_window.py | 0
 .../00-TensorFlow}/tf_object_detection.py | 0
 .../tf_person_detection_search_just_center.py | 0
 ...tf_person_detection_search_whole_window.py | 0
 .../01-ST-CubeAI}/nn_stm32cubeai.py | 0
 .../02-Haar-Cascade}/face_detection.py | 0
 .../02-Haar-Cascade}/face_eye_detection.py | 0
 .../02-Haar-Cascade}/face_recognition.py | 0
 .../02-Haar-Cascade}/face_tracking.py | 0
 .../02-Haar-Cascade}/iris_detection.py | 0
 .../edges.py | 0
 .../find_circles.py | 0
 .../find_line_segments.py | 0
 .../find_lines.py | 0
 .../find_rects.py | 0
 .../hog.py | 0
 .../keypoints.py | 0
 .../keypoints_save.py | 0
 .../lbp.py | 0
 .../linear_regression_fast.py | 0
 .../linear_regression_robust.py | 0
 .../selective_search.py | 0
 .../template_matching.py | 0
 .../16-Codes => 07-Barcodes}/find_barcodes.py | 0
 .../find_datamatrices.py | 0
 .../find_datamatrices_w_lens_zoom.py | 0
 .../qrcodes_with_lens_corr.py | 0
 .../qrcodes_with_lens_zoom.py | 0
 .../find_apriltags.py | 0
 .../find_apriltags_3d_pose.py | 0
 .../find_apriltags_max_res.py | 0
 .../find_apriltags_w_lens_zoom.py | 0
 .../find_small_apriltags.py | 0
 .../00-Arduino/arduino_i2c_slave.py | 0
 .../00-Arduino/arduino_spi_slave.py | 0
 .../00-Arduino/arduino_uart.py | 0
 .../apriltags_pixy_i2c_emulation.py | 0
 .../apriltags_pixy_spi_emulation.py | 0
 .../apriltags_pixy_uart_emulation.py | 0
 .../01-Pixy-Emulation}/pixy_i2c_emulation.py | 0
 .../01-Pixy-Emulation}/pixy_spi_emulation.py | 0
 .../01-Pixy-Emulation}/pixy_uart_emulation.py | 0
 .../mavlink_apriltags_landing_target.py | 0
 .../02-MAVLink}/mavlink_opticalflow.py | 0
 .../03-Modbus}/modbus_apriltag.py | 0
 .../03-Modbus}/modbus_rtu_slave.py | 0
 ..._as_the_remote_device_for_your_computer.py | 0
 ..._as_the_remote_device_for_your_computer.py | 0
 ...e_transfer_raw_as_the_controller_device.py | 0
 ...image_transfer_raw_as_the_remote_device.py | 0
 ...pular_features_as_the_controller_device.py | 0
 .../popular_features_as_the_remote_device.py | 0
 .../36-Web-Servers/rtsp_video_server_lan.py | 0
 .../36-Web-Servers/rtsp_video_server_wlan.py | 0
 .../Arduino/Nicla-Vision/00-Basics/main.py | 33 --
 .../02-Board-Control/native_emitters.py | 19 -
 .../Nicla-Vision/03-Drawing/copy2fb.py | 15 -
 .../05-Snapshot/snapshot_on_movement.py | 44 ---
 .../06-Video-Recording/gif_on_movement.py | 58 ---
 .../06-Video-Recording/mjpeg_on_movement.py | 58 ---
 .../07-Face-Detection/face_detection.py | 46 ---
 .../07-Face-Detection/face_tracking.py | 63 ----
 .../09-Feature-Detection/edges.py | 19 -
 .../Nicla-Vision/09-Feature-Detection/hog.py | 25 --
 .../09-Feature-Detection/keypoints.py | 51 ---
 .../09-Feature-Detection/keypoints_save.py | 30 --
 .../Nicla-Vision/09-Feature-Detection/lbp.py | 49 ---
 .../09-Feature-Detection/template_matching.py | 44 ---
 .../Nicla-Vision/16-Codes/find_barcodes.py | 63 ----
 .../16-Codes/find_datamatrices.py | 25 --
 .../16-Codes/find_datamatrices_w_lens_zoom.py | 25 --
 .../16-Codes/qrcodes_with_lens_corr.py | 21 --
 .../16-Codes/qrcodes_with_lens_zoom.py | 21 --
 .../in_memory_basic_frame_differencing.py | 46 ---
 .../in_memory_structural_similarity.py | 38 --
 .../on_disk_basic_frame_differencing.py | 42 ---
 .../on_disk_structural_similarity.py | 34 --
 .../25-Machine-Learning/nn_stm32cubeai.py | 34 --
 .../25-Machine-Learning/tf_face_collection.py | 31 --
 .../tf_face_recognition.py | 41 ---
 .../tf_mobilenet_search_just_center.py | 66 ----
 .../tf_mobilenet_search_whole_window.py | 60 ----
 .../26-April-Tags/find_apriltags.py | 55 ---
 .../26-April-Tags/find_apriltags_3d_pose.py | 55 ---
 .../26-April-Tags/find_apriltags_max_res.py | 56 ---
 .../find_apriltags_w_lens_zoom.py | 31 --
 .../26-April-Tags/find_small_apriltags.py | 67 ----
 .../popular_features_as_the_remote_device.py | 271 --------------
 .../36-Web-Servers/rtsp_video_server_lan.py | 79 ----
 .../36-Web-Servers/rtsp_video_server_wlan.py | 79 ----
 .../Portenta-H7/01-Basics/helloworld.py | 17 -
 .../02-Board-Control/native_emitters.py | 19 -
 .../Portenta-H7/03-Drawing/arrow_drawing.py | 31 --
 .../Portenta-H7/03-Drawing/circle_drawing.py | 31 --
 .../Portenta-H7/03-Drawing/cross_drawing.py | 29 --
 .../Portenta-H7/03-Drawing/ellipse_drawing.py | 35 --
 .../Portenta-H7/03-Drawing/flood_fill.py | 35 --
 .../Portenta-H7/03-Drawing/image_drawing.py | 25 --
 .../03-Drawing/image_drawing_advanced.py | 93 -----
 .../image_drawing_alpha_blending_test.py | 71 ----
 ...ng_alpha_blending_with_color_table_test.py | 81 -----
 .../image_drawing_alpha_table_test.py | 75 ----
 ...awing_alpha_table_with_color_table_test.py | 85 -----
 .../image_drawing_scale_down_test.py | 69 ----
 .../03-Drawing/image_drawing_scale_up_test.py | 63 ----
 .../image_drawing_with_custom_palette.py | 43 ---
 .../03-Drawing/keypoints_drawing.py | 31 --
 .../Portenta-H7/03-Drawing/line_drawing.py | 31 --
 .../03-Drawing/rectangle_drawing.py | 31 --
 .../Portenta-H7/03-Drawing/text_drawing.py | 33 --
 .../adaptive_histogram_equalization.py | 29 --
 .../04-Image-Filters/blur_filter.py | 21 --
 .../04-Image-Filters/cartoon_filter.py | 29 --
 .../color_bilateral_filter.py | 33 --
 .../04-Image-Filters/color_binary_filter.py | 61 ----
 .../04-Image-Filters/color_light_removal.py | 25 --
 .../04-Image-Filters/edge_filter.py | 21 --
 .../04-Image-Filters/erode_and_dilate.py | 35 --
 .../04-Image-Filters/gamma_correction.py | 21 --
 .../grayscale_bilateral_filter.py | 33 --
 .../grayscale_binary_filter.py | 45 ---
 .../grayscale_light_removal.py | 25 --
 .../histogram_equalization.py | 19 -
 .../04-Image-Filters/kernel_filters.py | 27 --
 .../04-Image-Filters/lens_correction.py | 21 --
 .../04-Image-Filters/linear_polar.py | 21 --
 .../Portenta-H7/04-Image-Filters/log_polar.py | 21 --
 .../mean_adaptive_threshold_filter.py | 25 --
 .../04-Image-Filters/mean_filter.py | 25 --
 .../median_adaptive_threshold_filter.py | 27 --
 .../04-Image-Filters/median_filter.py | 27 --
 .../midpoint_adaptive_threshold_filter.py | 28 --
 .../04-Image-Filters/midpoint_filter.py | 27 --
 .../mode_adaptive_threshold_filter.py | 25 --
 .../04-Image-Filters/mode_filter.py | 25 --
 .../Portenta-H7/04-Image-Filters/negative.py | 19 -
 .../perspective_and_rotation_correction.py | 71 ----
 .../perspective_correction.py | 39 --
 .../04-Image-Filters/rotation_correction.py | 49 ---
 .../04-Image-Filters/sharpen_filter.py | 21 --
 .../Portenta-H7/04-Image-Filters/ulab.py | 18 -
 .../04-Image-Filters/unsharp_filter.py | 21 --
 .../vflip_hmirror_transpose.py | 33 --
 .../05-Snapshot/emboss_snapshot.py | 33 --
 .../Portenta-H7/05-Snapshot/snapshot.py | 27 --
 .../05-Snapshot/snapshot_on_face_detection.py | 51 ---
 .../05-Snapshot/snapshot_on_movement.py | 44 ---
 .../05-Snapshot/time_lapse_photos.py | 67 ----
 .../Portenta-H7/06-Video-Recording/gif.py | 37 --
 .../gif_on_face_detection.py | 65 ----
 .../06-Video-Recording/gif_on_movement.py | 58 ---
 .../06-Video-Recording/imageio_memory.py | 33 --
 .../06-Video-Recording/imageio_read.py | 32 --
 .../06-Video-Recording/imageio_write.py | 36 --
 .../Portenta-H7/06-Video-Recording/mjpeg.py | 37 --
 .../mjpeg_on_face_detection.py | 65 ----
 .../06-Video-Recording/mjpeg_on_movement.py | 58 ---
 .../07-Face-Detection/face_recognition.py | 27 --
 .../09-Feature-Detection/find_circles.py | 39 --
 .../find_line_segments.py | 39 --
 .../09-Feature-Detection/find_lines.py | 57 ---
 .../09-Feature-Detection/find_rects.py | 31 --
 .../linear_regression_fast.py | 43 ---
 .../linear_regression_robust.py | 45 ---
 .../09-Feature-Detection/selective_search.py | 22 --
 .../automatic_grayscale_color_tracking.py | 48 ---
 .../automatic_rgb565_color_tracking.py | 52 ---
 .../black_grayscale_line_following.py | 84 -----
 .../10-Color-Tracking/image_histogram_info.py | 24 --
 .../image_statistics_info.py | 22 --
 .../ir_beacon_grayscale_tracking.py | 29 --
 .../ir_beacon_rgb565_tracking.py | 29 --
 .../multi_color_blob_tracking.py | 40 ---
 .../multi_color_code_tracking.py | 48 ---
 .../single_color_code_tracking.py | 42 ---
 .../single_color_grayscale_blob_tracking.py | 36 --
 .../single_color_rgb565_blob_tracking.py | 40 ---
 .../Portenta-H7/16-Codes/find_barcodes.py | 64 ----
 .../Portenta-H7/16-Codes/find_datamatrices.py | 26 --
 .../16-Codes/find_datamatrices_w_lens_zoom.py | 26 --
 .../16-Codes/qrcodes_with_lens_corr.py | 22 --
 .../17-Pixy-Emulation/pixy_i2c_emulation.py | 328 -----------------
 .../17-Pixy-Emulation/pixy_spi_emulation.py | 336 ------------------
 .../17-Pixy-Emulation/pixy_uart_emulation.py | 310 ----------------
 .../18-MAVLink/mavlink_opticalflow.py | 112 ------
 .../in_memory_advanced_frame_differencing.py | 64 ----
 .../in_memory_basic_frame_differencing.py | 46 ---
 .../in_memory_shadow_removal.py | 52 ---
 .../in_memory_structural_similarity.py | 38 --
 .../on_disk_advanced_frame_differencing.py | 60 ----
 .../on_disk_basic_frame_differencing.py | 42 ---
 .../on_disk_shadow_removal.py | 48 ---
 .../on_disk_structural_similarity.py | 34 --
 .../himax_motion_detection.py | 40 ---
 .../sensor_auto_gain_control.py | 45 ---
 .../sensor_exposure_control.py | 66 ----
 .../sensor_horizontal_mirror.py | 21 --
 .../sensor_manual_whitebal_control.py | 38 --
 .../21-Sensor-Control/sensor_vertical_flip.py | 21 --
 .../sesnor_manual_gain_control.py | 66 ----
 .../absolute-rotation-scale.py | 67 ----
 .../22-Optical-Flow/absolute-translation.py | 55 ---
 .../differential-rotation-scale.py | 67 ----
 .../differential-translation.py | 55 ---
 .../image-patches-absolute-rotation-scale.py | 73 ----
 .../image-patches-absolute-translation.py | 69 ----
 ...age-patches-differential-rotation-scale.py | 73 ----
 .../image-patches-differential-translation.py | 69 ----
 .../I2C_Lidar_Lite_V3_example_code.py | 63 ----
 .../25-Machine-Learning/tf_face_collection.py | 31 --
 .../tf_face_recognition.py | 41 ---
 .../tf_mobilenet_search_just_center.py | 66 ----
 .../tf_mobilenet_search_whole_window.py | 60 ----
 .../tf_object_detection.py | 51 ---
 .../tf_person_detection_search_just_center.py | 48 ---
 ...tf_person_detection_search_whole_window.py | 42 ---
 .../26-April-Tags/find_apriltags.py | 56 ---
 .../26-April-Tags/find_apriltags_3d_pose.py | 56 ---
 .../26-April-Tags/find_apriltags_max_res.py | 60 ----
 .../find_apriltags_w_lens_zoom.py | 32 --
 .../26-April-Tags/find_small_apriltags.py | 70 ----
 ..._as_the_remote_device_for_your_computer.py | 87 -----
 ..._as_the_remote_device_for_your_computer.py | 79 ----
 ...e_transfer_raw_as_the_controller_device.py | 129 -------
 ...image_transfer_raw_as_the_remote_device.py | 99 ------
 ...pular_features_as_the_controller_device.py | 157 --------
 .../popular_features_as_the_remote_device.py | 271 --------------
 .../36-Web-Servers/rtsp_video_server_lan.py | 79 ----
 .../36-Web-Servers/rtsp_video_server_wlan.py | 79 ----
 .../Arduino/Portenta-H7/99-Tests/colorbar.py | 55 ---
 .../Arduino/Portenta-H7/99-Tests/fps.py | 14 -
 .../Arduino/Portenta-H7/99-Tests/selftest.py | 77 ----
 .../Arduino/Portenta-H7/99-Tests/unittests.py | 38 --
 .../OpenMV/00-Arduino/arduino_i2c_slave.py | 91 -----
 .../OpenMV/00-Arduino/arduino_spi_slave.py | 94 -----
 .../OpenMV/00-Arduino/arduino_uart.py | 38 --
 .../examples/OpenMV/01-Basics/helloworld.py | 17 -
 scripts/examples/OpenMV/01-Basics/main.py | 33 --
 .../02-Board-Control/native_emitters.py | 19 -
 .../OpenMV/03-Drawing/arrow_drawing.py | 31 --
 .../OpenMV/03-Drawing/circle_drawing.py | 31 --
 scripts/examples/OpenMV/03-Drawing/copy2fb.py | 21 --
 .../OpenMV/03-Drawing/cross_drawing.py | 29 --
 .../OpenMV/03-Drawing/ellipse_drawing.py | 35 --
 .../examples/OpenMV/03-Drawing/flood_fill.py | 35 --
 .../OpenMV/03-Drawing/image_drawing.py | 25 --
 .../03-Drawing/image_drawing_advanced.py | 93 -----
 .../image_drawing_alpha_blending_test.py | 71 ----
 ...ng_alpha_blending_with_color_table_test.py | 81 -----
 .../image_drawing_alpha_table_test.py | 75 ----
 ...awing_alpha_table_with_color_table_test.py | 85 -----
 .../image_drawing_scale_down_test.py | 69 ----
 .../03-Drawing/image_drawing_scale_up_test.py | 63 ----
 .../image_drawing_with_custom_palette.py | 43 ---
 .../OpenMV/03-Drawing/keypoints_drawing.py | 31 --
 .../OpenMV/03-Drawing/line_drawing.py | 31 --
 .../OpenMV/03-Drawing/rectangle_drawing.py | 31 --
 .../OpenMV/03-Drawing/text_drawing.py | 33 --
 .../adaptive_histogram_equalization.py | 29 --
 .../OpenMV/04-Image-Filters/blur_filter.py | 21 --
 .../OpenMV/04-Image-Filters/cartoon_filter.py | 29 --
 .../color_bilateral_filter.py | 33 --
 .../04-Image-Filters/color_binary_filter.py | 61 ----
 .../04-Image-Filters/color_light_removal.py | 25 --
 .../OpenMV/04-Image-Filters/edge_filter.py | 21 --
 .../04-Image-Filters/erode_and_dilate.py | 35 --
 .../04-Image-Filters/gamma_correction.py | 21 --
 .../grayscale_bilateral_filter.py | 33 --
 .../grayscale_binary_filter.py | 45 ---
 .../grayscale_light_removal.py | 25 --
 .../histogram_equalization.py | 19 -
 .../OpenMV/04-Image-Filters/kernel_filters.py | 27 --
 .../04-Image-Filters/lens_correction.py | 21 --
 .../OpenMV/04-Image-Filters/linear_polar.py | 21 --
 .../OpenMV/04-Image-Filters/log_polar.py | 21 --
 .../mean_adaptive_threshold_filter.py | 25 --
 .../OpenMV/04-Image-Filters/mean_filter.py | 25 --
 .../median_adaptive_threshold_filter.py | 27 --
 .../OpenMV/04-Image-Filters/median_filter.py | 27 --
 .../midpoint_adaptive_threshold_filter.py | 28 --
 .../04-Image-Filters/midpoint_filter.py | 27 --
 .../mode_adaptive_threshold_filter.py | 25 --
 .../OpenMV/04-Image-Filters/mode_filter.py | 25 --
 .../OpenMV/04-Image-Filters/negative.py | 19 -
 .../perspective_and_rotation_correction.py | 71 ----
 .../perspective_correction.py | 39 --
 .../04-Image-Filters/rotation_correction.py | 49 ---
 .../OpenMV/04-Image-Filters/sharpen_filter.py | 21 --
 .../examples/OpenMV/04-Image-Filters/ulab.py | 19 -
 .../OpenMV/04-Image-Filters/unsharp_filter.py | 21 --
 .../vflip_hmirror_transpose.py | 33 --
 .../OpenMV/05-Snapshot/emboss_snapshot.py | 33 --
 .../examples/OpenMV/05-Snapshot/snapshot.py | 27 --
 .../05-Snapshot/snapshot_on_face_detection.py | 51 ---
 .../OpenMV/05-Snapshot/time_lapse_photos.py | 67 ----
 .../examples/OpenMV/06-Video-Recording/gif.py | 37 --
 .../gif_on_face_detection.py | 65 ----
 .../06-Video-Recording/imageio_memory.py | 33 --
 .../OpenMV/06-Video-Recording/imageio_read.py | 32 --
 .../06-Video-Recording/imageio_write.py | 36 --
 .../OpenMV/06-Video-Recording/mjpeg.py | 37 --
 .../mjpeg_on_face_detection.py | 65 ----
 .../07-Face-Detection/face_detection.py | 51 ---
 .../07-Face-Detection/face_recognition.py | 27 --
 .../OpenMV/07-Face-Detection/face_tracking.py | 68 ----
 .../08-Eye-Tracking/face_eye_detection.py | 49 ---
 .../OpenMV/08-Eye-Tracking/iris_detection.py | 52 ---
 .../OpenMV/09-Feature-Detection/edges.py | 20 --
 .../09-Feature-Detection/find_circles.py | 39 --
 .../find_line_segments.py | 39 --
 .../OpenMV/09-Feature-Detection/find_lines.py | 57 ---
 .../OpenMV/09-Feature-Detection/find_rects.py | 31 --
 .../OpenMV/09-Feature-Detection/hog.py | 28 --
 .../OpenMV/09-Feature-Detection/keypoints.py | 58 ---
 .../09-Feature-Detection/keypoints_save.py | 37 --
 .../OpenMV/09-Feature-Detection/lbp.py | 53 ---
 .../linear_regression_fast.py | 43 ---
 .../linear_regression_robust.py | 45 ---
 .../09-Feature-Detection/template_matching.py | 48 ---
 .../OpenMV/16-Codes/qrcodes_with_lens_zoom.py | 22 --
 .../apriltags_pixy_i2c_emulation.py | 245 -------------
 .../apriltags_pixy_spi_emulation.py | 253 -------------
 .../apriltags_pixy_uart_emulation.py | 227 ------------
 .../mavlink_apriltags_landing_target.py | 159 ---------
 .../absolute-rotation-scale.py | 67 ----
 .../22-Optical-Flow/absolute-translation.py | 55 ---
 .../differential-rotation-scale.py | 67 ----
 .../differential-translation.py | 55 ---
 .../image-patches-absolute-rotation-scale.py | 73 ----
 .../image-patches-absolute-translation.py | 69 ----
 ...age-patches-differential-rotation-scale.py | 73 ----
 .../image-patches-differential-translation.py | 69 ----
 .../I2C_Lidar_Lite_V3_example_code.py | 63 ----
 .../25-Machine-Learning/nn_stm32cubeai.py | 38 --
 .../tf_object_detection.py | 51 ---
 .../tf_person_detection_search_just_center.py | 48 ---
 ...tf_person_detection_search_whole_window.py | 42 ---
 .../OpenMV/28-Global-Shutter/high_fps.py | 29 --
 .../28-Global-Shutter/triggered_mode.py | 29 --
 .../OpenMV/32-modbus/modbus_apriltag.py | 39 --
 .../OpenMV/32-modbus/modbus_rtu_slave.py | 17 -
 ..._as_the_remote_device_for_your_computer.py | 87 -----
 ..._as_the_remote_device_for_your_computer.py | 79 ----
 ...e_transfer_raw_as_the_controller_device.py | 129 -------
 ...image_transfer_raw_as_the_remote_device.py | 99 ------
 ...pular_features_as_the_controller_device.py | 157 --------
 .../100_fps_ir_led_tracking.py | 135 -------
 .../35-Readout-Control/apriltag_tracking.py | 151 --------
 633 files changed, 42 insertions(+), 15430 deletions(-)
 rename scripts/examples/{Arduino/Portenta-H7/02-Board-Control/adc_read.py => 00-OpenMV-Boards/00-Board-Control/adc_read_ext_channel.py} (100%)
 rename scripts/examples/{Arduino/Portenta-H7/02-Board-Control => 00-OpenMV-Boards/00-Board-Control}/adc_read_int_channel.py (100%)
 create mode 100644 scripts/examples/00-OpenMV-Boards/00-Board-Control/blinky.py
 rename scripts/examples/{Arduino/Portenta-H7/02-Board-Control => 00-OpenMV-Boards/00-Board-Control}/can.py (100%)
 rename scripts/examples/{Arduino/Nicla-Vision/02-Board-Control => 00-OpenMV-Boards/00-Board-Control}/cpufreq_scaling.py (100%)
 rename scripts/examples/{Arduino/Portenta-H7/02-Board-Control => 00-OpenMV-Boards/00-Board-Control}/dac_write.py (100%)
 rename scripts/examples/{Arduino/Portenta-H7/02-Board-Control => 00-OpenMV-Boards/00-Board-Control}/dac_write_timed.py (100%)
 rename scripts/examples/{Arduino/Portenta-H7/02-Board-Control => 00-OpenMV-Boards/00-Board-Control}/i2c_control.py (100%)
 rename scripts/examples/{Arduino/Portenta-H7/02-Board-Control => 00-OpenMV-Boards/00-Board-Control}/led_control.py (100%)
 rename scripts/examples/{Arduino/Portenta-H7/02-Board-Control => 00-OpenMV-Boards/00-Board-Control}/pin_control.py (100%)
 rename scripts/examples/{OpenMV/02-Board-Control => 00-OpenMV-Boards/00-Board-Control}/pwm_control.py (100%)
 rename scripts/examples/{Arduino/Nicla-Vision/02-Board-Control => 00-OpenMV-Boards/00-Board-Control}/rtc.py (100%)
 rename scripts/examples/{Arduino/Portenta-H7/02-Board-Control => 00-OpenMV-Boards/00-Board-Control}/servo_control.py (100%)
 rename scripts/examples/{OpenMV/02-Board-Control => 00-OpenMV-Boards/00-Board-Control}/spi_control.py (100%)
 rename scripts/examples/{Arduino/Nicla-Vision/02-Board-Control => 00-OpenMV-Boards/00-Board-Control}/timer_control.py (100%)
 rename scripts/examples/{OpenMV/02-Board-Control => 00-OpenMV-Boards/00-Board-Control}/timer_tests.py (100%)
 rename scripts/examples/{Arduino/Portenta-H7/02-Board-Control => 00-OpenMV-Boards/00-Board-Control}/uart_control.py (100%)
 rename scripts/examples/{Arduino/Nicla-Vision/02-Board-Control => 00-OpenMV-Boards/00-Board-Control}/usb_hid.py (100%)
 rename scripts/examples/{Arduino/Nicla-Vision/02-Board-Control => 00-OpenMV-Boards/00-Board-Control}/usb_vcp.py (100%)
 rename scripts/examples/{OpenMV/02-Board-Control => 00-OpenMV-Boards/00-Board-Control}/vsync_gpio_output.py (100%)
 rename scripts/examples/{OpenMV/14-WiFi-Shield => 00-OpenMV-Boards/01-WiFi-Shield}/connect.py (100%)
 rename scripts/examples/{OpenMV/14-WiFi-Shield => 00-OpenMV-Boards/01-WiFi-Shield}/dns.py (100%)
 rename scripts/examples/{OpenMV/14-WiFi-Shield => 00-OpenMV-Boards/01-WiFi-Shield}/fw_update.py (100%)
 rename scripts/examples/{OpenMV/14-WiFi-Shield => 00-OpenMV-Boards/01-WiFi-Shield}/http_client.py (100%)
 rename scripts/examples/{OpenMV/14-WiFi-Shield => 00-OpenMV-Boards/01-WiFi-Shield}/http_client_ssl.py (100%)
 rename scripts/examples/{OpenMV/14-WiFi-Shield => 00-OpenMV-Boards/01-WiFi-Shield}/http_post.py (100%)
 rename scripts/examples/{OpenMV/14-WiFi-Shield => 00-OpenMV-Boards/01-WiFi-Shield}/mjpeg_streamer.py (100%)
 rename scripts/examples/{OpenMV/14-WiFi-Shield => 00-OpenMV-Boards/01-WiFi-Shield}/mjpeg_streamer_ap.py (100%)
 rename scripts/examples/{OpenMV/14-WiFi-Shield => 00-OpenMV-Boards/01-WiFi-Shield}/mjpeg_streamer_fir.py (100%)
 rename scripts/examples/{OpenMV/14-WiFi-Shield => 00-OpenMV-Boards/01-WiFi-Shield}/mqtt_pub.py (100%)
 rename scripts/examples/{OpenMV/14-WiFi-Shield => 00-OpenMV-Boards/01-WiFi-Shield}/mqtt_sub.py (100%)
 rename scripts/examples/{OpenMV/14-WiFi-Shield => 00-OpenMV-Boards/01-WiFi-Shield}/ntp.py (100%)
 rename scripts/examples/{OpenMV/14-WiFi-Shield => 00-OpenMV-Boards/01-WiFi-Shield}/scan.py (100%)
 rename scripts/examples/{OpenMV/14-WiFi-Shield => 00-OpenMV-Boards/01-WiFi-Shield}/static_ip.py (100%)
 rename scripts/examples/{OpenMV/11-LCD-Shield => 00-OpenMV-Boards/02-LCD-Shield}/lcd.py (100%)
 rename scripts/examples/{OpenMV/15-Servo-Shield => 00-OpenMV-Boards/03-Servo-Shield}/main.py (100%)
 rename scripts/examples/{OpenMV/15-Servo-Shield => 00-OpenMV-Boards/03-Servo-Shield}/pca9685.py (100%)
 rename scripts/examples/{OpenMV/15-Servo-Shield => 00-OpenMV-Boards/03-Servo-Shield}/servo.py (100%)
 rename scripts/examples/{OpenMV/12-Thermopile-Shield => 00-OpenMV-Boards/04-Thermopile-Shield}/thermal_camera.py (100%)
 rename scripts/examples/{OpenMV/12-Thermopile-Shield => 00-OpenMV-Boards/04-Thermopile-Shield}/thermal_overlay.py (100%)
 rename scripts/examples/{OpenMV/12-Thermopile-Shield => 00-OpenMV-Boards/04-Thermopile-Shield}/thermal_overlay_lcd.py (100%)
 rename scripts/examples/{OpenMV/13-BLE-Shield => 00-OpenMV-Boards/05-BLE-Shield}/ble.py (100%)
 rename scripts/examples/{OpenMV/23-Motor-Shield => 00-OpenMV-Boards/06-Motor-Shield}/motor-shield-power-driver.py (100%)
 rename scripts/examples/{OpenMV/23-Motor-Shield => 00-OpenMV-Boards/06-Motor-Shield}/motor-shield-pwm.py (100%)
 rename scripts/examples/{OpenMV/23-Motor-Shield => 00-OpenMV-Boards/06-Motor-Shield}/motor.py (100%)
 rename scripts/examples/{OpenMV/23-Motor-Shield => 00-OpenMV-Boards/06-Motor-Shield}/stepper.py (100%)
 rename scripts/examples/{OpenMV/29-IMU-Shield => 00-OpenMV-Boards/07-IMU-Shield}/imu_read.py (100%)
 rename scripts/examples/{OpenMV/30-Distance-Shield => 00-OpenMV-Boards/08-Distance-Shield}/distance_read.py (100%)
 rename scripts/examples/{OpenMV/31-TV-Shield => 00-OpenMV-Boards/09-TV-Shield}/tv.py (100%)
 rename scripts/examples/{OpenMV/33-Light-Shield => 00-OpenMV-Boards/10-Light-Shield}/light.py (100%)
 rename scripts/examples/{OpenMV/19-Low-Power => 00-OpenMV-Boards/11-Low-Power}/deep_sleep.py (100%)
 rename scripts/examples/{Arduino/Portenta-H7/19-Low-Power => 00-OpenMV-Boards/11-Low-Power}/extint_wakeup.py (100%)
 rename scripts/examples/{OpenMV/19-Low-Power => 00-OpenMV-Boards/11-Low-Power}/sensor_sleep.py (100%)
 rename scripts/examples/{Arduino/Nicla-Vision/19-Low-Power => 00-OpenMV-Boards/11-Low-Power}/stop_mode.py (100%)
 rename scripts/examples/{OpenMV => 00-OpenMV-Boards}/99-Tests/colorbar.py (100%)
 rename scripts/examples/{OpenMV => 00-OpenMV-Boards}/99-Tests/fps.py (100%)
 rename scripts/examples/{OpenMV => 00-OpenMV-Boards}/99-Tests/selftest.py (100%)
 rename scripts/examples/{OpenMV => 00-OpenMV-Boards}/99-Tests/unittests.py (100%)
 rename scripts/examples/{Arduino/Portenta-H7/01-Basics => 00-OpenMV-Boards}/main.py (100%)
 rename scripts/examples/{Arduino/Nano-33-BLE-Sense/00-Board => 01-Arduino-Boards/Nano-33-BLE-Sense/00-Board-Control}/blinky.py (100%)
 rename scripts/examples/{Arduino/Nano-33-BLE-Sense/00-Board => 01-Arduino-Boards/Nano-33-BLE-Sense/00-Board-Control}/i2c_scanner.py (100%)
 rename scripts/examples/{Arduino => 01-Arduino-Boards}/Nano-33-BLE-Sense/01-Sensors/apds9960/ambient.py (100%)
 rename scripts/examples/{Arduino => 01-Arduino-Boards}/Nano-33-BLE-Sense/01-Sensors/apds9960/gesture.py (100%)
 rename scripts/examples/{Arduino => 01-Arduino-Boards}/Nano-33-BLE-Sense/01-Sensors/apds9960/proximity.py (100%)
 rename scripts/examples/{Arduino => 01-Arduino-Boards}/Nano-33-BLE-Sense/01-Sensors/hts221.py (100%)
 rename scripts/examples/{Arduino => 01-Arduino-Boards}/Nano-33-BLE-Sense/01-Sensors/lps22.py (100%)
 rename scripts/examples/{Arduino => 01-Arduino-Boards}/Nano-33-BLE-Sense/01-Sensors/lsm9ds1.py (100%)
 rename scripts/examples/{Arduino/Nano-33-BLE-Sense/03-Audio => 01-Arduino-Boards/Nano-33-BLE-Sense/02-Audio}/audio_fft.py (100%)
 rename scripts/examples/{Arduino/Nano-33-BLE-Sense/02-Bluetooth => 01-Arduino-Boards/Nano-33-BLE-Sense/03-Bluetooth}/ble_blinky.py (100%)
 rename scripts/examples/{Arduino/Nano-33-BLE-Sense/02-Bluetooth => 01-Arduino-Boards/Nano-33-BLE-Sense/03-Bluetooth}/ble_scan.py (100%)
 rename scripts/examples/{Arduino/Nano-33-BLE-Sense/02-Bluetooth => 01-Arduino-Boards/Nano-33-BLE-Sense/03-Bluetooth}/ble_temperature.py (100%)
 rename scripts/examples/{Arduino => 01-Arduino-Boards}/Nano-33-BLE-Sense/04-Thermal/thermal_camera.py (100%)
 rename scripts/examples/{Arduino/Nano-RP2040/00-Basics => 01-Arduino-Boards/Nano-RP2040/00-Board-Control}/blinky.py (100%)
 rename scripts/examples/{Arduino/Nano-RP2040/00-Basics => 01-Arduino-Boards/Nano-RP2040/00-Board-Control}/i2c_scanner.py (100%)
 rename scripts/examples/{Arduino => 01-Arduino-Boards}/Nano-RP2040/01-Sensors/lsm6dsox_basic.py (100%)
 rename scripts/examples/{Arduino => 01-Arduino-Boards}/Nano-RP2040/01-Sensors/lsm6dsox_mlc.py (100%)
 rename scripts/examples/{Arduino/Nano-RP2040/04-Audio => 01-Arduino-Boards/Nano-RP2040/03-Audio}/audio_fft.py (100%)
 rename scripts/examples/{Arduino => 01-Arduino-Boards}/Nano-RP2040/03-WiFi/ap_mode.py (100%)
 rename scripts/examples/{Arduino => 01-Arduino-Boards}/Nano-RP2040/03-WiFi/http_client.py (100%)
 rename scripts/examples/{Arduino => 01-Arduino-Boards}/Nano-RP2040/03-WiFi/ntp.py (100%)
 rename scripts/examples/{Arduino => 01-Arduino-Boards}/Nano-RP2040/03-WiFi/scan.py (100%)
 rename scripts/examples/{Arduino/Nano-RP2040/02-Bluetooth => 01-Arduino-Boards/Nano-RP2040/04-Bluetooth}/ble_blinky.py (100%)
 rename scripts/examples/{Arduino/Nano-RP2040/02-Bluetooth => 01-Arduino-Boards/Nano-RP2040/04-Bluetooth}/ble_temperature.py (100%)
 rename scripts/examples/{Arduino/Nano-RP2040/02-Bluetooth => 01-Arduino-Boards/Nano-RP2040/04-Bluetooth}/temp_sensor_aioble.py (100%)
 rename scripts/examples/{Arduino => 01-Arduino-Boards}/Nano-RP2040/05-Thermal/thermal_camera.py (100%)
 rename scripts/examples/{Arduino/Nicla-Vision/02-Board-Control/adc_read.py => 01-Arduino-Boards/Nicla-Vision/00-Board-Control/adc_read_ext_channel.py} (100%)
 rename scripts/examples/{Arduino/Nicla-Vision/02-Board-Control => 01-Arduino-Boards/Nicla-Vision/00-Board-Control}/adc_read_int_channel.py (100%)
 create mode 100644 scripts/examples/01-Arduino-Boards/Nicla-Vision/00-Board-Control/blinky.py
 rename scripts/examples/{Arduino/Nicla-Vision/02-Board-Control => 01-Arduino-Boards/Nicla-Vision/00-Board-Control}/can.py (100%)
 rename scripts/examples/{Arduino/Portenta-H7/02-Board-Control => 01-Arduino-Boards/Nicla-Vision/00-Board-Control}/cpufreq_scaling.py (100%)
 rename scripts/examples/{Arduino/Nicla-Vision/02-Board-Control => 01-Arduino-Boards/Nicla-Vision/00-Board-Control}/i2c_control.py (100%)
 rename scripts/examples/{Arduino/Nicla-Vision/02-Board-Control => 01-Arduino-Boards/Nicla-Vision/00-Board-Control}/led_control.py (100%)
 rename scripts/examples/{Arduino/Nicla-Vision/02-Board-Control => 01-Arduino-Boards/Nicla-Vision/00-Board-Control}/pin_control.py (100%)
 rename scripts/examples/{Arduino/Nicla-Vision/02-Board-Control => 01-Arduino-Boards/Nicla-Vision/00-Board-Control}/pwm_control.py (100%)
 rename scripts/examples/{Arduino/Portenta-H7/02-Board-Control => 01-Arduino-Boards/Nicla-Vision/00-Board-Control}/rtc.py (100%)
 rename scripts/examples/{Arduino/Nicla-Vision/02-Board-Control => 01-Arduino-Boards/Nicla-Vision/00-Board-Control}/spi_control.py (100%)
 rename scripts/examples/{Arduino/Portenta-H7/02-Board-Control => 01-Arduino-Boards/Nicla-Vision/00-Board-Control}/timer_control.py (100%)
 rename scripts/examples/{Arduino/Nicla-Vision/02-Board-Control => 01-Arduino-Boards/Nicla-Vision/00-Board-Control}/uart_control.py (100%)
 rename scripts/examples/{Arduino/Portenta-H7/02-Board-Control => 01-Arduino-Boards/Nicla-Vision/00-Board-Control}/usb_hid.py (100%)
 rename scripts/examples/{OpenMV/02-Board-Control => 01-Arduino-Boards/Nicla-Vision/00-Board-Control}/usb_vcp.py (100%)
 rename scripts/examples/{Arduino/Nicla-Vision/02-Board-Control => 01-Arduino-Boards/Nicla-Vision/00-Board-Control}/vsync_gpio_output.py (100%)
 rename scripts/examples/{Arduino => 01-Arduino-Boards}/Nicla-Vision/01-Sensors/lsm6dsox_basic.py (100%)
 rename scripts/examples/{Arduino => 01-Arduino-Boards}/Nicla-Vision/01-Sensors/lsm6dsox_mlc.py (100%)
 rename scripts/examples/{Arduino => 01-Arduino-Boards}/Nicla-Vision/01-Sensors/vl53l1x_tof.py (100%)
 rename scripts/examples/{Arduino/Nicla-Vision/37-Audio => 01-Arduino-Boards/Nicla-Vision/02-Audio}/audio_fft.py (100%)
 rename scripts/examples/{Arduino/Nicla-Vision/40-WiFi => 01-Arduino-Boards/Nicla-Vision/03-WiFi}/connect.py (100%)
 rename scripts/examples/{Arduino/Nicla-Vision/40-WiFi => 01-Arduino-Boards/Nicla-Vision/03-WiFi}/dns.py (100%)
 rename scripts/examples/{Arduino/Nicla-Vision/40-WiFi => 01-Arduino-Boards/Nicla-Vision/03-WiFi}/http_client.py (100%)
 rename scripts/examples/{Arduino/Nicla-Vision/40-WiFi => 01-Arduino-Boards/Nicla-Vision/03-WiFi}/http_client_ssl.py (100%)
 rename scripts/examples/{Arduino/Nicla-Vision/40-WiFi => 01-Arduino-Boards/Nicla-Vision/03-WiFi}/mjpeg_streamer.py (100%)
 rename scripts/examples/{Arduino/Nicla-Vision/40-WiFi => 01-Arduino-Boards/Nicla-Vision/03-WiFi}/mqtt_pub.py (100%)
 rename scripts/examples/{Arduino/Nicla-Vision/40-WiFi => 01-Arduino-Boards/Nicla-Vision/03-WiFi}/mqtt_sub.py (100%)
 rename scripts/examples/{Arduino/Nicla-Vision/40-WiFi => 01-Arduino-Boards/Nicla-Vision/03-WiFi}/ntp.py (100%)
 rename scripts/examples/{Arduino/Nicla-Vision/40-WiFi => 01-Arduino-Boards/Nicla-Vision/03-WiFi}/scan.py (100%)
 rename scripts/examples/{Arduino/Nicla-Vision/40-WiFi => 01-Arduino-Boards/Nicla-Vision/03-WiFi}/static_ip.py (100%)
 rename scripts/examples/{Arduino/Nicla-Vision/41-Bluetooth => 01-Arduino-Boards/Nicla-Vision/04-Bluetooth}/ble_temperature.py (100%)
 rename scripts/examples/{Arduino/Nicla-Vision/19-Low-Power => 01-Arduino-Boards/Nicla-Vision/05-Low-Power}/deep_sleep.py (100%)
 rename scripts/examples/{Arduino/Nicla-Vision/19-Low-Power => 01-Arduino-Boards/Nicla-Vision/05-Low-Power}/extint_wakeup.py (100%)
 rename scripts/examples/{Arduino/Portenta-H7/19-Low-Power => 01-Arduino-Boards/Nicla-Vision/05-Low-Power}/stop_mode.py (100%)
 rename scripts/examples/{OpenMV/02-Board-Control/adc_read.py => 01-Arduino-Boards/Portenta-H7/00-Board-Control/adc_read_ext_channel.py} (100%)
 rename scripts/examples/{OpenMV/02-Board-Control => 01-Arduino-Boards/Portenta-H7/00-Board-Control}/adc_read_int_channel.py (100%)
 create mode 100644 scripts/examples/01-Arduino-Boards/Portenta-H7/00-Board-Control/blinky.py
 rename scripts/examples/{OpenMV/02-Board-Control => 01-Arduino-Boards/Portenta-H7/00-Board-Control}/can.py (100%)
 rename scripts/examples/{OpenMV/02-Board-Control => 01-Arduino-Boards/Portenta-H7/00-Board-Control}/cpufreq_scaling.py (100%)
 rename scripts/examples/{OpenMV/02-Board-Control => 01-Arduino-Boards/Portenta-H7/00-Board-Control}/dac_write.py (100%)
 rename scripts/examples/{OpenMV/02-Board-Control => 01-Arduino-Boards/Portenta-H7/00-Board-Control}/dac_write_timed.py (100%)
 rename scripts/examples/{OpenMV/02-Board-Control => 01-Arduino-Boards/Portenta-H7/00-Board-Control}/i2c_control.py (100%)
 rename scripts/examples/{OpenMV/02-Board-Control => 01-Arduino-Boards/Portenta-H7/00-Board-Control}/led_control.py (100%)
 rename scripts/examples/{OpenMV/02-Board-Control => 01-Arduino-Boards/Portenta-H7/00-Board-Control}/pin_control.py (100%)
 rename scripts/examples/{Arduino/Portenta-H7/02-Board-Control => 01-Arduino-Boards/Portenta-H7/00-Board-Control}/pwm_control.py (100%)
 rename scripts/examples/{OpenMV/02-Board-Control => 01-Arduino-Boards/Portenta-H7/00-Board-Control}/rtc.py (100%)
 rename scripts/examples/{OpenMV/02-Board-Control => 01-Arduino-Boards/Portenta-H7/00-Board-Control}/servo_control.py (100%)
 rename scripts/examples/{Arduino/Portenta-H7/02-Board-Control => 01-Arduino-Boards/Portenta-H7/00-Board-Control}/spi_control.py (100%)
 rename scripts/examples/{OpenMV/02-Board-Control => 01-Arduino-Boards/Portenta-H7/00-Board-Control}/timer_control.py (100%)
 rename scripts/examples/{Arduino/Portenta-H7/02-Board-Control => 01-Arduino-Boards/Portenta-H7/00-Board-Control}/timer_tests.py (100%)
 rename scripts/examples/{OpenMV/02-Board-Control => 01-Arduino-Boards/Portenta-H7/00-Board-Control}/uart_control.py (100%)
 rename scripts/examples/{OpenMV/02-Board-Control => 01-Arduino-Boards/Portenta-H7/00-Board-Control}/usb_hid.py (100%)
 rename scripts/examples/{Arduino/Portenta-H7/02-Board-Control => 01-Arduino-Boards/Portenta-H7/00-Board-Control}/usb_vcp.py (100%)
 rename scripts/examples/{Arduino/Portenta-H7/02-Board-Control => 01-Arduino-Boards/Portenta-H7/00-Board-Control}/vsync_gpio_output.py (100%)
 rename scripts/examples/{Arduino/Portenta-H7/37-Audio => 01-Arduino-Boards/Portenta-H7/01-Audio}/audio_fft.py (100%)
 rename scripts/examples/{Arduino/Portenta-H7/37-Audio => 01-Arduino-Boards/Portenta-H7/01-Audio}/micro_speech.py (100%)
 rename scripts/examples/{Arduino/Portenta-H7/40-WiFi => 01-Arduino-Boards/Portenta-H7/02-WiFi}/connect.py (100%)
 rename scripts/examples/{Arduino/Portenta-H7/40-WiFi => 01-Arduino-Boards/Portenta-H7/02-WiFi}/dns.py (100%)
 rename scripts/examples/{Arduino/Portenta-H7/40-WiFi => 01-Arduino-Boards/Portenta-H7/02-WiFi}/http_client.py (100%)
 rename scripts/examples/{Arduino/Portenta-H7/40-WiFi => 01-Arduino-Boards/Portenta-H7/02-WiFi}/http_client_ssl.py (100%)
 rename scripts/examples/{Arduino/Portenta-H7/40-WiFi => 01-Arduino-Boards/Portenta-H7/02-WiFi}/mjpeg_streamer.py (100%)
 rename scripts/examples/{Arduino/Portenta-H7/40-WiFi => 01-Arduino-Boards/Portenta-H7/02-WiFi}/mqtt_pub.py (100%)
 rename scripts/examples/{Arduino/Portenta-H7/40-WiFi => 01-Arduino-Boards/Portenta-H7/02-WiFi}/mqtt_sub.py (100%)
 rename scripts/examples/{Arduino/Portenta-H7/40-WiFi => 01-Arduino-Boards/Portenta-H7/02-WiFi}/ntp.py (100%)
 rename scripts/examples/{Arduino/Portenta-H7/40-WiFi => 01-Arduino-Boards/Portenta-H7/02-WiFi}/scan.py (100%)
 rename scripts/examples/{Arduino/Portenta-H7/40-WiFi => 01-Arduino-Boards/Portenta-H7/02-WiFi}/static_ip.py (100%)
 rename scripts/examples/{Arduino/Portenta-H7/41-Bluetooth => 01-Arduino-Boards/Portenta-H7/03-Bluetooth}/ble_temperature.py (100%)
 rename scripts/examples/{Arduino/Portenta-H7/39-LoRa => 01-Arduino-Boards/Portenta-H7/04-LoRa}/lora-example.py (100%)
 rename scripts/examples/{Arduino/Portenta-H7/38-Ethernet => 01-Arduino-Boards/Portenta-H7/05-Ethernet}/eth_cable_test.py (100%)
 rename scripts/examples/{Arduino/Portenta-H7/38-Ethernet => 01-Arduino-Boards/Portenta-H7/05-Ethernet}/http_client.py (100%)
 rename scripts/examples/{Arduino/Portenta-H7/38-Ethernet => 01-Arduino-Boards/Portenta-H7/05-Ethernet}/http_client_ssl.py (100%)
 rename scripts/examples/{Arduino/Portenta-H7/38-Ethernet => 01-Arduino-Boards/Portenta-H7/05-Ethernet}/peer_to_peer.py (100%)
 rename scripts/examples/{Arduino/Portenta-H7/19-Low-Power => 01-Arduino-Boards/Portenta-H7/06-Low-Power}/deep_sleep.py (100%)
 rename scripts/examples/{OpenMV/19-Low-Power => 01-Arduino-Boards/Portenta-H7/06-Low-Power}/extint_wakeup.py (100%)
 rename scripts/examples/{Arduino/Portenta-H7/19-Low-Power => 01-Arduino-Boards/Portenta-H7/06-Low-Power}/himax_wakeup_on_motion_detection.py (100%)
 rename scripts/examples/{Arduino/Portenta-H7/19-Low-Power => 01-Arduino-Boards/Portenta-H7/06-Low-Power}/sensor_sleep.py (100%)
 rename scripts/examples/{OpenMV/19-Low-Power => 01-Arduino-Boards/Portenta-H7/06-Low-Power}/stop_mode.py (100%)
 rename scripts/examples/{Arduino/Nicla-Vision/00-Basics => 02-HelloWorld}/helloworld.py (100%)
 rename scripts/examples/{Arduino/Nicla-Vision/05-Snapshot => 03-Camera/00-Snapshot}/emboss_snapshot.py (100%)
 rename scripts/examples/{Arduino/Nicla-Vision/05-Snapshot => 03-Camera/00-Snapshot}/snapshot.py (100%)
 rename scripts/examples/{Arduino/Nicla-Vision/05-Snapshot => 03-Camera/00-Snapshot}/snapshot_on_face_detection.py (100%)
 rename scripts/examples/{OpenMV/05-Snapshot => 03-Camera/00-Snapshot}/snapshot_on_movement.py (100%)
 rename scripts/examples/{Arduino/Nicla-Vision/05-Snapshot => 03-Camera/00-Snapshot}/time_lapse_photos.py (100%)
 rename scripts/examples/{Arduino/Nicla-Vision/06-Video-Recording => 03-Camera/01-Video-Recording}/gif.py (100%)
 rename scripts/examples/{Arduino/Nicla-Vision/06-Video-Recording => 03-Camera/01-Video-Recording}/gif_on_face_detection.py (100%)
 rename scripts/examples/{OpenMV/06-Video-Recording => 03-Camera/01-Video-Recording}/gif_on_movement.py (100%)
 rename scripts/examples/{Arduino/Nicla-Vision/06-Video-Recording => 03-Camera/01-Video-Recording}/imageio_memory.py (100%)
 rename scripts/examples/{Arduino/Nicla-Vision/06-Video-Recording => 03-Camera/01-Video-Recording}/imageio_read.py (100%)
 rename scripts/examples/{Arduino/Nicla-Vision/06-Video-Recording => 03-Camera/01-Video-Recording}/imageio_write.py (100%)
 rename scripts/examples/{Arduino/Nicla-Vision/06-Video-Recording => 03-Camera/01-Video-Recording}/mjpeg.py (100%)
 rename scripts/examples/{Arduino/Nicla-Vision/06-Video-Recording => 03-Camera/01-Video-Recording}/mjpeg_on_face_detection.py (100%)
 rename scripts/examples/{OpenMV/06-Video-Recording => 03-Camera/01-Video-Recording}/mjpeg_on_movement.py (100%)
 rename scripts/examples/{Arduino/Nicla-Vision/22-Optical-Flow => 03-Camera/02-Optical-Flow}/absolute-rotation-scale.py (100%)
 rename scripts/examples/{Arduino/Nicla-Vision/22-Optical-Flow => 03-Camera/02-Optical-Flow}/absolute-translation.py (100%)
 rename scripts/examples/{Arduino/Nicla-Vision/22-Optical-Flow => 03-Camera/02-Optical-Flow}/differential-rotation-scale.py (100%)
 rename scripts/examples/{Arduino/Nicla-Vision/22-Optical-Flow => 03-Camera/02-Optical-Flow}/differential-translation.py (100%)
 rename scripts/examples/{Arduino/Nicla-Vision/22-Optical-Flow => 03-Camera/02-Optical-Flow}/image-patches-absolute-rotation-scale.py (100%)
 rename scripts/examples/{Arduino/Nicla-Vision/22-Optical-Flow => 03-Camera/02-Optical-Flow}/image-patches-absolute-translation.py (100%)
 rename scripts/examples/{Arduino/Nicla-Vision/22-Optical-Flow => 03-Camera/02-Optical-Flow}/image-patches-differential-rotation-scale.py (100%)
 rename scripts/examples/{Arduino/Nicla-Vision/22-Optical-Flow => 03-Camera/02-Optical-Flow}/image-patches-differential-translation.py (100%)
 rename scripts/examples/{OpenMV/37-Event-Cameras => 03-Camera/03-Event-Cameras}/frogeye2020.py (100%)
 rename scripts/examples/{OpenMV/37-Event-Cameras => 03-Camera/03-Event-Cameras}/frogeye2020_with_tracking.py (100%)
 rename scripts/examples/{Arduino/Portenta-H7/28-Global-Shutter => 03-Camera/04-Global-Shutter}/high_fps.py (100%)
 rename scripts/examples/{Arduino/Portenta-H7/28-Global-Shutter => 03-Camera/04-Global-Shutter}/triggered_mode.py (100%)
 rename scripts/examples/{OpenMV/27-Lepton => 03-Camera/05-FLIR-Lepton}/lepton_get_object_high_temp.py (100%)
 rename scripts/examples/{OpenMV/27-Lepton => 03-Camera/05-FLIR-Lepton}/lepton_get_object_temp.py (100%)
 rename scripts/examples/{OpenMV/27-Lepton => 03-Camera/05-FLIR-Lepton}/lepton_get_object_temp_color.py (100%)
 rename scripts/examples/{OpenMV/27-Lepton => 03-Camera/05-FLIR-Lepton}/lepton_get_object_temp_color_lcd.py (100%)
 rename scripts/examples/{OpenMV/27-Lepton => 03-Camera/05-FLIR-Lepton}/lepton_get_object_temp_lcd.py (100%)
 rename scripts/examples/{OpenMV/27-Lepton => 03-Camera/05-FLIR-Lepton}/lepton_hotspot_grayscale_color_tracking.py (100%)
 rename scripts/examples/{OpenMV/27-Lepton => 03-Camera/05-FLIR-Lepton}/lepton_hotspot_grayscale_color_tracking_lcd.py (100%)
 rename scripts/examples/{OpenMV/27-Lepton => 03-Camera/05-FLIR-Lepton}/lepton_hotspot_rgb565_color_tracking.py (100%)
 rename scripts/examples/{OpenMV/27-Lepton => 03-Camera/05-FLIR-Lepton}/lepton_hotspot_rgb565_color_tracking_lcd.py (100%)
 rename scripts/examples/{OpenMV/27-Lepton => 03-Camera/05-FLIR-Lepton}/lepton_target_temp_hotspot_grayscale_color_tracking.py (100%)
 rename scripts/examples/{OpenMV/27-Lepton => 03-Camera/05-FLIR-Lepton}/lepton_target_temp_hotspot_rgb565_color_tracking.py (100%)
 rename scripts/examples/{OpenMV/38-Time-of-Flight => 03-Camera/06-Time-of-Flight}/tof_camera.py (100%)
 rename scripts/examples/{OpenMV/38-Time-of-Flight => 03-Camera/06-Time-of-Flight}/tof_overlay.py (100%)
 rename scripts/examples/{OpenMV/21-Sensor-Control => 03-Camera/07-Sensor-Control}/sensor_auto_gain_control.py (100%)
 rename scripts/examples/{OpenMV/21-Sensor-Control => 03-Camera/07-Sensor-Control}/sensor_exposure_control.py (100%)
 rename scripts/examples/{OpenMV/21-Sensor-Control => 03-Camera/07-Sensor-Control}/sensor_horizontal_mirror.py (100%)
 rename scripts/examples/{OpenMV/21-Sensor-Control => 03-Camera/07-Sensor-Control}/sensor_manual_whitebal_control.py (100%)
 rename scripts/examples/{OpenMV/21-Sensor-Control => 03-Camera/07-Sensor-Control}/sensor_vertical_flip.py (100%)
 rename scripts/examples/{OpenMV/21-Sensor-Control => 03-Camera/07-Sensor-Control}/sesnor_manual_gain_control.py (100%)
 rename scripts/examples/{Arduino/Portenta-H7/35-Readout-Control => 03-Camera/08-Readout-Control}/100_fps_ir_led_tracking.py (100%)
 rename scripts/examples/{Arduino/Portenta-H7/35-Readout-Control => 03-Camera/08-Readout-Control}/apriltag_tracking.py (100%)
 rename scripts/examples/{Arduino/Nicla-Vision/03-Drawing => 04-Image-Processing/00-Drawing}/arrow_drawing.py (100%)
 rename scripts/examples/{Arduino/Nicla-Vision/03-Drawing => 04-Image-Processing/00-Drawing}/circle_drawing.py (100%)
 rename scripts/examples/{Arduino/Portenta-H7/03-Drawing => 04-Image-Processing/00-Drawing}/copy2fb.py (100%)
 rename scripts/examples/{Arduino/Nicla-Vision/03-Drawing => 04-Image-Processing/00-Drawing}/cross_drawing.py (100%)
 rename scripts/examples/{Arduino/Nicla-Vision/03-Drawing => 04-Image-Processing/00-Drawing}/ellipse_drawing.py (100%)
 rename scripts/examples/{Arduino/Nicla-Vision/03-Drawing => 04-Image-Processing/00-Drawing}/flood_fill.py (100%)
 rename scripts/examples/{Arduino/Nicla-Vision/03-Drawing => 04-Image-Processing/00-Drawing}/image_drawing.py (100%)
 rename scripts/examples/{Arduino/Nicla-Vision/03-Drawing => 04-Image-Processing/00-Drawing}/image_drawing_advanced.py (100%)
 rename scripts/examples/{Arduino/Nicla-Vision/03-Drawing => 04-Image-Processing/00-Drawing}/image_drawing_alpha_blending_test.py (100%)
 rename scripts/examples/{Arduino/Nicla-Vision/03-Drawing => 04-Image-Processing/00-Drawing}/image_drawing_alpha_blending_with_color_table_test.py (100%)
 rename scripts/examples/{Arduino/Nicla-Vision/03-Drawing => 04-Image-Processing/00-Drawing}/image_drawing_alpha_table_test.py (100%)
 rename scripts/examples/{Arduino/Nicla-Vision/03-Drawing => 04-Image-Processing/00-Drawing}/image_drawing_alpha_table_with_color_table_test.py (100%)
 rename scripts/examples/{Arduino/Nicla-Vision/03-Drawing => 04-Image-Processing/00-Drawing}/image_drawing_scale_down_test.py (100%)
 rename scripts/examples/{Arduino/Nicla-Vision/03-Drawing => 04-Image-Processing/00-Drawing}/image_drawing_scale_up_test.py (100%)
 rename scripts/examples/{Arduino/Nicla-Vision/03-Drawing => 04-Image-Processing/00-Drawing}/image_drawing_with_custom_palette.py (100%)
 rename scripts/examples/{Arduino/Nicla-Vision/03-Drawing => 04-Image-Processing/00-Drawing}/keypoints_drawing.py (100%)
 rename scripts/examples/{Arduino/Nicla-Vision/03-Drawing => 04-Image-Processing/00-Drawing}/line_drawing.py (100%)
 rename scripts/examples/{Arduino/Nicla-Vision/03-Drawing => 04-Image-Processing/00-Drawing}/rectangle_drawing.py (100%)
 rename scripts/examples/{Arduino/Nicla-Vision/03-Drawing => 04-Image-Processing/00-Drawing}/text_drawing.py (100%)
 rename scripts/examples/{Arduino/Nicla-Vision/04-Image-Filters => 04-Image-Processing/01-Image-Filters}/adaptive_histogram_equalization.py (100%)
 rename scripts/examples/{Arduino/Nicla-Vision/04-Image-Filters => 04-Image-Processing/01-Image-Filters}/blur_filter.py (100%)
 rename scripts/examples/{Arduino/Nicla-Vision/04-Image-Filters => 04-Image-Processing/01-Image-Filters}/cartoon_filter.py (100%)
 rename scripts/examples/{Arduino/Nicla-Vision/04-Image-Filters => 04-Image-Processing/01-Image-Filters}/color_bilateral_filter.py (100%)
 rename scripts/examples/{Arduino/Nicla-Vision/04-Image-Filters => 04-Image-Processing/01-Image-Filters}/color_binary_filter.py (100%)
 rename scripts/examples/{Arduino/Nicla-Vision/04-Image-Filters => 04-Image-Processing/01-Image-Filters}/color_light_removal.py (100%)
 rename scripts/examples/{Arduino/Nicla-Vision/04-Image-Filters => 04-Image-Processing/01-Image-Filters}/edge_filter.py (100%)
 rename scripts/examples/{Arduino/Nicla-Vision/04-Image-Filters => 04-Image-Processing/01-Image-Filters}/erode_and_dilate.py (100%)
 rename scripts/examples/{Arduino/Nicla-Vision/04-Image-Filters => 04-Image-Processing/01-Image-Filters}/gamma_correction.py (100%)
 rename scripts/examples/{Arduino/Nicla-Vision/04-Image-Filters => 04-Image-Processing/01-Image-Filters}/grayscale_bilateral_filter.py (100%)
 rename scripts/examples/{Arduino/Nicla-Vision/04-Image-Filters => 04-Image-Processing/01-Image-Filters}/grayscale_binary_filter.py (100%)
 rename scripts/examples/{Arduino/Nicla-Vision/04-Image-Filters => 04-Image-Processing/01-Image-Filters}/grayscale_light_removal.py (100%)
 rename scripts/examples/{Arduino/Nicla-Vision/04-Image-Filters => 04-Image-Processing/01-Image-Filters}/histogram_equalization.py (100%)
 rename scripts/examples/{Arduino/Nicla-Vision/04-Image-Filters => 04-Image-Processing/01-Image-Filters}/kernel_filters.py (100%)
 rename scripts/examples/{Arduino/Nicla-Vision/04-Image-Filters => 04-Image-Processing/01-Image-Filters}/lens_correction.py (100%)
 rename scripts/examples/{Arduino/Nicla-Vision/04-Image-Filters => 04-Image-Processing/01-Image-Filters}/linear_polar.py (100%)
 rename scripts/examples/{Arduino/Nicla-Vision/04-Image-Filters => 04-Image-Processing/01-Image-Filters}/log_polar.py (100%)
 rename scripts/examples/{Arduino/Nicla-Vision/04-Image-Filters => 04-Image-Processing/01-Image-Filters}/mean_adaptive_threshold_filter.py (100%)
 rename scripts/examples/{Arduino/Nicla-Vision/04-Image-Filters => 04-Image-Processing/01-Image-Filters}/mean_filter.py (100%)
 rename scripts/examples/{Arduino/Nicla-Vision/04-Image-Filters => 04-Image-Processing/01-Image-Filters}/median_adaptive_threshold_filter.py (100%)
 rename scripts/examples/{Arduino/Nicla-Vision/04-Image-Filters => 04-Image-Processing/01-Image-Filters}/median_filter.py (100%)
 rename scripts/examples/{Arduino/Nicla-Vision/04-Image-Filters => 04-Image-Processing/01-Image-Filters}/midpoint_adaptive_threshold_filter.py (100%)
 rename scripts/examples/{Arduino/Nicla-Vision/04-Image-Filters => 04-Image-Processing/01-Image-Filters}/midpoint_filter.py (100%)
 rename scripts/examples/{Arduino/Nicla-Vision/04-Image-Filters => 04-Image-Processing/01-Image-Filters}/mode_adaptive_threshold_filter.py (100%)
 rename scripts/examples/{Arduino/Nicla-Vision/04-Image-Filters => 04-Image-Processing/01-Image-Filters}/mode_filter.py (100%)
 rename scripts/examples/{Arduino/Nicla-Vision/04-Image-Filters => 04-Image-Processing/01-Image-Filters}/negative.py (100%)
 rename scripts/examples/{Arduino/Nicla-Vision/04-Image-Filters => 04-Image-Processing/01-Image-Filters}/perspective_and_rotation_correction.py (100%)
 rename scripts/examples/{Arduino/Nicla-Vision/04-Image-Filters => 04-Image-Processing/01-Image-Filters}/perspective_correction.py (100%)
 rename scripts/examples/{Arduino/Nicla-Vision/04-Image-Filters => 04-Image-Processing/01-Image-Filters}/rotation_correction.py (100%)
 rename scripts/examples/{Arduino/Nicla-Vision/04-Image-Filters => 04-Image-Processing/01-Image-Filters}/sharpen_filter.py (100%)
 rename scripts/examples/{Arduino/Nicla-Vision/04-Image-Filters => 04-Image-Processing/01-Image-Filters}/ulab.py (100%)
 rename scripts/examples/{Arduino/Nicla-Vision/04-Image-Filters => 04-Image-Processing/01-Image-Filters}/unsharp_filter.py (100%)
 rename scripts/examples/{Arduino/Nicla-Vision/04-Image-Filters => 04-Image-Processing/01-Image-Filters}/vflip_hmirror_transpose.py (100%)
 rename scripts/examples/{OpenMV/10-Color-Tracking => 04-Image-Processing/02-Color-Tracking}/automatic_grayscale_color_tracking.py (100%)
 rename scripts/examples/{OpenMV/10-Color-Tracking => 04-Image-Processing/02-Color-Tracking}/automatic_rgb565_color_tracking.py (100%)
 rename scripts/examples/{OpenMV/10-Color-Tracking => 04-Image-Processing/02-Color-Tracking}/black_grayscale_line_following.py (100%)
 rename scripts/examples/{OpenMV/10-Color-Tracking => 04-Image-Processing/02-Color-Tracking}/image_histogram_info.py (100%)
 rename scripts/examples/{OpenMV/10-Color-Tracking => 04-Image-Processing/02-Color-Tracking}/image_statistics_info.py (100%)
 rename scripts/examples/{OpenMV/10-Color-Tracking => 04-Image-Processing/02-Color-Tracking}/ir_beacon_grayscale_tracking.py (100%)
 rename scripts/examples/{OpenMV/10-Color-Tracking => 04-Image-Processing/02-Color-Tracking}/ir_beacon_rgb565_tracking.py (100%)
 rename scripts/examples/{OpenMV/10-Color-Tracking => 04-Image-Processing/02-Color-Tracking}/multi_color_blob_tracking.py (100%)
 rename scripts/examples/{OpenMV/10-Color-Tracking => 04-Image-Processing/02-Color-Tracking}/multi_color_code_tracking.py (100%)
 rename scripts/examples/{OpenMV/10-Color-Tracking => 04-Image-Processing/02-Color-Tracking}/single_color_code_tracking.py (100%)
 rename scripts/examples/{OpenMV/10-Color-Tracking => 04-Image-Processing/02-Color-Tracking}/single_color_grayscale_blob_tracking.py (100%)
 rename scripts/examples/{OpenMV/10-Color-Tracking => 04-Image-Processing/02-Color-Tracking}/single_color_rgb565_blob_tracking.py (100%)
 rename scripts/examples/{OpenMV/20-Frame-Differencing => 04-Image-Processing/03-Frame-Differencing}/in_memory_advanced_frame_differencing.py (100%)
 rename scripts/examples/{OpenMV/20-Frame-Differencing => 04-Image-Processing/03-Frame-Differencing}/in_memory_basic_frame_differencing.py (100%)
 rename scripts/examples/{OpenMV/20-Frame-Differencing => 04-Image-Processing/03-Frame-Differencing}/in_memory_shadow_removal.py (100%)
 rename scripts/examples/{OpenMV/20-Frame-Differencing => 04-Image-Processing/03-Frame-Differencing}/in_memory_structural_similarity.py (100%)
 rename scripts/examples/{OpenMV/20-Frame-Differencing => 04-Image-Processing/03-Frame-Differencing}/on_disk_advanced_frame_differencing.py (100%)
 rename scripts/examples/{OpenMV/20-Frame-Differencing => 04-Image-Processing/03-Frame-Differencing}/on_disk_basic_frame_differencing.py (100%)
 rename scripts/examples/{OpenMV/20-Frame-Differencing => 04-Image-Processing/03-Frame-Differencing}/on_disk_shadow_removal.py (100%)
 rename scripts/examples/{OpenMV/20-Frame-Differencing => 04-Image-Processing/03-Frame-Differencing}/on_disk_structural_similarity.py (100%)
 rename scripts/examples/{OpenMV/25-Machine-Learning => 05-Machine-Learning/00-TensorFlow}/tf_face_collection.py (100%)
 rename scripts/examples/{OpenMV/25-Machine-Learning => 05-Machine-Learning/00-TensorFlow}/tf_face_recognition.py (100%)
 rename scripts/examples/{OpenMV/25-Machine-Learning => 05-Machine-Learning/00-TensorFlow}/tf_mobilenet_search_just_center.py (100%)
 rename scripts/examples/{OpenMV/25-Machine-Learning => 05-Machine-Learning/00-TensorFlow}/tf_mobilenet_search_whole_window.py (100%)
 rename scripts/examples/{Arduino/Nicla-Vision/25-Machine-Learning => 05-Machine-Learning/00-TensorFlow}/tf_object_detection.py (100%)
 rename scripts/examples/{Arduino/Nicla-Vision/25-Machine-Learning => 05-Machine-Learning/00-TensorFlow}/tf_person_detection_search_just_center.py (100%)
 rename scripts/examples/{Arduino/Nicla-Vision/25-Machine-Learning => 05-Machine-Learning/00-TensorFlow}/tf_person_detection_search_whole_window.py (100%)
 rename scripts/examples/{Arduino/Portenta-H7/25-Machine-Learning => 05-Machine-Learning/01-ST-CubeAI}/nn_stm32cubeai.py (100%)
 rename scripts/examples/{Arduino/Portenta-H7/07-Face-Detection => 05-Machine-Learning/02-Haar-Cascade}/face_detection.py (100%)
 rename scripts/examples/{Arduino/Portenta-H7/08-Eye-Tracking => 05-Machine-Learning/02-Haar-Cascade}/face_eye_detection.py (100%)
 rename scripts/examples/{Arduino/Nicla-Vision/07-Face-Detection => 05-Machine-Learning/02-Haar-Cascade}/face_recognition.py (100%)
 rename scripts/examples/{Arduino/Portenta-H7/07-Face-Detection => 05-Machine-Learning/02-Haar-Cascade}/face_tracking.py (100%)
 rename scripts/examples/{Arduino/Portenta-H7/08-Eye-Tracking => 05-Machine-Learning/02-Haar-Cascade}/iris_detection.py (100%)
 rename scripts/examples/{Arduino/Portenta-H7/09-Feature-Detection => 06-Feature-Detection}/edges.py (100%)
 rename scripts/examples/{Arduino/Nicla-Vision/09-Feature-Detection => 06-Feature-Detection}/find_circles.py (100%)
 rename scripts/examples/{Arduino/Nicla-Vision/09-Feature-Detection => 06-Feature-Detection}/find_line_segments.py (100%)
 rename scripts/examples/{Arduino/Nicla-Vision/09-Feature-Detection => 06-Feature-Detection}/find_lines.py (100%)
 rename scripts/examples/{Arduino/Nicla-Vision/09-Feature-Detection => 06-Feature-Detection}/find_rects.py (100%)
 rename scripts/examples/{Arduino/Portenta-H7/09-Feature-Detection => 06-Feature-Detection}/hog.py (100%)
 rename scripts/examples/{Arduino/Portenta-H7/09-Feature-Detection => 06-Feature-Detection}/keypoints.py (100%)
 rename scripts/examples/{Arduino/Portenta-H7/09-Feature-Detection => 06-Feature-Detection}/keypoints_save.py (100%)
 rename scripts/examples/{Arduino/Portenta-H7/09-Feature-Detection => 06-Feature-Detection}/lbp.py (100%)
 rename scripts/examples/{Arduino/Nicla-Vision/09-Feature-Detection => 06-Feature-Detection}/linear_regression_fast.py (100%)
 rename scripts/examples/{Arduino/Nicla-Vision/09-Feature-Detection => 06-Feature-Detection}/linear_regression_robust.py (100%)
 rename scripts/examples/{OpenMV/09-Feature-Detection => 06-Feature-Detection}/selective_search.py (100%)
 rename scripts/examples/{Arduino/Portenta-H7/09-Feature-Detection => 06-Feature-Detection}/template_matching.py (100%)
 rename scripts/examples/{OpenMV/16-Codes => 07-Barcodes}/find_barcodes.py (100%)
 rename scripts/examples/{OpenMV/16-Codes => 07-Barcodes}/find_datamatrices.py (100%)
 rename scripts/examples/{OpenMV/16-Codes => 07-Barcodes}/find_datamatrices_w_lens_zoom.py (100%)
 rename scripts/examples/{OpenMV/16-Codes => 07-Barcodes}/qrcodes_with_lens_corr.py (100%)
 rename scripts/examples/{Arduino/Portenta-H7/16-Codes => 07-Barcodes}/qrcodes_with_lens_zoom.py (100%)
 rename scripts/examples/{OpenMV/26-April-Tags => 08-April-Tags}/find_apriltags.py (100%)
 rename scripts/examples/{OpenMV/26-April-Tags => 08-April-Tags}/find_apriltags_3d_pose.py (100%)
 rename scripts/examples/{OpenMV/26-April-Tags => 08-April-Tags}/find_apriltags_max_res.py (100%)
 rename scripts/examples/{OpenMV/26-April-Tags => 08-April-Tags}/find_apriltags_w_lens_zoom.py (100%)
 rename scripts/examples/{OpenMV/26-April-Tags => 08-April-Tags}/find_small_apriltags.py (100%)
 rename scripts/examples/{Arduino/Portenta-H7 => 09-Interface-Library}/00-Arduino/arduino_i2c_slave.py (100%)
 rename
scripts/examples/{Arduino/Portenta-H7 => 09-Interface-Library}/00-Arduino/arduino_spi_slave.py (100%) rename scripts/examples/{Arduino/Portenta-H7 => 09-Interface-Library}/00-Arduino/arduino_uart.py (100%) rename scripts/examples/{Arduino/Portenta-H7/17-Pixy-Emulation => 09-Interface-Library/01-Pixy-Emulation}/apriltags_pixy_i2c_emulation.py (100%) rename scripts/examples/{Arduino/Portenta-H7/17-Pixy-Emulation => 09-Interface-Library/01-Pixy-Emulation}/apriltags_pixy_spi_emulation.py (100%) rename scripts/examples/{Arduino/Portenta-H7/17-Pixy-Emulation => 09-Interface-Library/01-Pixy-Emulation}/apriltags_pixy_uart_emulation.py (100%) rename scripts/examples/{OpenMV/17-Pixy-Emulation => 09-Interface-Library/01-Pixy-Emulation}/pixy_i2c_emulation.py (100%) rename scripts/examples/{OpenMV/17-Pixy-Emulation => 09-Interface-Library/01-Pixy-Emulation}/pixy_spi_emulation.py (100%) rename scripts/examples/{OpenMV/17-Pixy-Emulation => 09-Interface-Library/01-Pixy-Emulation}/pixy_uart_emulation.py (100%) rename scripts/examples/{Arduino/Portenta-H7/18-MAVLink => 09-Interface-Library/02-MAVLink}/mavlink_apriltags_landing_target.py (100%) rename scripts/examples/{OpenMV/18-MAVLink => 09-Interface-Library/02-MAVLink}/mavlink_opticalflow.py (100%) rename scripts/examples/{Arduino/Portenta-H7/32-modbus => 09-Interface-Library/03-Modbus}/modbus_apriltag.py (100%) rename scripts/examples/{Arduino/Portenta-H7/32-modbus => 09-Interface-Library/03-Modbus}/modbus_rtu_slave.py (100%) rename scripts/examples/{Arduino/Nicla-Vision => 10-RPC-Library}/34-Remote-Control/image_transfer_jpg_as_the_remote_device_for_your_computer.py (100%) rename scripts/examples/{Arduino/Nicla-Vision => 10-RPC-Library}/34-Remote-Control/image_transfer_jpg_streaming_as_the_remote_device_for_your_computer.py (100%) rename scripts/examples/{Arduino/Nicla-Vision => 10-RPC-Library}/34-Remote-Control/image_transfer_raw_as_the_controller_device.py (100%) rename scripts/examples/{Arduino/Nicla-Vision => 10-RPC-Library}/34-Remote-Control/image_transfer_raw_as_the_remote_device.py (100%) rename scripts/examples/{Arduino/Nicla-Vision => 10-RPC-Library}/34-Remote-Control/popular_features_as_the_controller_device.py (100%) rename scripts/examples/{OpenMV => 10-RPC-Library}/34-Remote-Control/popular_features_as_the_remote_device.py (100%) rename scripts/examples/{OpenMV => 10-RPC-Library}/36-Web-Servers/rtsp_video_server_lan.py (100%) rename scripts/examples/{OpenMV => 10-RPC-Library}/36-Web-Servers/rtsp_video_server_wlan.py (100%) delete mode 100644 scripts/examples/Arduino/Nicla-Vision/00-Basics/main.py delete mode 100644 scripts/examples/Arduino/Nicla-Vision/02-Board-Control/native_emitters.py delete mode 100644 scripts/examples/Arduino/Nicla-Vision/03-Drawing/copy2fb.py delete mode 100644 scripts/examples/Arduino/Nicla-Vision/05-Snapshot/snapshot_on_movement.py delete mode 100644 scripts/examples/Arduino/Nicla-Vision/06-Video-Recording/gif_on_movement.py delete mode 100644 scripts/examples/Arduino/Nicla-Vision/06-Video-Recording/mjpeg_on_movement.py delete mode 100644 scripts/examples/Arduino/Nicla-Vision/07-Face-Detection/face_detection.py delete mode 100644 scripts/examples/Arduino/Nicla-Vision/07-Face-Detection/face_tracking.py delete mode 100644 scripts/examples/Arduino/Nicla-Vision/09-Feature-Detection/edges.py delete mode 100644 scripts/examples/Arduino/Nicla-Vision/09-Feature-Detection/hog.py delete mode 100644 scripts/examples/Arduino/Nicla-Vision/09-Feature-Detection/keypoints.py delete mode 100644 
scripts/examples/Arduino/Nicla-Vision/09-Feature-Detection/keypoints_save.py delete mode 100644 scripts/examples/Arduino/Nicla-Vision/09-Feature-Detection/lbp.py delete mode 100644 scripts/examples/Arduino/Nicla-Vision/09-Feature-Detection/template_matching.py delete mode 100644 scripts/examples/Arduino/Nicla-Vision/16-Codes/find_barcodes.py delete mode 100644 scripts/examples/Arduino/Nicla-Vision/16-Codes/find_datamatrices.py delete mode 100644 scripts/examples/Arduino/Nicla-Vision/16-Codes/find_datamatrices_w_lens_zoom.py delete mode 100644 scripts/examples/Arduino/Nicla-Vision/16-Codes/qrcodes_with_lens_corr.py delete mode 100644 scripts/examples/Arduino/Nicla-Vision/16-Codes/qrcodes_with_lens_zoom.py delete mode 100644 scripts/examples/Arduino/Nicla-Vision/20-Frame-Differencing/in_memory_basic_frame_differencing.py delete mode 100644 scripts/examples/Arduino/Nicla-Vision/20-Frame-Differencing/in_memory_structural_similarity.py delete mode 100644 scripts/examples/Arduino/Nicla-Vision/20-Frame-Differencing/on_disk_basic_frame_differencing.py delete mode 100644 scripts/examples/Arduino/Nicla-Vision/20-Frame-Differencing/on_disk_structural_similarity.py delete mode 100644 scripts/examples/Arduino/Nicla-Vision/25-Machine-Learning/nn_stm32cubeai.py delete mode 100644 scripts/examples/Arduino/Nicla-Vision/25-Machine-Learning/tf_face_collection.py delete mode 100644 scripts/examples/Arduino/Nicla-Vision/25-Machine-Learning/tf_face_recognition.py delete mode 100644 scripts/examples/Arduino/Nicla-Vision/25-Machine-Learning/tf_mobilenet_search_just_center.py delete mode 100644 scripts/examples/Arduino/Nicla-Vision/25-Machine-Learning/tf_mobilenet_search_whole_window.py delete mode 100644 scripts/examples/Arduino/Nicla-Vision/26-April-Tags/find_apriltags.py delete mode 100644 scripts/examples/Arduino/Nicla-Vision/26-April-Tags/find_apriltags_3d_pose.py delete mode 100644 scripts/examples/Arduino/Nicla-Vision/26-April-Tags/find_apriltags_max_res.py delete mode 100644 scripts/examples/Arduino/Nicla-Vision/26-April-Tags/find_apriltags_w_lens_zoom.py delete mode 100644 scripts/examples/Arduino/Nicla-Vision/26-April-Tags/find_small_apriltags.py delete mode 100644 scripts/examples/Arduino/Nicla-Vision/34-Remote-Control/popular_features_as_the_remote_device.py delete mode 100644 scripts/examples/Arduino/Nicla-Vision/36-Web-Servers/rtsp_video_server_lan.py delete mode 100644 scripts/examples/Arduino/Nicla-Vision/36-Web-Servers/rtsp_video_server_wlan.py delete mode 100644 scripts/examples/Arduino/Portenta-H7/01-Basics/helloworld.py delete mode 100644 scripts/examples/Arduino/Portenta-H7/02-Board-Control/native_emitters.py delete mode 100644 scripts/examples/Arduino/Portenta-H7/03-Drawing/arrow_drawing.py delete mode 100644 scripts/examples/Arduino/Portenta-H7/03-Drawing/circle_drawing.py delete mode 100644 scripts/examples/Arduino/Portenta-H7/03-Drawing/cross_drawing.py delete mode 100644 scripts/examples/Arduino/Portenta-H7/03-Drawing/ellipse_drawing.py delete mode 100644 scripts/examples/Arduino/Portenta-H7/03-Drawing/flood_fill.py delete mode 100644 scripts/examples/Arduino/Portenta-H7/03-Drawing/image_drawing.py delete mode 100644 scripts/examples/Arduino/Portenta-H7/03-Drawing/image_drawing_advanced.py delete mode 100644 scripts/examples/Arduino/Portenta-H7/03-Drawing/image_drawing_alpha_blending_test.py delete mode 100644 scripts/examples/Arduino/Portenta-H7/03-Drawing/image_drawing_alpha_blending_with_color_table_test.py delete mode 100644 
scripts/examples/Arduino/Portenta-H7/03-Drawing/image_drawing_alpha_table_test.py delete mode 100644 scripts/examples/Arduino/Portenta-H7/03-Drawing/image_drawing_alpha_table_with_color_table_test.py delete mode 100644 scripts/examples/Arduino/Portenta-H7/03-Drawing/image_drawing_scale_down_test.py delete mode 100644 scripts/examples/Arduino/Portenta-H7/03-Drawing/image_drawing_scale_up_test.py delete mode 100644 scripts/examples/Arduino/Portenta-H7/03-Drawing/image_drawing_with_custom_palette.py delete mode 100644 scripts/examples/Arduino/Portenta-H7/03-Drawing/keypoints_drawing.py delete mode 100644 scripts/examples/Arduino/Portenta-H7/03-Drawing/line_drawing.py delete mode 100644 scripts/examples/Arduino/Portenta-H7/03-Drawing/rectangle_drawing.py delete mode 100644 scripts/examples/Arduino/Portenta-H7/03-Drawing/text_drawing.py delete mode 100644 scripts/examples/Arduino/Portenta-H7/04-Image-Filters/adaptive_histogram_equalization.py delete mode 100644 scripts/examples/Arduino/Portenta-H7/04-Image-Filters/blur_filter.py delete mode 100644 scripts/examples/Arduino/Portenta-H7/04-Image-Filters/cartoon_filter.py delete mode 100644 scripts/examples/Arduino/Portenta-H7/04-Image-Filters/color_bilateral_filter.py delete mode 100644 scripts/examples/Arduino/Portenta-H7/04-Image-Filters/color_binary_filter.py delete mode 100644 scripts/examples/Arduino/Portenta-H7/04-Image-Filters/color_light_removal.py delete mode 100644 scripts/examples/Arduino/Portenta-H7/04-Image-Filters/edge_filter.py delete mode 100644 scripts/examples/Arduino/Portenta-H7/04-Image-Filters/erode_and_dilate.py delete mode 100644 scripts/examples/Arduino/Portenta-H7/04-Image-Filters/gamma_correction.py delete mode 100644 scripts/examples/Arduino/Portenta-H7/04-Image-Filters/grayscale_bilateral_filter.py delete mode 100644 scripts/examples/Arduino/Portenta-H7/04-Image-Filters/grayscale_binary_filter.py delete mode 100644 scripts/examples/Arduino/Portenta-H7/04-Image-Filters/grayscale_light_removal.py delete mode 100644 scripts/examples/Arduino/Portenta-H7/04-Image-Filters/histogram_equalization.py delete mode 100644 scripts/examples/Arduino/Portenta-H7/04-Image-Filters/kernel_filters.py delete mode 100644 scripts/examples/Arduino/Portenta-H7/04-Image-Filters/lens_correction.py delete mode 100644 scripts/examples/Arduino/Portenta-H7/04-Image-Filters/linear_polar.py delete mode 100644 scripts/examples/Arduino/Portenta-H7/04-Image-Filters/log_polar.py delete mode 100644 scripts/examples/Arduino/Portenta-H7/04-Image-Filters/mean_adaptive_threshold_filter.py delete mode 100644 scripts/examples/Arduino/Portenta-H7/04-Image-Filters/mean_filter.py delete mode 100644 scripts/examples/Arduino/Portenta-H7/04-Image-Filters/median_adaptive_threshold_filter.py delete mode 100644 scripts/examples/Arduino/Portenta-H7/04-Image-Filters/median_filter.py delete mode 100644 scripts/examples/Arduino/Portenta-H7/04-Image-Filters/midpoint_adaptive_threshold_filter.py delete mode 100644 scripts/examples/Arduino/Portenta-H7/04-Image-Filters/midpoint_filter.py delete mode 100644 scripts/examples/Arduino/Portenta-H7/04-Image-Filters/mode_adaptive_threshold_filter.py delete mode 100644 scripts/examples/Arduino/Portenta-H7/04-Image-Filters/mode_filter.py delete mode 100644 scripts/examples/Arduino/Portenta-H7/04-Image-Filters/negative.py delete mode 100644 scripts/examples/Arduino/Portenta-H7/04-Image-Filters/perspective_and_rotation_correction.py delete mode 100644 scripts/examples/Arduino/Portenta-H7/04-Image-Filters/perspective_correction.py delete 
mode 100644 scripts/examples/Arduino/Portenta-H7/04-Image-Filters/rotation_correction.py delete mode 100644 scripts/examples/Arduino/Portenta-H7/04-Image-Filters/sharpen_filter.py delete mode 100644 scripts/examples/Arduino/Portenta-H7/04-Image-Filters/ulab.py delete mode 100644 scripts/examples/Arduino/Portenta-H7/04-Image-Filters/unsharp_filter.py delete mode 100644 scripts/examples/Arduino/Portenta-H7/04-Image-Filters/vflip_hmirror_transpose.py delete mode 100644 scripts/examples/Arduino/Portenta-H7/05-Snapshot/emboss_snapshot.py delete mode 100644 scripts/examples/Arduino/Portenta-H7/05-Snapshot/snapshot.py delete mode 100644 scripts/examples/Arduino/Portenta-H7/05-Snapshot/snapshot_on_face_detection.py delete mode 100644 scripts/examples/Arduino/Portenta-H7/05-Snapshot/snapshot_on_movement.py delete mode 100644 scripts/examples/Arduino/Portenta-H7/05-Snapshot/time_lapse_photos.py delete mode 100644 scripts/examples/Arduino/Portenta-H7/06-Video-Recording/gif.py delete mode 100644 scripts/examples/Arduino/Portenta-H7/06-Video-Recording/gif_on_face_detection.py delete mode 100644 scripts/examples/Arduino/Portenta-H7/06-Video-Recording/gif_on_movement.py delete mode 100644 scripts/examples/Arduino/Portenta-H7/06-Video-Recording/imageio_memory.py delete mode 100644 scripts/examples/Arduino/Portenta-H7/06-Video-Recording/imageio_read.py delete mode 100644 scripts/examples/Arduino/Portenta-H7/06-Video-Recording/imageio_write.py delete mode 100644 scripts/examples/Arduino/Portenta-H7/06-Video-Recording/mjpeg.py delete mode 100644 scripts/examples/Arduino/Portenta-H7/06-Video-Recording/mjpeg_on_face_detection.py delete mode 100644 scripts/examples/Arduino/Portenta-H7/06-Video-Recording/mjpeg_on_movement.py delete mode 100644 scripts/examples/Arduino/Portenta-H7/07-Face-Detection/face_recognition.py delete mode 100644 scripts/examples/Arduino/Portenta-H7/09-Feature-Detection/find_circles.py delete mode 100644 scripts/examples/Arduino/Portenta-H7/09-Feature-Detection/find_line_segments.py delete mode 100644 scripts/examples/Arduino/Portenta-H7/09-Feature-Detection/find_lines.py delete mode 100644 scripts/examples/Arduino/Portenta-H7/09-Feature-Detection/find_rects.py delete mode 100644 scripts/examples/Arduino/Portenta-H7/09-Feature-Detection/linear_regression_fast.py delete mode 100644 scripts/examples/Arduino/Portenta-H7/09-Feature-Detection/linear_regression_robust.py delete mode 100644 scripts/examples/Arduino/Portenta-H7/09-Feature-Detection/selective_search.py delete mode 100644 scripts/examples/Arduino/Portenta-H7/10-Color-Tracking/automatic_grayscale_color_tracking.py delete mode 100644 scripts/examples/Arduino/Portenta-H7/10-Color-Tracking/automatic_rgb565_color_tracking.py delete mode 100644 scripts/examples/Arduino/Portenta-H7/10-Color-Tracking/black_grayscale_line_following.py delete mode 100644 scripts/examples/Arduino/Portenta-H7/10-Color-Tracking/image_histogram_info.py delete mode 100644 scripts/examples/Arduino/Portenta-H7/10-Color-Tracking/image_statistics_info.py delete mode 100644 scripts/examples/Arduino/Portenta-H7/10-Color-Tracking/ir_beacon_grayscale_tracking.py delete mode 100644 scripts/examples/Arduino/Portenta-H7/10-Color-Tracking/ir_beacon_rgb565_tracking.py delete mode 100644 scripts/examples/Arduino/Portenta-H7/10-Color-Tracking/multi_color_blob_tracking.py delete mode 100644 scripts/examples/Arduino/Portenta-H7/10-Color-Tracking/multi_color_code_tracking.py delete mode 100644 scripts/examples/Arduino/Portenta-H7/10-Color-Tracking/single_color_code_tracking.py 
delete mode 100644 scripts/examples/Arduino/Portenta-H7/10-Color-Tracking/single_color_grayscale_blob_tracking.py delete mode 100644 scripts/examples/Arduino/Portenta-H7/10-Color-Tracking/single_color_rgb565_blob_tracking.py delete mode 100644 scripts/examples/Arduino/Portenta-H7/16-Codes/find_barcodes.py delete mode 100644 scripts/examples/Arduino/Portenta-H7/16-Codes/find_datamatrices.py delete mode 100644 scripts/examples/Arduino/Portenta-H7/16-Codes/find_datamatrices_w_lens_zoom.py delete mode 100644 scripts/examples/Arduino/Portenta-H7/16-Codes/qrcodes_with_lens_corr.py delete mode 100644 scripts/examples/Arduino/Portenta-H7/17-Pixy-Emulation/pixy_i2c_emulation.py delete mode 100644 scripts/examples/Arduino/Portenta-H7/17-Pixy-Emulation/pixy_spi_emulation.py delete mode 100644 scripts/examples/Arduino/Portenta-H7/17-Pixy-Emulation/pixy_uart_emulation.py delete mode 100644 scripts/examples/Arduino/Portenta-H7/18-MAVLink/mavlink_opticalflow.py delete mode 100644 scripts/examples/Arduino/Portenta-H7/20-Frame-Differencing/in_memory_advanced_frame_differencing.py delete mode 100644 scripts/examples/Arduino/Portenta-H7/20-Frame-Differencing/in_memory_basic_frame_differencing.py delete mode 100644 scripts/examples/Arduino/Portenta-H7/20-Frame-Differencing/in_memory_shadow_removal.py delete mode 100644 scripts/examples/Arduino/Portenta-H7/20-Frame-Differencing/in_memory_structural_similarity.py delete mode 100644 scripts/examples/Arduino/Portenta-H7/20-Frame-Differencing/on_disk_advanced_frame_differencing.py delete mode 100644 scripts/examples/Arduino/Portenta-H7/20-Frame-Differencing/on_disk_basic_frame_differencing.py delete mode 100644 scripts/examples/Arduino/Portenta-H7/20-Frame-Differencing/on_disk_shadow_removal.py delete mode 100644 scripts/examples/Arduino/Portenta-H7/20-Frame-Differencing/on_disk_structural_similarity.py delete mode 100644 scripts/examples/Arduino/Portenta-H7/21-Sensor-Control/himax_motion_detection.py delete mode 100644 scripts/examples/Arduino/Portenta-H7/21-Sensor-Control/sensor_auto_gain_control.py delete mode 100644 scripts/examples/Arduino/Portenta-H7/21-Sensor-Control/sensor_exposure_control.py delete mode 100644 scripts/examples/Arduino/Portenta-H7/21-Sensor-Control/sensor_horizontal_mirror.py delete mode 100644 scripts/examples/Arduino/Portenta-H7/21-Sensor-Control/sensor_manual_whitebal_control.py delete mode 100644 scripts/examples/Arduino/Portenta-H7/21-Sensor-Control/sensor_vertical_flip.py delete mode 100644 scripts/examples/Arduino/Portenta-H7/21-Sensor-Control/sesnor_manual_gain_control.py delete mode 100644 scripts/examples/Arduino/Portenta-H7/22-Optical-Flow/absolute-rotation-scale.py delete mode 100644 scripts/examples/Arduino/Portenta-H7/22-Optical-Flow/absolute-translation.py delete mode 100644 scripts/examples/Arduino/Portenta-H7/22-Optical-Flow/differential-rotation-scale.py delete mode 100644 scripts/examples/Arduino/Portenta-H7/22-Optical-Flow/differential-translation.py delete mode 100644 scripts/examples/Arduino/Portenta-H7/22-Optical-Flow/image-patches-absolute-rotation-scale.py delete mode 100644 scripts/examples/Arduino/Portenta-H7/22-Optical-Flow/image-patches-absolute-translation.py delete mode 100644 scripts/examples/Arduino/Portenta-H7/22-Optical-Flow/image-patches-differential-rotation-scale.py delete mode 100644 scripts/examples/Arduino/Portenta-H7/22-Optical-Flow/image-patches-differential-translation.py delete mode 100644 scripts/examples/Arduino/Portenta-H7/24-External-Sensors/I2C_Lidar_Lite_V3_example_code.py delete mode 
100644 scripts/examples/Arduino/Portenta-H7/25-Machine-Learning/tf_face_collection.py delete mode 100644 scripts/examples/Arduino/Portenta-H7/25-Machine-Learning/tf_face_recognition.py delete mode 100644 scripts/examples/Arduino/Portenta-H7/25-Machine-Learning/tf_mobilenet_search_just_center.py delete mode 100644 scripts/examples/Arduino/Portenta-H7/25-Machine-Learning/tf_mobilenet_search_whole_window.py delete mode 100644 scripts/examples/Arduino/Portenta-H7/25-Machine-Learning/tf_object_detection.py delete mode 100644 scripts/examples/Arduino/Portenta-H7/25-Machine-Learning/tf_person_detection_search_just_center.py delete mode 100644 scripts/examples/Arduino/Portenta-H7/25-Machine-Learning/tf_person_detection_search_whole_window.py delete mode 100644 scripts/examples/Arduino/Portenta-H7/26-April-Tags/find_apriltags.py delete mode 100644 scripts/examples/Arduino/Portenta-H7/26-April-Tags/find_apriltags_3d_pose.py delete mode 100644 scripts/examples/Arduino/Portenta-H7/26-April-Tags/find_apriltags_max_res.py delete mode 100644 scripts/examples/Arduino/Portenta-H7/26-April-Tags/find_apriltags_w_lens_zoom.py delete mode 100644 scripts/examples/Arduino/Portenta-H7/26-April-Tags/find_small_apriltags.py delete mode 100644 scripts/examples/Arduino/Portenta-H7/34-Remote-Control/image_transfer_jpg_as_the_remote_device_for_your_computer.py delete mode 100644 scripts/examples/Arduino/Portenta-H7/34-Remote-Control/image_transfer_jpg_streaming_as_the_remote_device_for_your_computer.py delete mode 100644 scripts/examples/Arduino/Portenta-H7/34-Remote-Control/image_transfer_raw_as_the_controller_device.py delete mode 100644 scripts/examples/Arduino/Portenta-H7/34-Remote-Control/image_transfer_raw_as_the_remote_device.py delete mode 100644 scripts/examples/Arduino/Portenta-H7/34-Remote-Control/popular_features_as_the_controller_device.py delete mode 100644 scripts/examples/Arduino/Portenta-H7/34-Remote-Control/popular_features_as_the_remote_device.py delete mode 100644 scripts/examples/Arduino/Portenta-H7/36-Web-Servers/rtsp_video_server_lan.py delete mode 100644 scripts/examples/Arduino/Portenta-H7/36-Web-Servers/rtsp_video_server_wlan.py delete mode 100644 scripts/examples/Arduino/Portenta-H7/99-Tests/colorbar.py delete mode 100644 scripts/examples/Arduino/Portenta-H7/99-Tests/fps.py delete mode 100644 scripts/examples/Arduino/Portenta-H7/99-Tests/selftest.py delete mode 100644 scripts/examples/Arduino/Portenta-H7/99-Tests/unittests.py delete mode 100644 scripts/examples/OpenMV/00-Arduino/arduino_i2c_slave.py delete mode 100644 scripts/examples/OpenMV/00-Arduino/arduino_spi_slave.py delete mode 100644 scripts/examples/OpenMV/00-Arduino/arduino_uart.py delete mode 100644 scripts/examples/OpenMV/01-Basics/helloworld.py delete mode 100644 scripts/examples/OpenMV/01-Basics/main.py delete mode 100644 scripts/examples/OpenMV/02-Board-Control/native_emitters.py delete mode 100644 scripts/examples/OpenMV/03-Drawing/arrow_drawing.py delete mode 100644 scripts/examples/OpenMV/03-Drawing/circle_drawing.py delete mode 100644 scripts/examples/OpenMV/03-Drawing/copy2fb.py delete mode 100644 scripts/examples/OpenMV/03-Drawing/cross_drawing.py delete mode 100644 scripts/examples/OpenMV/03-Drawing/ellipse_drawing.py delete mode 100644 scripts/examples/OpenMV/03-Drawing/flood_fill.py delete mode 100644 scripts/examples/OpenMV/03-Drawing/image_drawing.py delete mode 100644 scripts/examples/OpenMV/03-Drawing/image_drawing_advanced.py delete mode 100644 
scripts/examples/OpenMV/03-Drawing/image_drawing_alpha_blending_test.py delete mode 100644 scripts/examples/OpenMV/03-Drawing/image_drawing_alpha_blending_with_color_table_test.py delete mode 100644 scripts/examples/OpenMV/03-Drawing/image_drawing_alpha_table_test.py delete mode 100644 scripts/examples/OpenMV/03-Drawing/image_drawing_alpha_table_with_color_table_test.py delete mode 100644 scripts/examples/OpenMV/03-Drawing/image_drawing_scale_down_test.py delete mode 100644 scripts/examples/OpenMV/03-Drawing/image_drawing_scale_up_test.py delete mode 100644 scripts/examples/OpenMV/03-Drawing/image_drawing_with_custom_palette.py delete mode 100644 scripts/examples/OpenMV/03-Drawing/keypoints_drawing.py delete mode 100644 scripts/examples/OpenMV/03-Drawing/line_drawing.py delete mode 100644 scripts/examples/OpenMV/03-Drawing/rectangle_drawing.py delete mode 100644 scripts/examples/OpenMV/03-Drawing/text_drawing.py delete mode 100644 scripts/examples/OpenMV/04-Image-Filters/adaptive_histogram_equalization.py delete mode 100644 scripts/examples/OpenMV/04-Image-Filters/blur_filter.py delete mode 100644 scripts/examples/OpenMV/04-Image-Filters/cartoon_filter.py delete mode 100644 scripts/examples/OpenMV/04-Image-Filters/color_bilateral_filter.py delete mode 100644 scripts/examples/OpenMV/04-Image-Filters/color_binary_filter.py delete mode 100644 scripts/examples/OpenMV/04-Image-Filters/color_light_removal.py delete mode 100644 scripts/examples/OpenMV/04-Image-Filters/edge_filter.py delete mode 100644 scripts/examples/OpenMV/04-Image-Filters/erode_and_dilate.py delete mode 100644 scripts/examples/OpenMV/04-Image-Filters/gamma_correction.py delete mode 100644 scripts/examples/OpenMV/04-Image-Filters/grayscale_bilateral_filter.py delete mode 100644 scripts/examples/OpenMV/04-Image-Filters/grayscale_binary_filter.py delete mode 100644 scripts/examples/OpenMV/04-Image-Filters/grayscale_light_removal.py delete mode 100644 scripts/examples/OpenMV/04-Image-Filters/histogram_equalization.py delete mode 100644 scripts/examples/OpenMV/04-Image-Filters/kernel_filters.py delete mode 100644 scripts/examples/OpenMV/04-Image-Filters/lens_correction.py delete mode 100644 scripts/examples/OpenMV/04-Image-Filters/linear_polar.py delete mode 100644 scripts/examples/OpenMV/04-Image-Filters/log_polar.py delete mode 100644 scripts/examples/OpenMV/04-Image-Filters/mean_adaptive_threshold_filter.py delete mode 100644 scripts/examples/OpenMV/04-Image-Filters/mean_filter.py delete mode 100644 scripts/examples/OpenMV/04-Image-Filters/median_adaptive_threshold_filter.py delete mode 100644 scripts/examples/OpenMV/04-Image-Filters/median_filter.py delete mode 100644 scripts/examples/OpenMV/04-Image-Filters/midpoint_adaptive_threshold_filter.py delete mode 100644 scripts/examples/OpenMV/04-Image-Filters/midpoint_filter.py delete mode 100644 scripts/examples/OpenMV/04-Image-Filters/mode_adaptive_threshold_filter.py delete mode 100644 scripts/examples/OpenMV/04-Image-Filters/mode_filter.py delete mode 100644 scripts/examples/OpenMV/04-Image-Filters/negative.py delete mode 100644 scripts/examples/OpenMV/04-Image-Filters/perspective_and_rotation_correction.py delete mode 100644 scripts/examples/OpenMV/04-Image-Filters/perspective_correction.py delete mode 100644 scripts/examples/OpenMV/04-Image-Filters/rotation_correction.py delete mode 100644 scripts/examples/OpenMV/04-Image-Filters/sharpen_filter.py delete mode 100644 scripts/examples/OpenMV/04-Image-Filters/ulab.py delete mode 100644 
scripts/examples/OpenMV/04-Image-Filters/unsharp_filter.py delete mode 100644 scripts/examples/OpenMV/04-Image-Filters/vflip_hmirror_transpose.py delete mode 100644 scripts/examples/OpenMV/05-Snapshot/emboss_snapshot.py delete mode 100644 scripts/examples/OpenMV/05-Snapshot/snapshot.py delete mode 100644 scripts/examples/OpenMV/05-Snapshot/snapshot_on_face_detection.py delete mode 100644 scripts/examples/OpenMV/05-Snapshot/time_lapse_photos.py delete mode 100644 scripts/examples/OpenMV/06-Video-Recording/gif.py delete mode 100644 scripts/examples/OpenMV/06-Video-Recording/gif_on_face_detection.py delete mode 100644 scripts/examples/OpenMV/06-Video-Recording/imageio_memory.py delete mode 100644 scripts/examples/OpenMV/06-Video-Recording/imageio_read.py delete mode 100644 scripts/examples/OpenMV/06-Video-Recording/imageio_write.py delete mode 100644 scripts/examples/OpenMV/06-Video-Recording/mjpeg.py delete mode 100644 scripts/examples/OpenMV/06-Video-Recording/mjpeg_on_face_detection.py delete mode 100644 scripts/examples/OpenMV/07-Face-Detection/face_detection.py delete mode 100644 scripts/examples/OpenMV/07-Face-Detection/face_recognition.py delete mode 100644 scripts/examples/OpenMV/07-Face-Detection/face_tracking.py delete mode 100644 scripts/examples/OpenMV/08-Eye-Tracking/face_eye_detection.py delete mode 100644 scripts/examples/OpenMV/08-Eye-Tracking/iris_detection.py delete mode 100644 scripts/examples/OpenMV/09-Feature-Detection/edges.py delete mode 100644 scripts/examples/OpenMV/09-Feature-Detection/find_circles.py delete mode 100644 scripts/examples/OpenMV/09-Feature-Detection/find_line_segments.py delete mode 100644 scripts/examples/OpenMV/09-Feature-Detection/find_lines.py delete mode 100644 scripts/examples/OpenMV/09-Feature-Detection/find_rects.py delete mode 100644 scripts/examples/OpenMV/09-Feature-Detection/hog.py delete mode 100644 scripts/examples/OpenMV/09-Feature-Detection/keypoints.py delete mode 100644 scripts/examples/OpenMV/09-Feature-Detection/keypoints_save.py delete mode 100644 scripts/examples/OpenMV/09-Feature-Detection/lbp.py delete mode 100644 scripts/examples/OpenMV/09-Feature-Detection/linear_regression_fast.py delete mode 100644 scripts/examples/OpenMV/09-Feature-Detection/linear_regression_robust.py delete mode 100644 scripts/examples/OpenMV/09-Feature-Detection/template_matching.py delete mode 100644 scripts/examples/OpenMV/16-Codes/qrcodes_with_lens_zoom.py delete mode 100644 scripts/examples/OpenMV/17-Pixy-Emulation/apriltags_pixy_i2c_emulation.py delete mode 100644 scripts/examples/OpenMV/17-Pixy-Emulation/apriltags_pixy_spi_emulation.py delete mode 100644 scripts/examples/OpenMV/17-Pixy-Emulation/apriltags_pixy_uart_emulation.py delete mode 100644 scripts/examples/OpenMV/18-MAVLink/mavlink_apriltags_landing_target.py delete mode 100644 scripts/examples/OpenMV/22-Optical-Flow/absolute-rotation-scale.py delete mode 100644 scripts/examples/OpenMV/22-Optical-Flow/absolute-translation.py delete mode 100644 scripts/examples/OpenMV/22-Optical-Flow/differential-rotation-scale.py delete mode 100644 scripts/examples/OpenMV/22-Optical-Flow/differential-translation.py delete mode 100644 scripts/examples/OpenMV/22-Optical-Flow/image-patches-absolute-rotation-scale.py delete mode 100644 scripts/examples/OpenMV/22-Optical-Flow/image-patches-absolute-translation.py delete mode 100644 scripts/examples/OpenMV/22-Optical-Flow/image-patches-differential-rotation-scale.py delete mode 100644 
scripts/examples/OpenMV/22-Optical-Flow/image-patches-differential-translation.py delete mode 100644 scripts/examples/OpenMV/24-External-Sensors/I2C_Lidar_Lite_V3_example_code.py delete mode 100644 scripts/examples/OpenMV/25-Machine-Learning/nn_stm32cubeai.py delete mode 100644 scripts/examples/OpenMV/25-Machine-Learning/tf_object_detection.py delete mode 100644 scripts/examples/OpenMV/25-Machine-Learning/tf_person_detection_search_just_center.py delete mode 100644 scripts/examples/OpenMV/25-Machine-Learning/tf_person_detection_search_whole_window.py delete mode 100644 scripts/examples/OpenMV/28-Global-Shutter/high_fps.py delete mode 100644 scripts/examples/OpenMV/28-Global-Shutter/triggered_mode.py delete mode 100644 scripts/examples/OpenMV/32-modbus/modbus_apriltag.py delete mode 100644 scripts/examples/OpenMV/32-modbus/modbus_rtu_slave.py delete mode 100644 scripts/examples/OpenMV/34-Remote-Control/image_transfer_jpg_as_the_remote_device_for_your_computer.py delete mode 100644 scripts/examples/OpenMV/34-Remote-Control/image_transfer_jpg_streaming_as_the_remote_device_for_your_computer.py delete mode 100644 scripts/examples/OpenMV/34-Remote-Control/image_transfer_raw_as_the_controller_device.py delete mode 100644 scripts/examples/OpenMV/34-Remote-Control/image_transfer_raw_as_the_remote_device.py delete mode 100644 scripts/examples/OpenMV/34-Remote-Control/popular_features_as_the_controller_device.py delete mode 100644 scripts/examples/OpenMV/35-Readout-Control/100_fps_ir_led_tracking.py delete mode 100644 scripts/examples/OpenMV/35-Readout-Control/apriltag_tracking.py diff --git a/scripts/examples/Arduino/Portenta-H7/02-Board-Control/adc_read.py b/scripts/examples/00-OpenMV-Boards/00-Board-Control/adc_read_ext_channel.py similarity index 100% rename from scripts/examples/Arduino/Portenta-H7/02-Board-Control/adc_read.py rename to scripts/examples/00-OpenMV-Boards/00-Board-Control/adc_read_ext_channel.py diff --git a/scripts/examples/Arduino/Portenta-H7/02-Board-Control/adc_read_int_channel.py b/scripts/examples/00-OpenMV-Boards/00-Board-Control/adc_read_int_channel.py similarity index 100% rename from scripts/examples/Arduino/Portenta-H7/02-Board-Control/adc_read_int_channel.py rename to scripts/examples/00-OpenMV-Boards/00-Board-Control/adc_read_int_channel.py
diff --git a/scripts/examples/00-OpenMV-Boards/00-Board-Control/blinky.py b/scripts/examples/00-OpenMV-Boards/00-Board-Control/blinky.py
new file mode 100644
index 000000000..495387562
--- /dev/null
+++ b/scripts/examples/00-OpenMV-Boards/00-Board-Control/blinky.py
@@ -0,0 +1,14 @@
+# Blinky example
+
+import time
+from machine import Pin
+
+# This example blinks the board's blue LED,
+# which is exposed to machine.Pin as "LED_BLUE".
+led = Pin("LED_BLUE", Pin.OUT)
+
+while True:
+    led.on()
+    time.sleep_ms(250)
+    led.off()
+    time.sleep_ms(250)
diff --git a/scripts/examples/Arduino/Portenta-H7/02-Board-Control/can.py b/scripts/examples/00-OpenMV-Boards/00-Board-Control/can.py similarity index 100% rename from scripts/examples/Arduino/Portenta-H7/02-Board-Control/can.py rename to scripts/examples/00-OpenMV-Boards/00-Board-Control/can.py diff --git a/scripts/examples/Arduino/Nicla-Vision/02-Board-Control/cpufreq_scaling.py b/scripts/examples/00-OpenMV-Boards/00-Board-Control/cpufreq_scaling.py similarity index 100% rename from scripts/examples/Arduino/Nicla-Vision/02-Board-Control/cpufreq_scaling.py rename to scripts/examples/00-OpenMV-Boards/00-Board-Control/cpufreq_scaling.py diff --git a/scripts/examples/Arduino/Portenta-H7/02-Board-Control/dac_write.py b/scripts/examples/00-OpenMV-Boards/00-Board-Control/dac_write.py similarity index 100% rename from scripts/examples/Arduino/Portenta-H7/02-Board-Control/dac_write.py rename to scripts/examples/00-OpenMV-Boards/00-Board-Control/dac_write.py diff --git a/scripts/examples/Arduino/Portenta-H7/02-Board-Control/dac_write_timed.py b/scripts/examples/00-OpenMV-Boards/00-Board-Control/dac_write_timed.py similarity index 100% rename from scripts/examples/Arduino/Portenta-H7/02-Board-Control/dac_write_timed.py rename to scripts/examples/00-OpenMV-Boards/00-Board-Control/dac_write_timed.py diff --git a/scripts/examples/Arduino/Portenta-H7/02-Board-Control/i2c_control.py b/scripts/examples/00-OpenMV-Boards/00-Board-Control/i2c_control.py similarity index 100% rename from scripts/examples/Arduino/Portenta-H7/02-Board-Control/i2c_control.py rename to scripts/examples/00-OpenMV-Boards/00-Board-Control/i2c_control.py diff --git a/scripts/examples/Arduino/Portenta-H7/02-Board-Control/led_control.py b/scripts/examples/00-OpenMV-Boards/00-Board-Control/led_control.py similarity index 100% rename from scripts/examples/Arduino/Portenta-H7/02-Board-Control/led_control.py rename to scripts/examples/00-OpenMV-Boards/00-Board-Control/led_control.py diff --git a/scripts/examples/Arduino/Portenta-H7/02-Board-Control/pin_control.py b/scripts/examples/00-OpenMV-Boards/00-Board-Control/pin_control.py similarity index 100% rename from scripts/examples/Arduino/Portenta-H7/02-Board-Control/pin_control.py rename to scripts/examples/00-OpenMV-Boards/00-Board-Control/pin_control.py diff --git a/scripts/examples/OpenMV/02-Board-Control/pwm_control.py b/scripts/examples/00-OpenMV-Boards/00-Board-Control/pwm_control.py similarity index 100% rename from scripts/examples/OpenMV/02-Board-Control/pwm_control.py rename to scripts/examples/00-OpenMV-Boards/00-Board-Control/pwm_control.py diff --git a/scripts/examples/Arduino/Nicla-Vision/02-Board-Control/rtc.py b/scripts/examples/00-OpenMV-Boards/00-Board-Control/rtc.py similarity index 100% rename from scripts/examples/Arduino/Nicla-Vision/02-Board-Control/rtc.py rename to scripts/examples/00-OpenMV-Boards/00-Board-Control/rtc.py diff --git a/scripts/examples/Arduino/Portenta-H7/02-Board-Control/servo_control.py b/scripts/examples/00-OpenMV-Boards/00-Board-Control/servo_control.py similarity index 100% rename from scripts/examples/Arduino/Portenta-H7/02-Board-Control/servo_control.py rename to scripts/examples/00-OpenMV-Boards/00-Board-Control/servo_control.py diff --git a/scripts/examples/OpenMV/02-Board-Control/spi_control.py b/scripts/examples/00-OpenMV-Boards/00-Board-Control/spi_control.py similarity index 100% rename from
scripts/examples/OpenMV/02-Board-Control/spi_control.py rename to scripts/examples/00-OpenMV-Boards/00-Board-Control/spi_control.py diff --git a/scripts/examples/Arduino/Nicla-Vision/02-Board-Control/timer_control.py b/scripts/examples/00-OpenMV-Boards/00-Board-Control/timer_control.py similarity index 100% rename from scripts/examples/Arduino/Nicla-Vision/02-Board-Control/timer_control.py rename to scripts/examples/00-OpenMV-Boards/00-Board-Control/timer_control.py diff --git a/scripts/examples/OpenMV/02-Board-Control/timer_tests.py b/scripts/examples/00-OpenMV-Boards/00-Board-Control/timer_tests.py similarity index 100% rename from scripts/examples/OpenMV/02-Board-Control/timer_tests.py rename to scripts/examples/00-OpenMV-Boards/00-Board-Control/timer_tests.py diff --git a/scripts/examples/Arduino/Portenta-H7/02-Board-Control/uart_control.py b/scripts/examples/00-OpenMV-Boards/00-Board-Control/uart_control.py similarity index 100% rename from scripts/examples/Arduino/Portenta-H7/02-Board-Control/uart_control.py rename to scripts/examples/00-OpenMV-Boards/00-Board-Control/uart_control.py diff --git a/scripts/examples/Arduino/Nicla-Vision/02-Board-Control/usb_hid.py b/scripts/examples/00-OpenMV-Boards/00-Board-Control/usb_hid.py similarity index 100% rename from scripts/examples/Arduino/Nicla-Vision/02-Board-Control/usb_hid.py rename to scripts/examples/00-OpenMV-Boards/00-Board-Control/usb_hid.py diff --git a/scripts/examples/Arduino/Nicla-Vision/02-Board-Control/usb_vcp.py b/scripts/examples/00-OpenMV-Boards/00-Board-Control/usb_vcp.py similarity index 100% rename from scripts/examples/Arduino/Nicla-Vision/02-Board-Control/usb_vcp.py rename to scripts/examples/00-OpenMV-Boards/00-Board-Control/usb_vcp.py diff --git a/scripts/examples/OpenMV/02-Board-Control/vsync_gpio_output.py b/scripts/examples/00-OpenMV-Boards/00-Board-Control/vsync_gpio_output.py similarity index 100% rename from scripts/examples/OpenMV/02-Board-Control/vsync_gpio_output.py rename to scripts/examples/00-OpenMV-Boards/00-Board-Control/vsync_gpio_output.py diff --git a/scripts/examples/OpenMV/14-WiFi-Shield/connect.py b/scripts/examples/00-OpenMV-Boards/01-WiFi-Shield/connect.py similarity index 100% rename from scripts/examples/OpenMV/14-WiFi-Shield/connect.py rename to scripts/examples/00-OpenMV-Boards/01-WiFi-Shield/connect.py diff --git a/scripts/examples/OpenMV/14-WiFi-Shield/dns.py b/scripts/examples/00-OpenMV-Boards/01-WiFi-Shield/dns.py similarity index 100% rename from scripts/examples/OpenMV/14-WiFi-Shield/dns.py rename to scripts/examples/00-OpenMV-Boards/01-WiFi-Shield/dns.py diff --git a/scripts/examples/OpenMV/14-WiFi-Shield/fw_update.py b/scripts/examples/00-OpenMV-Boards/01-WiFi-Shield/fw_update.py similarity index 100% rename from scripts/examples/OpenMV/14-WiFi-Shield/fw_update.py rename to scripts/examples/00-OpenMV-Boards/01-WiFi-Shield/fw_update.py diff --git a/scripts/examples/OpenMV/14-WiFi-Shield/http_client.py b/scripts/examples/00-OpenMV-Boards/01-WiFi-Shield/http_client.py similarity index 100% rename from scripts/examples/OpenMV/14-WiFi-Shield/http_client.py rename to scripts/examples/00-OpenMV-Boards/01-WiFi-Shield/http_client.py diff --git a/scripts/examples/OpenMV/14-WiFi-Shield/http_client_ssl.py b/scripts/examples/00-OpenMV-Boards/01-WiFi-Shield/http_client_ssl.py similarity index 100% rename from scripts/examples/OpenMV/14-WiFi-Shield/http_client_ssl.py rename to scripts/examples/00-OpenMV-Boards/01-WiFi-Shield/http_client_ssl.py diff --git 
a/scripts/examples/OpenMV/14-WiFi-Shield/http_post.py b/scripts/examples/00-OpenMV-Boards/01-WiFi-Shield/http_post.py similarity index 100% rename from scripts/examples/OpenMV/14-WiFi-Shield/http_post.py rename to scripts/examples/00-OpenMV-Boards/01-WiFi-Shield/http_post.py diff --git a/scripts/examples/OpenMV/14-WiFi-Shield/mjpeg_streamer.py b/scripts/examples/00-OpenMV-Boards/01-WiFi-Shield/mjpeg_streamer.py similarity index 100% rename from scripts/examples/OpenMV/14-WiFi-Shield/mjpeg_streamer.py rename to scripts/examples/00-OpenMV-Boards/01-WiFi-Shield/mjpeg_streamer.py diff --git a/scripts/examples/OpenMV/14-WiFi-Shield/mjpeg_streamer_ap.py b/scripts/examples/00-OpenMV-Boards/01-WiFi-Shield/mjpeg_streamer_ap.py similarity index 100% rename from scripts/examples/OpenMV/14-WiFi-Shield/mjpeg_streamer_ap.py rename to scripts/examples/00-OpenMV-Boards/01-WiFi-Shield/mjpeg_streamer_ap.py diff --git a/scripts/examples/OpenMV/14-WiFi-Shield/mjpeg_streamer_fir.py b/scripts/examples/00-OpenMV-Boards/01-WiFi-Shield/mjpeg_streamer_fir.py similarity index 100% rename from scripts/examples/OpenMV/14-WiFi-Shield/mjpeg_streamer_fir.py rename to scripts/examples/00-OpenMV-Boards/01-WiFi-Shield/mjpeg_streamer_fir.py diff --git a/scripts/examples/OpenMV/14-WiFi-Shield/mqtt_pub.py b/scripts/examples/00-OpenMV-Boards/01-WiFi-Shield/mqtt_pub.py similarity index 100% rename from scripts/examples/OpenMV/14-WiFi-Shield/mqtt_pub.py rename to scripts/examples/00-OpenMV-Boards/01-WiFi-Shield/mqtt_pub.py diff --git a/scripts/examples/OpenMV/14-WiFi-Shield/mqtt_sub.py b/scripts/examples/00-OpenMV-Boards/01-WiFi-Shield/mqtt_sub.py similarity index 100% rename from scripts/examples/OpenMV/14-WiFi-Shield/mqtt_sub.py rename to scripts/examples/00-OpenMV-Boards/01-WiFi-Shield/mqtt_sub.py diff --git a/scripts/examples/OpenMV/14-WiFi-Shield/ntp.py b/scripts/examples/00-OpenMV-Boards/01-WiFi-Shield/ntp.py similarity index 100% rename from scripts/examples/OpenMV/14-WiFi-Shield/ntp.py rename to scripts/examples/00-OpenMV-Boards/01-WiFi-Shield/ntp.py diff --git a/scripts/examples/OpenMV/14-WiFi-Shield/scan.py b/scripts/examples/00-OpenMV-Boards/01-WiFi-Shield/scan.py similarity index 100% rename from scripts/examples/OpenMV/14-WiFi-Shield/scan.py rename to scripts/examples/00-OpenMV-Boards/01-WiFi-Shield/scan.py diff --git a/scripts/examples/OpenMV/14-WiFi-Shield/static_ip.py b/scripts/examples/00-OpenMV-Boards/01-WiFi-Shield/static_ip.py similarity index 100% rename from scripts/examples/OpenMV/14-WiFi-Shield/static_ip.py rename to scripts/examples/00-OpenMV-Boards/01-WiFi-Shield/static_ip.py diff --git a/scripts/examples/OpenMV/11-LCD-Shield/lcd.py b/scripts/examples/00-OpenMV-Boards/02-LCD-Shield/lcd.py similarity index 100% rename from scripts/examples/OpenMV/11-LCD-Shield/lcd.py rename to scripts/examples/00-OpenMV-Boards/02-LCD-Shield/lcd.py diff --git a/scripts/examples/OpenMV/15-Servo-Shield/main.py b/scripts/examples/00-OpenMV-Boards/03-Servo-Shield/main.py similarity index 100% rename from scripts/examples/OpenMV/15-Servo-Shield/main.py rename to scripts/examples/00-OpenMV-Boards/03-Servo-Shield/main.py diff --git a/scripts/examples/OpenMV/15-Servo-Shield/pca9685.py b/scripts/examples/00-OpenMV-Boards/03-Servo-Shield/pca9685.py similarity index 100% rename from scripts/examples/OpenMV/15-Servo-Shield/pca9685.py rename to scripts/examples/00-OpenMV-Boards/03-Servo-Shield/pca9685.py diff --git a/scripts/examples/OpenMV/15-Servo-Shield/servo.py b/scripts/examples/00-OpenMV-Boards/03-Servo-Shield/servo.py similarity 
index 100% rename from scripts/examples/OpenMV/15-Servo-Shield/servo.py rename to scripts/examples/00-OpenMV-Boards/03-Servo-Shield/servo.py diff --git a/scripts/examples/OpenMV/12-Thermopile-Shield/thermal_camera.py b/scripts/examples/00-OpenMV-Boards/04-Thermopile-Shield/thermal_camera.py similarity index 100% rename from scripts/examples/OpenMV/12-Thermopile-Shield/thermal_camera.py rename to scripts/examples/00-OpenMV-Boards/04-Thermopile-Shield/thermal_camera.py diff --git a/scripts/examples/OpenMV/12-Thermopile-Shield/thermal_overlay.py b/scripts/examples/00-OpenMV-Boards/04-Thermopile-Shield/thermal_overlay.py similarity index 100% rename from scripts/examples/OpenMV/12-Thermopile-Shield/thermal_overlay.py rename to scripts/examples/00-OpenMV-Boards/04-Thermopile-Shield/thermal_overlay.py diff --git a/scripts/examples/OpenMV/12-Thermopile-Shield/thermal_overlay_lcd.py b/scripts/examples/00-OpenMV-Boards/04-Thermopile-Shield/thermal_overlay_lcd.py similarity index 100% rename from scripts/examples/OpenMV/12-Thermopile-Shield/thermal_overlay_lcd.py rename to scripts/examples/00-OpenMV-Boards/04-Thermopile-Shield/thermal_overlay_lcd.py diff --git a/scripts/examples/OpenMV/13-BLE-Shield/ble.py b/scripts/examples/00-OpenMV-Boards/05-BLE-Shield/ble.py similarity index 100% rename from scripts/examples/OpenMV/13-BLE-Shield/ble.py rename to scripts/examples/00-OpenMV-Boards/05-BLE-Shield/ble.py diff --git a/scripts/examples/OpenMV/23-Motor-Shield/motor-shield-power-driver.py b/scripts/examples/00-OpenMV-Boards/06-Motor-Shield/motor-shield-power-driver.py similarity index 100% rename from scripts/examples/OpenMV/23-Motor-Shield/motor-shield-power-driver.py rename to scripts/examples/00-OpenMV-Boards/06-Motor-Shield/motor-shield-power-driver.py diff --git a/scripts/examples/OpenMV/23-Motor-Shield/motor-shield-pwm.py b/scripts/examples/00-OpenMV-Boards/06-Motor-Shield/motor-shield-pwm.py similarity index 100% rename from scripts/examples/OpenMV/23-Motor-Shield/motor-shield-pwm.py rename to scripts/examples/00-OpenMV-Boards/06-Motor-Shield/motor-shield-pwm.py diff --git a/scripts/examples/OpenMV/23-Motor-Shield/motor.py b/scripts/examples/00-OpenMV-Boards/06-Motor-Shield/motor.py similarity index 100% rename from scripts/examples/OpenMV/23-Motor-Shield/motor.py rename to scripts/examples/00-OpenMV-Boards/06-Motor-Shield/motor.py diff --git a/scripts/examples/OpenMV/23-Motor-Shield/stepper.py b/scripts/examples/00-OpenMV-Boards/06-Motor-Shield/stepper.py similarity index 100% rename from scripts/examples/OpenMV/23-Motor-Shield/stepper.py rename to scripts/examples/00-OpenMV-Boards/06-Motor-Shield/stepper.py diff --git a/scripts/examples/OpenMV/29-IMU-Shield/imu_read.py b/scripts/examples/00-OpenMV-Boards/07-IMU-Shield/imu_read.py similarity index 100% rename from scripts/examples/OpenMV/29-IMU-Shield/imu_read.py rename to scripts/examples/00-OpenMV-Boards/07-IMU-Shield/imu_read.py diff --git a/scripts/examples/OpenMV/30-Distance-Shield/distance_read.py b/scripts/examples/00-OpenMV-Boards/08-Distance-Shield/distance_read.py similarity index 100% rename from scripts/examples/OpenMV/30-Distance-Shield/distance_read.py rename to scripts/examples/00-OpenMV-Boards/08-Distance-Shield/distance_read.py diff --git a/scripts/examples/OpenMV/31-TV-Shield/tv.py b/scripts/examples/00-OpenMV-Boards/09-TV-Shield/tv.py similarity index 100% rename from scripts/examples/OpenMV/31-TV-Shield/tv.py rename to scripts/examples/00-OpenMV-Boards/09-TV-Shield/tv.py diff --git 
a/scripts/examples/OpenMV/33-Light-Shield/light.py b/scripts/examples/00-OpenMV-Boards/10-Light-Shield/light.py similarity index 100% rename from scripts/examples/OpenMV/33-Light-Shield/light.py rename to scripts/examples/00-OpenMV-Boards/10-Light-Shield/light.py diff --git a/scripts/examples/OpenMV/19-Low-Power/deep_sleep.py b/scripts/examples/00-OpenMV-Boards/11-Low-Power/deep_sleep.py similarity index 100% rename from scripts/examples/OpenMV/19-Low-Power/deep_sleep.py rename to scripts/examples/00-OpenMV-Boards/11-Low-Power/deep_sleep.py diff --git a/scripts/examples/Arduino/Portenta-H7/19-Low-Power/extint_wakeup.py b/scripts/examples/00-OpenMV-Boards/11-Low-Power/extint_wakeup.py similarity index 100% rename from scripts/examples/Arduino/Portenta-H7/19-Low-Power/extint_wakeup.py rename to scripts/examples/00-OpenMV-Boards/11-Low-Power/extint_wakeup.py diff --git a/scripts/examples/OpenMV/19-Low-Power/sensor_sleep.py b/scripts/examples/00-OpenMV-Boards/11-Low-Power/sensor_sleep.py similarity index 100% rename from scripts/examples/OpenMV/19-Low-Power/sensor_sleep.py rename to scripts/examples/00-OpenMV-Boards/11-Low-Power/sensor_sleep.py diff --git a/scripts/examples/Arduino/Nicla-Vision/19-Low-Power/stop_mode.py b/scripts/examples/00-OpenMV-Boards/11-Low-Power/stop_mode.py similarity index 100% rename from scripts/examples/Arduino/Nicla-Vision/19-Low-Power/stop_mode.py rename to scripts/examples/00-OpenMV-Boards/11-Low-Power/stop_mode.py diff --git a/scripts/examples/OpenMV/99-Tests/colorbar.py b/scripts/examples/00-OpenMV-Boards/99-Tests/colorbar.py similarity index 100% rename from scripts/examples/OpenMV/99-Tests/colorbar.py rename to scripts/examples/00-OpenMV-Boards/99-Tests/colorbar.py diff --git a/scripts/examples/OpenMV/99-Tests/fps.py b/scripts/examples/00-OpenMV-Boards/99-Tests/fps.py similarity index 100% rename from scripts/examples/OpenMV/99-Tests/fps.py rename to scripts/examples/00-OpenMV-Boards/99-Tests/fps.py diff --git a/scripts/examples/OpenMV/99-Tests/selftest.py b/scripts/examples/00-OpenMV-Boards/99-Tests/selftest.py similarity index 100% rename from scripts/examples/OpenMV/99-Tests/selftest.py rename to scripts/examples/00-OpenMV-Boards/99-Tests/selftest.py diff --git a/scripts/examples/OpenMV/99-Tests/unittests.py b/scripts/examples/00-OpenMV-Boards/99-Tests/unittests.py similarity index 100% rename from scripts/examples/OpenMV/99-Tests/unittests.py rename to scripts/examples/00-OpenMV-Boards/99-Tests/unittests.py diff --git a/scripts/examples/Arduino/Portenta-H7/01-Basics/main.py b/scripts/examples/00-OpenMV-Boards/main.py similarity index 100% rename from scripts/examples/Arduino/Portenta-H7/01-Basics/main.py rename to scripts/examples/00-OpenMV-Boards/main.py diff --git a/scripts/examples/Arduino/Nano-33-BLE-Sense/00-Board/blinky.py b/scripts/examples/01-Arduino-Boards/Nano-33-BLE-Sense/00-Board-Control/blinky.py similarity index 100% rename from scripts/examples/Arduino/Nano-33-BLE-Sense/00-Board/blinky.py rename to scripts/examples/01-Arduino-Boards/Nano-33-BLE-Sense/00-Board-Control/blinky.py diff --git a/scripts/examples/Arduino/Nano-33-BLE-Sense/00-Board/i2c_scanner.py b/scripts/examples/01-Arduino-Boards/Nano-33-BLE-Sense/00-Board-Control/i2c_scanner.py similarity index 100% rename from scripts/examples/Arduino/Nano-33-BLE-Sense/00-Board/i2c_scanner.py rename to scripts/examples/01-Arduino-Boards/Nano-33-BLE-Sense/00-Board-Control/i2c_scanner.py diff --git a/scripts/examples/Arduino/Nano-33-BLE-Sense/01-Sensors/apds9960/ambient.py 
b/scripts/examples/01-Arduino-Boards/Nano-33-BLE-Sense/01-Sensors/apds9960/ambient.py similarity index 100% rename from scripts/examples/Arduino/Nano-33-BLE-Sense/01-Sensors/apds9960/ambient.py rename to scripts/examples/01-Arduino-Boards/Nano-33-BLE-Sense/01-Sensors/apds9960/ambient.py diff --git a/scripts/examples/Arduino/Nano-33-BLE-Sense/01-Sensors/apds9960/gesture.py b/scripts/examples/01-Arduino-Boards/Nano-33-BLE-Sense/01-Sensors/apds9960/gesture.py similarity index 100% rename from scripts/examples/Arduino/Nano-33-BLE-Sense/01-Sensors/apds9960/gesture.py rename to scripts/examples/01-Arduino-Boards/Nano-33-BLE-Sense/01-Sensors/apds9960/gesture.py diff --git a/scripts/examples/Arduino/Nano-33-BLE-Sense/01-Sensors/apds9960/proximity.py b/scripts/examples/01-Arduino-Boards/Nano-33-BLE-Sense/01-Sensors/apds9960/proximity.py similarity index 100% rename from scripts/examples/Arduino/Nano-33-BLE-Sense/01-Sensors/apds9960/proximity.py rename to scripts/examples/01-Arduino-Boards/Nano-33-BLE-Sense/01-Sensors/apds9960/proximity.py diff --git a/scripts/examples/Arduino/Nano-33-BLE-Sense/01-Sensors/hts221.py b/scripts/examples/01-Arduino-Boards/Nano-33-BLE-Sense/01-Sensors/hts221.py similarity index 100% rename from scripts/examples/Arduino/Nano-33-BLE-Sense/01-Sensors/hts221.py rename to scripts/examples/01-Arduino-Boards/Nano-33-BLE-Sense/01-Sensors/hts221.py diff --git a/scripts/examples/Arduino/Nano-33-BLE-Sense/01-Sensors/lps22.py b/scripts/examples/01-Arduino-Boards/Nano-33-BLE-Sense/01-Sensors/lps22.py similarity index 100% rename from scripts/examples/Arduino/Nano-33-BLE-Sense/01-Sensors/lps22.py rename to scripts/examples/01-Arduino-Boards/Nano-33-BLE-Sense/01-Sensors/lps22.py diff --git a/scripts/examples/Arduino/Nano-33-BLE-Sense/01-Sensors/lsm9ds1.py b/scripts/examples/01-Arduino-Boards/Nano-33-BLE-Sense/01-Sensors/lsm9ds1.py similarity index 100% rename from scripts/examples/Arduino/Nano-33-BLE-Sense/01-Sensors/lsm9ds1.py rename to scripts/examples/01-Arduino-Boards/Nano-33-BLE-Sense/01-Sensors/lsm9ds1.py diff --git a/scripts/examples/Arduino/Nano-33-BLE-Sense/03-Audio/audio_fft.py b/scripts/examples/01-Arduino-Boards/Nano-33-BLE-Sense/02-Audio/audio_fft.py similarity index 100% rename from scripts/examples/Arduino/Nano-33-BLE-Sense/03-Audio/audio_fft.py rename to scripts/examples/01-Arduino-Boards/Nano-33-BLE-Sense/02-Audio/audio_fft.py diff --git a/scripts/examples/Arduino/Nano-33-BLE-Sense/02-Bluetooth/ble_blinky.py b/scripts/examples/01-Arduino-Boards/Nano-33-BLE-Sense/03-Bluetooth/ble_blinky.py similarity index 100% rename from scripts/examples/Arduino/Nano-33-BLE-Sense/02-Bluetooth/ble_blinky.py rename to scripts/examples/01-Arduino-Boards/Nano-33-BLE-Sense/03-Bluetooth/ble_blinky.py diff --git a/scripts/examples/Arduino/Nano-33-BLE-Sense/02-Bluetooth/ble_scan.py b/scripts/examples/01-Arduino-Boards/Nano-33-BLE-Sense/03-Bluetooth/ble_scan.py similarity index 100% rename from scripts/examples/Arduino/Nano-33-BLE-Sense/02-Bluetooth/ble_scan.py rename to scripts/examples/01-Arduino-Boards/Nano-33-BLE-Sense/03-Bluetooth/ble_scan.py diff --git a/scripts/examples/Arduino/Nano-33-BLE-Sense/02-Bluetooth/ble_temperature.py b/scripts/examples/01-Arduino-Boards/Nano-33-BLE-Sense/03-Bluetooth/ble_temperature.py similarity index 100% rename from scripts/examples/Arduino/Nano-33-BLE-Sense/02-Bluetooth/ble_temperature.py rename to scripts/examples/01-Arduino-Boards/Nano-33-BLE-Sense/03-Bluetooth/ble_temperature.py diff --git 
a/scripts/examples/Arduino/Nano-33-BLE-Sense/04-Thermal/thermal_camera.py b/scripts/examples/01-Arduino-Boards/Nano-33-BLE-Sense/04-Thermal/thermal_camera.py similarity index 100% rename from scripts/examples/Arduino/Nano-33-BLE-Sense/04-Thermal/thermal_camera.py rename to scripts/examples/01-Arduino-Boards/Nano-33-BLE-Sense/04-Thermal/thermal_camera.py diff --git a/scripts/examples/Arduino/Nano-RP2040/00-Basics/blinky.py b/scripts/examples/01-Arduino-Boards/Nano-RP2040/00-Board-Control/blinky.py similarity index 100% rename from scripts/examples/Arduino/Nano-RP2040/00-Basics/blinky.py rename to scripts/examples/01-Arduino-Boards/Nano-RP2040/00-Board-Control/blinky.py diff --git a/scripts/examples/Arduino/Nano-RP2040/00-Basics/i2c_scanner.py b/scripts/examples/01-Arduino-Boards/Nano-RP2040/00-Board-Control/i2c_scanner.py similarity index 100% rename from scripts/examples/Arduino/Nano-RP2040/00-Basics/i2c_scanner.py rename to scripts/examples/01-Arduino-Boards/Nano-RP2040/00-Board-Control/i2c_scanner.py diff --git a/scripts/examples/Arduino/Nano-RP2040/01-Sensors/lsm6dsox_basic.py b/scripts/examples/01-Arduino-Boards/Nano-RP2040/01-Sensors/lsm6dsox_basic.py similarity index 100% rename from scripts/examples/Arduino/Nano-RP2040/01-Sensors/lsm6dsox_basic.py rename to scripts/examples/01-Arduino-Boards/Nano-RP2040/01-Sensors/lsm6dsox_basic.py diff --git a/scripts/examples/Arduino/Nano-RP2040/01-Sensors/lsm6dsox_mlc.py b/scripts/examples/01-Arduino-Boards/Nano-RP2040/01-Sensors/lsm6dsox_mlc.py similarity index 100% rename from scripts/examples/Arduino/Nano-RP2040/01-Sensors/lsm6dsox_mlc.py rename to scripts/examples/01-Arduino-Boards/Nano-RP2040/01-Sensors/lsm6dsox_mlc.py diff --git a/scripts/examples/Arduino/Nano-RP2040/04-Audio/audio_fft.py b/scripts/examples/01-Arduino-Boards/Nano-RP2040/03-Audio/audio_fft.py similarity index 100% rename from scripts/examples/Arduino/Nano-RP2040/04-Audio/audio_fft.py rename to scripts/examples/01-Arduino-Boards/Nano-RP2040/03-Audio/audio_fft.py diff --git a/scripts/examples/Arduino/Nano-RP2040/03-WiFi/ap_mode.py b/scripts/examples/01-Arduino-Boards/Nano-RP2040/03-WiFi/ap_mode.py similarity index 100% rename from scripts/examples/Arduino/Nano-RP2040/03-WiFi/ap_mode.py rename to scripts/examples/01-Arduino-Boards/Nano-RP2040/03-WiFi/ap_mode.py diff --git a/scripts/examples/Arduino/Nano-RP2040/03-WiFi/http_client.py b/scripts/examples/01-Arduino-Boards/Nano-RP2040/03-WiFi/http_client.py similarity index 100% rename from scripts/examples/Arduino/Nano-RP2040/03-WiFi/http_client.py rename to scripts/examples/01-Arduino-Boards/Nano-RP2040/03-WiFi/http_client.py diff --git a/scripts/examples/Arduino/Nano-RP2040/03-WiFi/ntp.py b/scripts/examples/01-Arduino-Boards/Nano-RP2040/03-WiFi/ntp.py similarity index 100% rename from scripts/examples/Arduino/Nano-RP2040/03-WiFi/ntp.py rename to scripts/examples/01-Arduino-Boards/Nano-RP2040/03-WiFi/ntp.py diff --git a/scripts/examples/Arduino/Nano-RP2040/03-WiFi/scan.py b/scripts/examples/01-Arduino-Boards/Nano-RP2040/03-WiFi/scan.py similarity index 100% rename from scripts/examples/Arduino/Nano-RP2040/03-WiFi/scan.py rename to scripts/examples/01-Arduino-Boards/Nano-RP2040/03-WiFi/scan.py diff --git a/scripts/examples/Arduino/Nano-RP2040/02-Bluetooth/ble_blinky.py b/scripts/examples/01-Arduino-Boards/Nano-RP2040/04-Bluetooth/ble_blinky.py similarity index 100% rename from scripts/examples/Arduino/Nano-RP2040/02-Bluetooth/ble_blinky.py rename to scripts/examples/01-Arduino-Boards/Nano-RP2040/04-Bluetooth/ble_blinky.py diff 
--git a/scripts/examples/Arduino/Nano-RP2040/02-Bluetooth/ble_temperature.py b/scripts/examples/01-Arduino-Boards/Nano-RP2040/04-Bluetooth/ble_temperature.py similarity index 100% rename from scripts/examples/Arduino/Nano-RP2040/02-Bluetooth/ble_temperature.py rename to scripts/examples/01-Arduino-Boards/Nano-RP2040/04-Bluetooth/ble_temperature.py diff --git a/scripts/examples/Arduino/Nano-RP2040/02-Bluetooth/temp_sensor_aioble.py b/scripts/examples/01-Arduino-Boards/Nano-RP2040/04-Bluetooth/temp_sensor_aioble.py similarity index 100% rename from scripts/examples/Arduino/Nano-RP2040/02-Bluetooth/temp_sensor_aioble.py rename to scripts/examples/01-Arduino-Boards/Nano-RP2040/04-Bluetooth/temp_sensor_aioble.py diff --git a/scripts/examples/Arduino/Nano-RP2040/05-Thermal/thermal_camera.py b/scripts/examples/01-Arduino-Boards/Nano-RP2040/05-Thermal/thermal_camera.py similarity index 100% rename from scripts/examples/Arduino/Nano-RP2040/05-Thermal/thermal_camera.py rename to scripts/examples/01-Arduino-Boards/Nano-RP2040/05-Thermal/thermal_camera.py diff --git a/scripts/examples/Arduino/Nicla-Vision/02-Board-Control/adc_read.py b/scripts/examples/01-Arduino-Boards/Nicla-Vision/00-Board-Control/adc_read_ext_channel.py similarity index 100% rename from scripts/examples/Arduino/Nicla-Vision/02-Board-Control/adc_read.py rename to scripts/examples/01-Arduino-Boards/Nicla-Vision/00-Board-Control/adc_read_ext_channel.py diff --git a/scripts/examples/Arduino/Nicla-Vision/02-Board-Control/adc_read_int_channel.py b/scripts/examples/01-Arduino-Boards/Nicla-Vision/00-Board-Control/adc_read_int_channel.py similarity index 100% rename from scripts/examples/Arduino/Nicla-Vision/02-Board-Control/adc_read_int_channel.py rename to scripts/examples/01-Arduino-Boards/Nicla-Vision/00-Board-Control/adc_read_int_channel.py diff --git a/scripts/examples/01-Arduino-Boards/Nicla-Vision/00-Board-Control/blinky.py b/scripts/examples/01-Arduino-Boards/Nicla-Vision/00-Board-Control/blinky.py new file mode 100644 index 000000000..495387562 --- /dev/null +++ b/scripts/examples/01-Arduino-Boards/Nicla-Vision/00-Board-Control/blinky.py @@ -0,0 +1,14 @@ +# Blinky example + +import time +from machine import Pin + +# The Nicla Vision's user LED is an RGB LED; +# LED_BLUE drives its blue channel.
+led = Pin("LED_BLUE", Pin.OUT) + +while True: + led.on() + time.sleep_ms(250) + led.off() + time.sleep_ms(250) diff --git a/scripts/examples/Arduino/Nicla-Vision/02-Board-Control/can.py b/scripts/examples/01-Arduino-Boards/Nicla-Vision/00-Board-Control/can.py similarity index 100% rename from scripts/examples/Arduino/Nicla-Vision/02-Board-Control/can.py rename to scripts/examples/01-Arduino-Boards/Nicla-Vision/00-Board-Control/can.py diff --git a/scripts/examples/Arduino/Portenta-H7/02-Board-Control/cpufreq_scaling.py b/scripts/examples/01-Arduino-Boards/Nicla-Vision/00-Board-Control/cpufreq_scaling.py similarity index 100% rename from scripts/examples/Arduino/Portenta-H7/02-Board-Control/cpufreq_scaling.py rename to scripts/examples/01-Arduino-Boards/Nicla-Vision/00-Board-Control/cpufreq_scaling.py diff --git a/scripts/examples/Arduino/Nicla-Vision/02-Board-Control/i2c_control.py b/scripts/examples/01-Arduino-Boards/Nicla-Vision/00-Board-Control/i2c_control.py similarity index 100% rename from scripts/examples/Arduino/Nicla-Vision/02-Board-Control/i2c_control.py rename to scripts/examples/01-Arduino-Boards/Nicla-Vision/00-Board-Control/i2c_control.py diff --git a/scripts/examples/Arduino/Nicla-Vision/02-Board-Control/led_control.py b/scripts/examples/01-Arduino-Boards/Nicla-Vision/00-Board-Control/led_control.py similarity index 100% rename from scripts/examples/Arduino/Nicla-Vision/02-Board-Control/led_control.py rename to scripts/examples/01-Arduino-Boards/Nicla-Vision/00-Board-Control/led_control.py diff --git a/scripts/examples/Arduino/Nicla-Vision/02-Board-Control/pin_control.py b/scripts/examples/01-Arduino-Boards/Nicla-Vision/00-Board-Control/pin_control.py similarity index 100% rename from scripts/examples/Arduino/Nicla-Vision/02-Board-Control/pin_control.py rename to scripts/examples/01-Arduino-Boards/Nicla-Vision/00-Board-Control/pin_control.py diff --git a/scripts/examples/Arduino/Nicla-Vision/02-Board-Control/pwm_control.py b/scripts/examples/01-Arduino-Boards/Nicla-Vision/00-Board-Control/pwm_control.py similarity index 100% rename from scripts/examples/Arduino/Nicla-Vision/02-Board-Control/pwm_control.py rename to scripts/examples/01-Arduino-Boards/Nicla-Vision/00-Board-Control/pwm_control.py diff --git a/scripts/examples/Arduino/Portenta-H7/02-Board-Control/rtc.py b/scripts/examples/01-Arduino-Boards/Nicla-Vision/00-Board-Control/rtc.py similarity index 100% rename from scripts/examples/Arduino/Portenta-H7/02-Board-Control/rtc.py rename to scripts/examples/01-Arduino-Boards/Nicla-Vision/00-Board-Control/rtc.py diff --git a/scripts/examples/Arduino/Nicla-Vision/02-Board-Control/spi_control.py b/scripts/examples/01-Arduino-Boards/Nicla-Vision/00-Board-Control/spi_control.py similarity index 100% rename from scripts/examples/Arduino/Nicla-Vision/02-Board-Control/spi_control.py rename to scripts/examples/01-Arduino-Boards/Nicla-Vision/00-Board-Control/spi_control.py diff --git a/scripts/examples/Arduino/Portenta-H7/02-Board-Control/timer_control.py b/scripts/examples/01-Arduino-Boards/Nicla-Vision/00-Board-Control/timer_control.py similarity index 100% rename from scripts/examples/Arduino/Portenta-H7/02-Board-Control/timer_control.py rename to scripts/examples/01-Arduino-Boards/Nicla-Vision/00-Board-Control/timer_control.py diff --git a/scripts/examples/Arduino/Nicla-Vision/02-Board-Control/uart_control.py b/scripts/examples/01-Arduino-Boards/Nicla-Vision/00-Board-Control/uart_control.py similarity index 100% rename from
scripts/examples/Arduino/Nicla-Vision/02-Board-Control/uart_control.py rename to scripts/examples/01-Arduino-Boards/Nicla-Vision/00-Board-Control/uart_control.py diff --git a/scripts/examples/Arduino/Portenta-H7/02-Board-Control/usb_hid.py b/scripts/examples/01-Arduino-Boards/Nicla-Vision/00-Board-Control/usb_hid.py similarity index 100% rename from scripts/examples/Arduino/Portenta-H7/02-Board-Control/usb_hid.py rename to scripts/examples/01-Arduino-Boards/Nicla-Vision/00-Board-Control/usb_hid.py diff --git a/scripts/examples/OpenMV/02-Board-Control/usb_vcp.py b/scripts/examples/01-Arduino-Boards/Nicla-Vision/00-Board-Control/usb_vcp.py similarity index 100% rename from scripts/examples/OpenMV/02-Board-Control/usb_vcp.py rename to scripts/examples/01-Arduino-Boards/Nicla-Vision/00-Board-Control/usb_vcp.py diff --git a/scripts/examples/Arduino/Nicla-Vision/02-Board-Control/vsync_gpio_output.py b/scripts/examples/01-Arduino-Boards/Nicla-Vision/00-Board-Control/vsync_gpio_output.py similarity index 100% rename from scripts/examples/Arduino/Nicla-Vision/02-Board-Control/vsync_gpio_output.py rename to scripts/examples/01-Arduino-Boards/Nicla-Vision/00-Board-Control/vsync_gpio_output.py diff --git a/scripts/examples/Arduino/Nicla-Vision/01-Sensors/lsm6dsox_basic.py b/scripts/examples/01-Arduino-Boards/Nicla-Vision/01-Sensors/lsm6dsox_basic.py similarity index 100% rename from scripts/examples/Arduino/Nicla-Vision/01-Sensors/lsm6dsox_basic.py rename to scripts/examples/01-Arduino-Boards/Nicla-Vision/01-Sensors/lsm6dsox_basic.py diff --git a/scripts/examples/Arduino/Nicla-Vision/01-Sensors/lsm6dsox_mlc.py b/scripts/examples/01-Arduino-Boards/Nicla-Vision/01-Sensors/lsm6dsox_mlc.py similarity index 100% rename from scripts/examples/Arduino/Nicla-Vision/01-Sensors/lsm6dsox_mlc.py rename to scripts/examples/01-Arduino-Boards/Nicla-Vision/01-Sensors/lsm6dsox_mlc.py diff --git a/scripts/examples/Arduino/Nicla-Vision/01-Sensors/vl53l1x_tof.py b/scripts/examples/01-Arduino-Boards/Nicla-Vision/01-Sensors/vl53l1x_tof.py similarity index 100% rename from scripts/examples/Arduino/Nicla-Vision/01-Sensors/vl53l1x_tof.py rename to scripts/examples/01-Arduino-Boards/Nicla-Vision/01-Sensors/vl53l1x_tof.py diff --git a/scripts/examples/Arduino/Nicla-Vision/37-Audio/audio_fft.py b/scripts/examples/01-Arduino-Boards/Nicla-Vision/02-Audio/audio_fft.py similarity index 100% rename from scripts/examples/Arduino/Nicla-Vision/37-Audio/audio_fft.py rename to scripts/examples/01-Arduino-Boards/Nicla-Vision/02-Audio/audio_fft.py diff --git a/scripts/examples/Arduino/Nicla-Vision/40-WiFi/connect.py b/scripts/examples/01-Arduino-Boards/Nicla-Vision/03-WiFi/connect.py similarity index 100% rename from scripts/examples/Arduino/Nicla-Vision/40-WiFi/connect.py rename to scripts/examples/01-Arduino-Boards/Nicla-Vision/03-WiFi/connect.py diff --git a/scripts/examples/Arduino/Nicla-Vision/40-WiFi/dns.py b/scripts/examples/01-Arduino-Boards/Nicla-Vision/03-WiFi/dns.py similarity index 100% rename from scripts/examples/Arduino/Nicla-Vision/40-WiFi/dns.py rename to scripts/examples/01-Arduino-Boards/Nicla-Vision/03-WiFi/dns.py diff --git a/scripts/examples/Arduino/Nicla-Vision/40-WiFi/http_client.py b/scripts/examples/01-Arduino-Boards/Nicla-Vision/03-WiFi/http_client.py similarity index 100% rename from scripts/examples/Arduino/Nicla-Vision/40-WiFi/http_client.py rename to scripts/examples/01-Arduino-Boards/Nicla-Vision/03-WiFi/http_client.py diff --git a/scripts/examples/Arduino/Nicla-Vision/40-WiFi/http_client_ssl.py 
b/scripts/examples/01-Arduino-Boards/Nicla-Vision/03-WiFi/http_client_ssl.py similarity index 100% rename from scripts/examples/Arduino/Nicla-Vision/40-WiFi/http_client_ssl.py rename to scripts/examples/01-Arduino-Boards/Nicla-Vision/03-WiFi/http_client_ssl.py diff --git a/scripts/examples/Arduino/Nicla-Vision/40-WiFi/mjpeg_streamer.py b/scripts/examples/01-Arduino-Boards/Nicla-Vision/03-WiFi/mjpeg_streamer.py similarity index 100% rename from scripts/examples/Arduino/Nicla-Vision/40-WiFi/mjpeg_streamer.py rename to scripts/examples/01-Arduino-Boards/Nicla-Vision/03-WiFi/mjpeg_streamer.py diff --git a/scripts/examples/Arduino/Nicla-Vision/40-WiFi/mqtt_pub.py b/scripts/examples/01-Arduino-Boards/Nicla-Vision/03-WiFi/mqtt_pub.py similarity index 100% rename from scripts/examples/Arduino/Nicla-Vision/40-WiFi/mqtt_pub.py rename to scripts/examples/01-Arduino-Boards/Nicla-Vision/03-WiFi/mqtt_pub.py diff --git a/scripts/examples/Arduino/Nicla-Vision/40-WiFi/mqtt_sub.py b/scripts/examples/01-Arduino-Boards/Nicla-Vision/03-WiFi/mqtt_sub.py similarity index 100% rename from scripts/examples/Arduino/Nicla-Vision/40-WiFi/mqtt_sub.py rename to scripts/examples/01-Arduino-Boards/Nicla-Vision/03-WiFi/mqtt_sub.py diff --git a/scripts/examples/Arduino/Nicla-Vision/40-WiFi/ntp.py b/scripts/examples/01-Arduino-Boards/Nicla-Vision/03-WiFi/ntp.py similarity index 100% rename from scripts/examples/Arduino/Nicla-Vision/40-WiFi/ntp.py rename to scripts/examples/01-Arduino-Boards/Nicla-Vision/03-WiFi/ntp.py diff --git a/scripts/examples/Arduino/Nicla-Vision/40-WiFi/scan.py b/scripts/examples/01-Arduino-Boards/Nicla-Vision/03-WiFi/scan.py similarity index 100% rename from scripts/examples/Arduino/Nicla-Vision/40-WiFi/scan.py rename to scripts/examples/01-Arduino-Boards/Nicla-Vision/03-WiFi/scan.py diff --git a/scripts/examples/Arduino/Nicla-Vision/40-WiFi/static_ip.py b/scripts/examples/01-Arduino-Boards/Nicla-Vision/03-WiFi/static_ip.py similarity index 100% rename from scripts/examples/Arduino/Nicla-Vision/40-WiFi/static_ip.py rename to scripts/examples/01-Arduino-Boards/Nicla-Vision/03-WiFi/static_ip.py diff --git a/scripts/examples/Arduino/Nicla-Vision/41-Bluetooth/ble_temperature.py b/scripts/examples/01-Arduino-Boards/Nicla-Vision/04-Bluetooth/ble_temperature.py similarity index 100% rename from scripts/examples/Arduino/Nicla-Vision/41-Bluetooth/ble_temperature.py rename to scripts/examples/01-Arduino-Boards/Nicla-Vision/04-Bluetooth/ble_temperature.py diff --git a/scripts/examples/Arduino/Nicla-Vision/19-Low-Power/deep_sleep.py b/scripts/examples/01-Arduino-Boards/Nicla-Vision/05-Low-Power/deep_sleep.py similarity index 100% rename from scripts/examples/Arduino/Nicla-Vision/19-Low-Power/deep_sleep.py rename to scripts/examples/01-Arduino-Boards/Nicla-Vision/05-Low-Power/deep_sleep.py diff --git a/scripts/examples/Arduino/Nicla-Vision/19-Low-Power/extint_wakeup.py b/scripts/examples/01-Arduino-Boards/Nicla-Vision/05-Low-Power/extint_wakeup.py similarity index 100% rename from scripts/examples/Arduino/Nicla-Vision/19-Low-Power/extint_wakeup.py rename to scripts/examples/01-Arduino-Boards/Nicla-Vision/05-Low-Power/extint_wakeup.py diff --git a/scripts/examples/Arduino/Portenta-H7/19-Low-Power/stop_mode.py b/scripts/examples/01-Arduino-Boards/Nicla-Vision/05-Low-Power/stop_mode.py similarity index 100% rename from scripts/examples/Arduino/Portenta-H7/19-Low-Power/stop_mode.py rename to scripts/examples/01-Arduino-Boards/Nicla-Vision/05-Low-Power/stop_mode.py diff --git 
a/scripts/examples/OpenMV/02-Board-Control/adc_read.py b/scripts/examples/01-Arduino-Boards/Portenta-H7/00-Board-Control/adc_read_ext_channel.py similarity index 100% rename from scripts/examples/OpenMV/02-Board-Control/adc_read.py rename to scripts/examples/01-Arduino-Boards/Portenta-H7/00-Board-Control/adc_read_ext_channel.py diff --git a/scripts/examples/OpenMV/02-Board-Control/adc_read_int_channel.py b/scripts/examples/01-Arduino-Boards/Portenta-H7/00-Board-Control/adc_read_int_channel.py similarity index 100% rename from scripts/examples/OpenMV/02-Board-Control/adc_read_int_channel.py rename to scripts/examples/01-Arduino-Boards/Portenta-H7/00-Board-Control/adc_read_int_channel.py diff --git a/scripts/examples/01-Arduino-Boards/Portenta-H7/00-Board-Control/blinky.py b/scripts/examples/01-Arduino-Boards/Portenta-H7/00-Board-Control/blinky.py new file mode 100644 index 000000000..495387562 --- /dev/null +++ b/scripts/examples/01-Arduino-Boards/Portenta-H7/00-Board-Control/blinky.py @@ -0,0 +1,14 @@ +# Blinky example + +import time +from machine import Pin + +# The Portenta H7's user LED is an RGB LED; +# LED_BLUE drives its blue channel. +led = Pin("LED_BLUE", Pin.OUT) + +while True: + led.on() + time.sleep_ms(250) + led.off() + time.sleep_ms(250) diff --git a/scripts/examples/OpenMV/02-Board-Control/can.py b/scripts/examples/01-Arduino-Boards/Portenta-H7/00-Board-Control/can.py similarity index 100% rename from scripts/examples/OpenMV/02-Board-Control/can.py rename to scripts/examples/01-Arduino-Boards/Portenta-H7/00-Board-Control/can.py diff --git a/scripts/examples/OpenMV/02-Board-Control/cpufreq_scaling.py b/scripts/examples/01-Arduino-Boards/Portenta-H7/00-Board-Control/cpufreq_scaling.py similarity index 100% rename from scripts/examples/OpenMV/02-Board-Control/cpufreq_scaling.py rename to scripts/examples/01-Arduino-Boards/Portenta-H7/00-Board-Control/cpufreq_scaling.py diff --git a/scripts/examples/OpenMV/02-Board-Control/dac_write.py b/scripts/examples/01-Arduino-Boards/Portenta-H7/00-Board-Control/dac_write.py similarity index 100% rename from scripts/examples/OpenMV/02-Board-Control/dac_write.py rename to scripts/examples/01-Arduino-Boards/Portenta-H7/00-Board-Control/dac_write.py diff --git a/scripts/examples/OpenMV/02-Board-Control/dac_write_timed.py b/scripts/examples/01-Arduino-Boards/Portenta-H7/00-Board-Control/dac_write_timed.py similarity index 100% rename from scripts/examples/OpenMV/02-Board-Control/dac_write_timed.py rename to scripts/examples/01-Arduino-Boards/Portenta-H7/00-Board-Control/dac_write_timed.py diff --git a/scripts/examples/OpenMV/02-Board-Control/i2c_control.py b/scripts/examples/01-Arduino-Boards/Portenta-H7/00-Board-Control/i2c_control.py similarity index 100% rename from scripts/examples/OpenMV/02-Board-Control/i2c_control.py rename to scripts/examples/01-Arduino-Boards/Portenta-H7/00-Board-Control/i2c_control.py diff --git a/scripts/examples/OpenMV/02-Board-Control/led_control.py b/scripts/examples/01-Arduino-Boards/Portenta-H7/00-Board-Control/led_control.py similarity index 100% rename from scripts/examples/OpenMV/02-Board-Control/led_control.py rename to scripts/examples/01-Arduino-Boards/Portenta-H7/00-Board-Control/led_control.py diff --git a/scripts/examples/OpenMV/02-Board-Control/pin_control.py b/scripts/examples/01-Arduino-Boards/Portenta-H7/00-Board-Control/pin_control.py similarity index 100% rename from scripts/examples/OpenMV/02-Board-Control/pin_control.py rename to
scripts/examples/01-Arduino-Boards/Portenta-H7/00-Board-Control/pin_control.py diff --git a/scripts/examples/Arduino/Portenta-H7/02-Board-Control/pwm_control.py b/scripts/examples/01-Arduino-Boards/Portenta-H7/00-Board-Control/pwm_control.py similarity index 100% rename from scripts/examples/Arduino/Portenta-H7/02-Board-Control/pwm_control.py rename to scripts/examples/01-Arduino-Boards/Portenta-H7/00-Board-Control/pwm_control.py diff --git a/scripts/examples/OpenMV/02-Board-Control/rtc.py b/scripts/examples/01-Arduino-Boards/Portenta-H7/00-Board-Control/rtc.py similarity index 100% rename from scripts/examples/OpenMV/02-Board-Control/rtc.py rename to scripts/examples/01-Arduino-Boards/Portenta-H7/00-Board-Control/rtc.py diff --git a/scripts/examples/OpenMV/02-Board-Control/servo_control.py b/scripts/examples/01-Arduino-Boards/Portenta-H7/00-Board-Control/servo_control.py similarity index 100% rename from scripts/examples/OpenMV/02-Board-Control/servo_control.py rename to scripts/examples/01-Arduino-Boards/Portenta-H7/00-Board-Control/servo_control.py diff --git a/scripts/examples/Arduino/Portenta-H7/02-Board-Control/spi_control.py b/scripts/examples/01-Arduino-Boards/Portenta-H7/00-Board-Control/spi_control.py similarity index 100% rename from scripts/examples/Arduino/Portenta-H7/02-Board-Control/spi_control.py rename to scripts/examples/01-Arduino-Boards/Portenta-H7/00-Board-Control/spi_control.py diff --git a/scripts/examples/OpenMV/02-Board-Control/timer_control.py b/scripts/examples/01-Arduino-Boards/Portenta-H7/00-Board-Control/timer_control.py similarity index 100% rename from scripts/examples/OpenMV/02-Board-Control/timer_control.py rename to scripts/examples/01-Arduino-Boards/Portenta-H7/00-Board-Control/timer_control.py diff --git a/scripts/examples/Arduino/Portenta-H7/02-Board-Control/timer_tests.py b/scripts/examples/01-Arduino-Boards/Portenta-H7/00-Board-Control/timer_tests.py similarity index 100% rename from scripts/examples/Arduino/Portenta-H7/02-Board-Control/timer_tests.py rename to scripts/examples/01-Arduino-Boards/Portenta-H7/00-Board-Control/timer_tests.py diff --git a/scripts/examples/OpenMV/02-Board-Control/uart_control.py b/scripts/examples/01-Arduino-Boards/Portenta-H7/00-Board-Control/uart_control.py similarity index 100% rename from scripts/examples/OpenMV/02-Board-Control/uart_control.py rename to scripts/examples/01-Arduino-Boards/Portenta-H7/00-Board-Control/uart_control.py diff --git a/scripts/examples/OpenMV/02-Board-Control/usb_hid.py b/scripts/examples/01-Arduino-Boards/Portenta-H7/00-Board-Control/usb_hid.py similarity index 100% rename from scripts/examples/OpenMV/02-Board-Control/usb_hid.py rename to scripts/examples/01-Arduino-Boards/Portenta-H7/00-Board-Control/usb_hid.py diff --git a/scripts/examples/Arduino/Portenta-H7/02-Board-Control/usb_vcp.py b/scripts/examples/01-Arduino-Boards/Portenta-H7/00-Board-Control/usb_vcp.py similarity index 100% rename from scripts/examples/Arduino/Portenta-H7/02-Board-Control/usb_vcp.py rename to scripts/examples/01-Arduino-Boards/Portenta-H7/00-Board-Control/usb_vcp.py diff --git a/scripts/examples/Arduino/Portenta-H7/02-Board-Control/vsync_gpio_output.py b/scripts/examples/01-Arduino-Boards/Portenta-H7/00-Board-Control/vsync_gpio_output.py similarity index 100% rename from scripts/examples/Arduino/Portenta-H7/02-Board-Control/vsync_gpio_output.py rename to scripts/examples/01-Arduino-Boards/Portenta-H7/00-Board-Control/vsync_gpio_output.py diff --git a/scripts/examples/Arduino/Portenta-H7/37-Audio/audio_fft.py 
b/scripts/examples/01-Arduino-Boards/Portenta-H7/01-Audio/audio_fft.py similarity index 100% rename from scripts/examples/Arduino/Portenta-H7/37-Audio/audio_fft.py rename to scripts/examples/01-Arduino-Boards/Portenta-H7/01-Audio/audio_fft.py diff --git a/scripts/examples/Arduino/Portenta-H7/37-Audio/micro_speech.py b/scripts/examples/01-Arduino-Boards/Portenta-H7/01-Audio/micro_speech.py similarity index 100% rename from scripts/examples/Arduino/Portenta-H7/37-Audio/micro_speech.py rename to scripts/examples/01-Arduino-Boards/Portenta-H7/01-Audio/micro_speech.py diff --git a/scripts/examples/Arduino/Portenta-H7/40-WiFi/connect.py b/scripts/examples/01-Arduino-Boards/Portenta-H7/02-WiFi/connect.py similarity index 100% rename from scripts/examples/Arduino/Portenta-H7/40-WiFi/connect.py rename to scripts/examples/01-Arduino-Boards/Portenta-H7/02-WiFi/connect.py diff --git a/scripts/examples/Arduino/Portenta-H7/40-WiFi/dns.py b/scripts/examples/01-Arduino-Boards/Portenta-H7/02-WiFi/dns.py similarity index 100% rename from scripts/examples/Arduino/Portenta-H7/40-WiFi/dns.py rename to scripts/examples/01-Arduino-Boards/Portenta-H7/02-WiFi/dns.py diff --git a/scripts/examples/Arduino/Portenta-H7/40-WiFi/http_client.py b/scripts/examples/01-Arduino-Boards/Portenta-H7/02-WiFi/http_client.py similarity index 100% rename from scripts/examples/Arduino/Portenta-H7/40-WiFi/http_client.py rename to scripts/examples/01-Arduino-Boards/Portenta-H7/02-WiFi/http_client.py diff --git a/scripts/examples/Arduino/Portenta-H7/40-WiFi/http_client_ssl.py b/scripts/examples/01-Arduino-Boards/Portenta-H7/02-WiFi/http_client_ssl.py similarity index 100% rename from scripts/examples/Arduino/Portenta-H7/40-WiFi/http_client_ssl.py rename to scripts/examples/01-Arduino-Boards/Portenta-H7/02-WiFi/http_client_ssl.py diff --git a/scripts/examples/Arduino/Portenta-H7/40-WiFi/mjpeg_streamer.py b/scripts/examples/01-Arduino-Boards/Portenta-H7/02-WiFi/mjpeg_streamer.py similarity index 100% rename from scripts/examples/Arduino/Portenta-H7/40-WiFi/mjpeg_streamer.py rename to scripts/examples/01-Arduino-Boards/Portenta-H7/02-WiFi/mjpeg_streamer.py diff --git a/scripts/examples/Arduino/Portenta-H7/40-WiFi/mqtt_pub.py b/scripts/examples/01-Arduino-Boards/Portenta-H7/02-WiFi/mqtt_pub.py similarity index 100% rename from scripts/examples/Arduino/Portenta-H7/40-WiFi/mqtt_pub.py rename to scripts/examples/01-Arduino-Boards/Portenta-H7/02-WiFi/mqtt_pub.py diff --git a/scripts/examples/Arduino/Portenta-H7/40-WiFi/mqtt_sub.py b/scripts/examples/01-Arduino-Boards/Portenta-H7/02-WiFi/mqtt_sub.py similarity index 100% rename from scripts/examples/Arduino/Portenta-H7/40-WiFi/mqtt_sub.py rename to scripts/examples/01-Arduino-Boards/Portenta-H7/02-WiFi/mqtt_sub.py diff --git a/scripts/examples/Arduino/Portenta-H7/40-WiFi/ntp.py b/scripts/examples/01-Arduino-Boards/Portenta-H7/02-WiFi/ntp.py similarity index 100% rename from scripts/examples/Arduino/Portenta-H7/40-WiFi/ntp.py rename to scripts/examples/01-Arduino-Boards/Portenta-H7/02-WiFi/ntp.py diff --git a/scripts/examples/Arduino/Portenta-H7/40-WiFi/scan.py b/scripts/examples/01-Arduino-Boards/Portenta-H7/02-WiFi/scan.py similarity index 100% rename from scripts/examples/Arduino/Portenta-H7/40-WiFi/scan.py rename to scripts/examples/01-Arduino-Boards/Portenta-H7/02-WiFi/scan.py diff --git a/scripts/examples/Arduino/Portenta-H7/40-WiFi/static_ip.py b/scripts/examples/01-Arduino-Boards/Portenta-H7/02-WiFi/static_ip.py similarity index 100% rename from 
scripts/examples/Arduino/Portenta-H7/40-WiFi/static_ip.py rename to scripts/examples/01-Arduino-Boards/Portenta-H7/02-WiFi/static_ip.py diff --git a/scripts/examples/Arduino/Portenta-H7/41-Bluetooth/ble_temperature.py b/scripts/examples/01-Arduino-Boards/Portenta-H7/03-Bluetooth/ble_temperature.py similarity index 100% rename from scripts/examples/Arduino/Portenta-H7/41-Bluetooth/ble_temperature.py rename to scripts/examples/01-Arduino-Boards/Portenta-H7/03-Bluetooth/ble_temperature.py diff --git a/scripts/examples/Arduino/Portenta-H7/39-LoRa/lora-example.py b/scripts/examples/01-Arduino-Boards/Portenta-H7/04-LoRa/lora-example.py similarity index 100% rename from scripts/examples/Arduino/Portenta-H7/39-LoRa/lora-example.py rename to scripts/examples/01-Arduino-Boards/Portenta-H7/04-LoRa/lora-example.py diff --git a/scripts/examples/Arduino/Portenta-H7/38-Ethernet/eth_cable_test.py b/scripts/examples/01-Arduino-Boards/Portenta-H7/05-Ethernet/eth_cable_test.py similarity index 100% rename from scripts/examples/Arduino/Portenta-H7/38-Ethernet/eth_cable_test.py rename to scripts/examples/01-Arduino-Boards/Portenta-H7/05-Ethernet/eth_cable_test.py diff --git a/scripts/examples/Arduino/Portenta-H7/38-Ethernet/http_client.py b/scripts/examples/01-Arduino-Boards/Portenta-H7/05-Ethernet/http_client.py similarity index 100% rename from scripts/examples/Arduino/Portenta-H7/38-Ethernet/http_client.py rename to scripts/examples/01-Arduino-Boards/Portenta-H7/05-Ethernet/http_client.py diff --git a/scripts/examples/Arduino/Portenta-H7/38-Ethernet/http_client_ssl.py b/scripts/examples/01-Arduino-Boards/Portenta-H7/05-Ethernet/http_client_ssl.py similarity index 100% rename from scripts/examples/Arduino/Portenta-H7/38-Ethernet/http_client_ssl.py rename to scripts/examples/01-Arduino-Boards/Portenta-H7/05-Ethernet/http_client_ssl.py diff --git a/scripts/examples/Arduino/Portenta-H7/38-Ethernet/peer_to_peer.py b/scripts/examples/01-Arduino-Boards/Portenta-H7/05-Ethernet/peer_to_peer.py similarity index 100% rename from scripts/examples/Arduino/Portenta-H7/38-Ethernet/peer_to_peer.py rename to scripts/examples/01-Arduino-Boards/Portenta-H7/05-Ethernet/peer_to_peer.py diff --git a/scripts/examples/Arduino/Portenta-H7/19-Low-Power/deep_sleep.py b/scripts/examples/01-Arduino-Boards/Portenta-H7/06-Low-Power/deep_sleep.py similarity index 100% rename from scripts/examples/Arduino/Portenta-H7/19-Low-Power/deep_sleep.py rename to scripts/examples/01-Arduino-Boards/Portenta-H7/06-Low-Power/deep_sleep.py diff --git a/scripts/examples/OpenMV/19-Low-Power/extint_wakeup.py b/scripts/examples/01-Arduino-Boards/Portenta-H7/06-Low-Power/extint_wakeup.py similarity index 100% rename from scripts/examples/OpenMV/19-Low-Power/extint_wakeup.py rename to scripts/examples/01-Arduino-Boards/Portenta-H7/06-Low-Power/extint_wakeup.py diff --git a/scripts/examples/Arduino/Portenta-H7/19-Low-Power/himax_wakeup_on_motion_detection.py b/scripts/examples/01-Arduino-Boards/Portenta-H7/06-Low-Power/himax_wakeup_on_motion_detection.py similarity index 100% rename from scripts/examples/Arduino/Portenta-H7/19-Low-Power/himax_wakeup_on_motion_detection.py rename to scripts/examples/01-Arduino-Boards/Portenta-H7/06-Low-Power/himax_wakeup_on_motion_detection.py diff --git a/scripts/examples/Arduino/Portenta-H7/19-Low-Power/sensor_sleep.py b/scripts/examples/01-Arduino-Boards/Portenta-H7/06-Low-Power/sensor_sleep.py similarity index 100% rename from scripts/examples/Arduino/Portenta-H7/19-Low-Power/sensor_sleep.py rename to 
scripts/examples/01-Arduino-Boards/Portenta-H7/06-Low-Power/sensor_sleep.py diff --git a/scripts/examples/OpenMV/19-Low-Power/stop_mode.py b/scripts/examples/01-Arduino-Boards/Portenta-H7/06-Low-Power/stop_mode.py similarity index 100% rename from scripts/examples/OpenMV/19-Low-Power/stop_mode.py rename to scripts/examples/01-Arduino-Boards/Portenta-H7/06-Low-Power/stop_mode.py diff --git a/scripts/examples/Arduino/Nicla-Vision/00-Basics/helloworld.py b/scripts/examples/02-HelloWorld/helloworld.py similarity index 100% rename from scripts/examples/Arduino/Nicla-Vision/00-Basics/helloworld.py rename to scripts/examples/02-HelloWorld/helloworld.py diff --git a/scripts/examples/Arduino/Nicla-Vision/05-Snapshot/emboss_snapshot.py b/scripts/examples/03-Camera/00-Snapshot/emboss_snapshot.py similarity index 100% rename from scripts/examples/Arduino/Nicla-Vision/05-Snapshot/emboss_snapshot.py rename to scripts/examples/03-Camera/00-Snapshot/emboss_snapshot.py diff --git a/scripts/examples/Arduino/Nicla-Vision/05-Snapshot/snapshot.py b/scripts/examples/03-Camera/00-Snapshot/snapshot.py similarity index 100% rename from scripts/examples/Arduino/Nicla-Vision/05-Snapshot/snapshot.py rename to scripts/examples/03-Camera/00-Snapshot/snapshot.py diff --git a/scripts/examples/Arduino/Nicla-Vision/05-Snapshot/snapshot_on_face_detection.py b/scripts/examples/03-Camera/00-Snapshot/snapshot_on_face_detection.py similarity index 100% rename from scripts/examples/Arduino/Nicla-Vision/05-Snapshot/snapshot_on_face_detection.py rename to scripts/examples/03-Camera/00-Snapshot/snapshot_on_face_detection.py diff --git a/scripts/examples/OpenMV/05-Snapshot/snapshot_on_movement.py b/scripts/examples/03-Camera/00-Snapshot/snapshot_on_movement.py similarity index 100% rename from scripts/examples/OpenMV/05-Snapshot/snapshot_on_movement.py rename to scripts/examples/03-Camera/00-Snapshot/snapshot_on_movement.py diff --git a/scripts/examples/Arduino/Nicla-Vision/05-Snapshot/time_lapse_photos.py b/scripts/examples/03-Camera/00-Snapshot/time_lapse_photos.py similarity index 100% rename from scripts/examples/Arduino/Nicla-Vision/05-Snapshot/time_lapse_photos.py rename to scripts/examples/03-Camera/00-Snapshot/time_lapse_photos.py diff --git a/scripts/examples/Arduino/Nicla-Vision/06-Video-Recording/gif.py b/scripts/examples/03-Camera/01-Video-Recording/gif.py similarity index 100% rename from scripts/examples/Arduino/Nicla-Vision/06-Video-Recording/gif.py rename to scripts/examples/03-Camera/01-Video-Recording/gif.py diff --git a/scripts/examples/Arduino/Nicla-Vision/06-Video-Recording/gif_on_face_detection.py b/scripts/examples/03-Camera/01-Video-Recording/gif_on_face_detection.py similarity index 100% rename from scripts/examples/Arduino/Nicla-Vision/06-Video-Recording/gif_on_face_detection.py rename to scripts/examples/03-Camera/01-Video-Recording/gif_on_face_detection.py diff --git a/scripts/examples/OpenMV/06-Video-Recording/gif_on_movement.py b/scripts/examples/03-Camera/01-Video-Recording/gif_on_movement.py similarity index 100% rename from scripts/examples/OpenMV/06-Video-Recording/gif_on_movement.py rename to scripts/examples/03-Camera/01-Video-Recording/gif_on_movement.py diff --git a/scripts/examples/Arduino/Nicla-Vision/06-Video-Recording/imageio_memory.py b/scripts/examples/03-Camera/01-Video-Recording/imageio_memory.py similarity index 100% rename from scripts/examples/Arduino/Nicla-Vision/06-Video-Recording/imageio_memory.py rename to scripts/examples/03-Camera/01-Video-Recording/imageio_memory.py diff --git 
a/scripts/examples/Arduino/Nicla-Vision/06-Video-Recording/imageio_read.py b/scripts/examples/03-Camera/01-Video-Recording/imageio_read.py similarity index 100% rename from scripts/examples/Arduino/Nicla-Vision/06-Video-Recording/imageio_read.py rename to scripts/examples/03-Camera/01-Video-Recording/imageio_read.py diff --git a/scripts/examples/Arduino/Nicla-Vision/06-Video-Recording/imageio_write.py b/scripts/examples/03-Camera/01-Video-Recording/imageio_write.py similarity index 100% rename from scripts/examples/Arduino/Nicla-Vision/06-Video-Recording/imageio_write.py rename to scripts/examples/03-Camera/01-Video-Recording/imageio_write.py diff --git a/scripts/examples/Arduino/Nicla-Vision/06-Video-Recording/mjpeg.py b/scripts/examples/03-Camera/01-Video-Recording/mjpeg.py similarity index 100% rename from scripts/examples/Arduino/Nicla-Vision/06-Video-Recording/mjpeg.py rename to scripts/examples/03-Camera/01-Video-Recording/mjpeg.py diff --git a/scripts/examples/Arduino/Nicla-Vision/06-Video-Recording/mjpeg_on_face_detection.py b/scripts/examples/03-Camera/01-Video-Recording/mjpeg_on_face_detection.py similarity index 100% rename from scripts/examples/Arduino/Nicla-Vision/06-Video-Recording/mjpeg_on_face_detection.py rename to scripts/examples/03-Camera/01-Video-Recording/mjpeg_on_face_detection.py diff --git a/scripts/examples/OpenMV/06-Video-Recording/mjpeg_on_movement.py b/scripts/examples/03-Camera/01-Video-Recording/mjpeg_on_movement.py similarity index 100% rename from scripts/examples/OpenMV/06-Video-Recording/mjpeg_on_movement.py rename to scripts/examples/03-Camera/01-Video-Recording/mjpeg_on_movement.py diff --git a/scripts/examples/Arduino/Nicla-Vision/22-Optical-Flow/absolute-rotation-scale.py b/scripts/examples/03-Camera/02-Optical-Flow/absolute-rotation-scale.py similarity index 100% rename from scripts/examples/Arduino/Nicla-Vision/22-Optical-Flow/absolute-rotation-scale.py rename to scripts/examples/03-Camera/02-Optical-Flow/absolute-rotation-scale.py diff --git a/scripts/examples/Arduino/Nicla-Vision/22-Optical-Flow/absolute-translation.py b/scripts/examples/03-Camera/02-Optical-Flow/absolute-translation.py similarity index 100% rename from scripts/examples/Arduino/Nicla-Vision/22-Optical-Flow/absolute-translation.py rename to scripts/examples/03-Camera/02-Optical-Flow/absolute-translation.py diff --git a/scripts/examples/Arduino/Nicla-Vision/22-Optical-Flow/differential-rotation-scale.py b/scripts/examples/03-Camera/02-Optical-Flow/differential-rotation-scale.py similarity index 100% rename from scripts/examples/Arduino/Nicla-Vision/22-Optical-Flow/differential-rotation-scale.py rename to scripts/examples/03-Camera/02-Optical-Flow/differential-rotation-scale.py diff --git a/scripts/examples/Arduino/Nicla-Vision/22-Optical-Flow/differential-translation.py b/scripts/examples/03-Camera/02-Optical-Flow/differential-translation.py similarity index 100% rename from scripts/examples/Arduino/Nicla-Vision/22-Optical-Flow/differential-translation.py rename to scripts/examples/03-Camera/02-Optical-Flow/differential-translation.py diff --git a/scripts/examples/Arduino/Nicla-Vision/22-Optical-Flow/image-patches-absolute-rotation-scale.py b/scripts/examples/03-Camera/02-Optical-Flow/image-patches-absolute-rotation-scale.py similarity index 100% rename from scripts/examples/Arduino/Nicla-Vision/22-Optical-Flow/image-patches-absolute-rotation-scale.py rename to scripts/examples/03-Camera/02-Optical-Flow/image-patches-absolute-rotation-scale.py diff --git 
a/scripts/examples/Arduino/Nicla-Vision/22-Optical-Flow/image-patches-absolute-translation.py b/scripts/examples/03-Camera/02-Optical-Flow/image-patches-absolute-translation.py similarity index 100% rename from scripts/examples/Arduino/Nicla-Vision/22-Optical-Flow/image-patches-absolute-translation.py rename to scripts/examples/03-Camera/02-Optical-Flow/image-patches-absolute-translation.py diff --git a/scripts/examples/Arduino/Nicla-Vision/22-Optical-Flow/image-patches-differential-rotation-scale.py b/scripts/examples/03-Camera/02-Optical-Flow/image-patches-differential-rotation-scale.py similarity index 100% rename from scripts/examples/Arduino/Nicla-Vision/22-Optical-Flow/image-patches-differential-rotation-scale.py rename to scripts/examples/03-Camera/02-Optical-Flow/image-patches-differential-rotation-scale.py diff --git a/scripts/examples/Arduino/Nicla-Vision/22-Optical-Flow/image-patches-differential-translation.py b/scripts/examples/03-Camera/02-Optical-Flow/image-patches-differential-translation.py similarity index 100% rename from scripts/examples/Arduino/Nicla-Vision/22-Optical-Flow/image-patches-differential-translation.py rename to scripts/examples/03-Camera/02-Optical-Flow/image-patches-differential-translation.py diff --git a/scripts/examples/OpenMV/37-Event-Cameras/frogeye2020.py b/scripts/examples/03-Camera/03-Event-Cameras/frogeye2020.py similarity index 100% rename from scripts/examples/OpenMV/37-Event-Cameras/frogeye2020.py rename to scripts/examples/03-Camera/03-Event-Cameras/frogeye2020.py diff --git a/scripts/examples/OpenMV/37-Event-Cameras/frogeye2020_with_tracking.py b/scripts/examples/03-Camera/03-Event-Cameras/frogeye2020_with_tracking.py similarity index 100% rename from scripts/examples/OpenMV/37-Event-Cameras/frogeye2020_with_tracking.py rename to scripts/examples/03-Camera/03-Event-Cameras/frogeye2020_with_tracking.py diff --git a/scripts/examples/Arduino/Portenta-H7/28-Global-Shutter/high_fps.py b/scripts/examples/03-Camera/04-Global-Shutter/high_fps.py similarity index 100% rename from scripts/examples/Arduino/Portenta-H7/28-Global-Shutter/high_fps.py rename to scripts/examples/03-Camera/04-Global-Shutter/high_fps.py diff --git a/scripts/examples/Arduino/Portenta-H7/28-Global-Shutter/triggered_mode.py b/scripts/examples/03-Camera/04-Global-Shutter/triggered_mode.py similarity index 100% rename from scripts/examples/Arduino/Portenta-H7/28-Global-Shutter/triggered_mode.py rename to scripts/examples/03-Camera/04-Global-Shutter/triggered_mode.py diff --git a/scripts/examples/OpenMV/27-Lepton/lepton_get_object_high_temp.py b/scripts/examples/03-Camera/05-FLIR-Lepton/lepton_get_object_high_temp.py similarity index 100% rename from scripts/examples/OpenMV/27-Lepton/lepton_get_object_high_temp.py rename to scripts/examples/03-Camera/05-FLIR-Lepton/lepton_get_object_high_temp.py diff --git a/scripts/examples/OpenMV/27-Lepton/lepton_get_object_temp.py b/scripts/examples/03-Camera/05-FLIR-Lepton/lepton_get_object_temp.py similarity index 100% rename from scripts/examples/OpenMV/27-Lepton/lepton_get_object_temp.py rename to scripts/examples/03-Camera/05-FLIR-Lepton/lepton_get_object_temp.py diff --git a/scripts/examples/OpenMV/27-Lepton/lepton_get_object_temp_color.py b/scripts/examples/03-Camera/05-FLIR-Lepton/lepton_get_object_temp_color.py similarity index 100% rename from scripts/examples/OpenMV/27-Lepton/lepton_get_object_temp_color.py rename to scripts/examples/03-Camera/05-FLIR-Lepton/lepton_get_object_temp_color.py diff --git 
a/scripts/examples/OpenMV/27-Lepton/lepton_get_object_temp_color_lcd.py b/scripts/examples/03-Camera/05-FLIR-Lepton/lepton_get_object_temp_color_lcd.py similarity index 100% rename from scripts/examples/OpenMV/27-Lepton/lepton_get_object_temp_color_lcd.py rename to scripts/examples/03-Camera/05-FLIR-Lepton/lepton_get_object_temp_color_lcd.py diff --git a/scripts/examples/OpenMV/27-Lepton/lepton_get_object_temp_lcd.py b/scripts/examples/03-Camera/05-FLIR-Lepton/lepton_get_object_temp_lcd.py similarity index 100% rename from scripts/examples/OpenMV/27-Lepton/lepton_get_object_temp_lcd.py rename to scripts/examples/03-Camera/05-FLIR-Lepton/lepton_get_object_temp_lcd.py diff --git a/scripts/examples/OpenMV/27-Lepton/lepton_hotspot_grayscale_color_tracking.py b/scripts/examples/03-Camera/05-FLIR-Lepton/lepton_hotspot_grayscale_color_tracking.py similarity index 100% rename from scripts/examples/OpenMV/27-Lepton/lepton_hotspot_grayscale_color_tracking.py rename to scripts/examples/03-Camera/05-FLIR-Lepton/lepton_hotspot_grayscale_color_tracking.py diff --git a/scripts/examples/OpenMV/27-Lepton/lepton_hotspot_grayscale_color_tracking_lcd.py b/scripts/examples/03-Camera/05-FLIR-Lepton/lepton_hotspot_grayscale_color_tracking_lcd.py similarity index 100% rename from scripts/examples/OpenMV/27-Lepton/lepton_hotspot_grayscale_color_tracking_lcd.py rename to scripts/examples/03-Camera/05-FLIR-Lepton/lepton_hotspot_grayscale_color_tracking_lcd.py diff --git a/scripts/examples/OpenMV/27-Lepton/lepton_hotspot_rgb565_color_tracking.py b/scripts/examples/03-Camera/05-FLIR-Lepton/lepton_hotspot_rgb565_color_tracking.py similarity index 100% rename from scripts/examples/OpenMV/27-Lepton/lepton_hotspot_rgb565_color_tracking.py rename to scripts/examples/03-Camera/05-FLIR-Lepton/lepton_hotspot_rgb565_color_tracking.py diff --git a/scripts/examples/OpenMV/27-Lepton/lepton_hotspot_rgb565_color_tracking_lcd.py b/scripts/examples/03-Camera/05-FLIR-Lepton/lepton_hotspot_rgb565_color_tracking_lcd.py similarity index 100% rename from scripts/examples/OpenMV/27-Lepton/lepton_hotspot_rgb565_color_tracking_lcd.py rename to scripts/examples/03-Camera/05-FLIR-Lepton/lepton_hotspot_rgb565_color_tracking_lcd.py diff --git a/scripts/examples/OpenMV/27-Lepton/lepton_target_temp_hotspot_grayscale_color_tracking.py b/scripts/examples/03-Camera/05-FLIR-Lepton/lepton_target_temp_hotspot_grayscale_color_tracking.py similarity index 100% rename from scripts/examples/OpenMV/27-Lepton/lepton_target_temp_hotspot_grayscale_color_tracking.py rename to scripts/examples/03-Camera/05-FLIR-Lepton/lepton_target_temp_hotspot_grayscale_color_tracking.py diff --git a/scripts/examples/OpenMV/27-Lepton/lepton_target_temp_hotspot_rgb565_color_tracking.py b/scripts/examples/03-Camera/05-FLIR-Lepton/lepton_target_temp_hotspot_rgb565_color_tracking.py similarity index 100% rename from scripts/examples/OpenMV/27-Lepton/lepton_target_temp_hotspot_rgb565_color_tracking.py rename to scripts/examples/03-Camera/05-FLIR-Lepton/lepton_target_temp_hotspot_rgb565_color_tracking.py diff --git a/scripts/examples/OpenMV/38-Time-of-Flight/tof_camera.py b/scripts/examples/03-Camera/06-Time-of-Flight/tof_camera.py similarity index 100% rename from scripts/examples/OpenMV/38-Time-of-Flight/tof_camera.py rename to scripts/examples/03-Camera/06-Time-of-Flight/tof_camera.py diff --git a/scripts/examples/OpenMV/38-Time-of-Flight/tof_overlay.py b/scripts/examples/03-Camera/06-Time-of-Flight/tof_overlay.py similarity index 100% rename from 
scripts/examples/OpenMV/38-Time-of-Flight/tof_overlay.py rename to scripts/examples/03-Camera/06-Time-of-Flight/tof_overlay.py diff --git a/scripts/examples/OpenMV/21-Sensor-Control/sensor_auto_gain_control.py b/scripts/examples/03-Camera/07-Sensor-Control/sensor_auto_gain_control.py similarity index 100% rename from scripts/examples/OpenMV/21-Sensor-Control/sensor_auto_gain_control.py rename to scripts/examples/03-Camera/07-Sensor-Control/sensor_auto_gain_control.py diff --git a/scripts/examples/OpenMV/21-Sensor-Control/sensor_exposure_control.py b/scripts/examples/03-Camera/07-Sensor-Control/sensor_exposure_control.py similarity index 100% rename from scripts/examples/OpenMV/21-Sensor-Control/sensor_exposure_control.py rename to scripts/examples/03-Camera/07-Sensor-Control/sensor_exposure_control.py diff --git a/scripts/examples/OpenMV/21-Sensor-Control/sensor_horizontal_mirror.py b/scripts/examples/03-Camera/07-Sensor-Control/sensor_horizontal_mirror.py similarity index 100% rename from scripts/examples/OpenMV/21-Sensor-Control/sensor_horizontal_mirror.py rename to scripts/examples/03-Camera/07-Sensor-Control/sensor_horizontal_mirror.py diff --git a/scripts/examples/OpenMV/21-Sensor-Control/sensor_manual_whitebal_control.py b/scripts/examples/03-Camera/07-Sensor-Control/sensor_manual_whitebal_control.py similarity index 100% rename from scripts/examples/OpenMV/21-Sensor-Control/sensor_manual_whitebal_control.py rename to scripts/examples/03-Camera/07-Sensor-Control/sensor_manual_whitebal_control.py diff --git a/scripts/examples/OpenMV/21-Sensor-Control/sensor_vertical_flip.py b/scripts/examples/03-Camera/07-Sensor-Control/sensor_vertical_flip.py similarity index 100% rename from scripts/examples/OpenMV/21-Sensor-Control/sensor_vertical_flip.py rename to scripts/examples/03-Camera/07-Sensor-Control/sensor_vertical_flip.py diff --git a/scripts/examples/OpenMV/21-Sensor-Control/sesnor_manual_gain_control.py b/scripts/examples/03-Camera/07-Sensor-Control/sesnor_manual_gain_control.py similarity index 100% rename from scripts/examples/OpenMV/21-Sensor-Control/sesnor_manual_gain_control.py rename to scripts/examples/03-Camera/07-Sensor-Control/sesnor_manual_gain_control.py diff --git a/scripts/examples/Arduino/Portenta-H7/35-Readout-Control/100_fps_ir_led_tracking.py b/scripts/examples/03-Camera/08-Readout-Control/100_fps_ir_led_tracking.py similarity index 100% rename from scripts/examples/Arduino/Portenta-H7/35-Readout-Control/100_fps_ir_led_tracking.py rename to scripts/examples/03-Camera/08-Readout-Control/100_fps_ir_led_tracking.py diff --git a/scripts/examples/Arduino/Portenta-H7/35-Readout-Control/apriltag_tracking.py b/scripts/examples/03-Camera/08-Readout-Control/apriltag_tracking.py similarity index 100% rename from scripts/examples/Arduino/Portenta-H7/35-Readout-Control/apriltag_tracking.py rename to scripts/examples/03-Camera/08-Readout-Control/apriltag_tracking.py diff --git a/scripts/examples/Arduino/Nicla-Vision/03-Drawing/arrow_drawing.py b/scripts/examples/04-Image-Processing/00-Drawing/arrow_drawing.py similarity index 100% rename from scripts/examples/Arduino/Nicla-Vision/03-Drawing/arrow_drawing.py rename to scripts/examples/04-Image-Processing/00-Drawing/arrow_drawing.py diff --git a/scripts/examples/Arduino/Nicla-Vision/03-Drawing/circle_drawing.py b/scripts/examples/04-Image-Processing/00-Drawing/circle_drawing.py similarity index 100% rename from scripts/examples/Arduino/Nicla-Vision/03-Drawing/circle_drawing.py rename to 
scripts/examples/04-Image-Processing/00-Drawing/circle_drawing.py diff --git a/scripts/examples/Arduino/Portenta-H7/03-Drawing/copy2fb.py b/scripts/examples/04-Image-Processing/00-Drawing/copy2fb.py similarity index 100% rename from scripts/examples/Arduino/Portenta-H7/03-Drawing/copy2fb.py rename to scripts/examples/04-Image-Processing/00-Drawing/copy2fb.py diff --git a/scripts/examples/Arduino/Nicla-Vision/03-Drawing/cross_drawing.py b/scripts/examples/04-Image-Processing/00-Drawing/cross_drawing.py similarity index 100% rename from scripts/examples/Arduino/Nicla-Vision/03-Drawing/cross_drawing.py rename to scripts/examples/04-Image-Processing/00-Drawing/cross_drawing.py diff --git a/scripts/examples/Arduino/Nicla-Vision/03-Drawing/ellipse_drawing.py b/scripts/examples/04-Image-Processing/00-Drawing/ellipse_drawing.py similarity index 100% rename from scripts/examples/Arduino/Nicla-Vision/03-Drawing/ellipse_drawing.py rename to scripts/examples/04-Image-Processing/00-Drawing/ellipse_drawing.py diff --git a/scripts/examples/Arduino/Nicla-Vision/03-Drawing/flood_fill.py b/scripts/examples/04-Image-Processing/00-Drawing/flood_fill.py similarity index 100% rename from scripts/examples/Arduino/Nicla-Vision/03-Drawing/flood_fill.py rename to scripts/examples/04-Image-Processing/00-Drawing/flood_fill.py diff --git a/scripts/examples/Arduino/Nicla-Vision/03-Drawing/image_drawing.py b/scripts/examples/04-Image-Processing/00-Drawing/image_drawing.py similarity index 100% rename from scripts/examples/Arduino/Nicla-Vision/03-Drawing/image_drawing.py rename to scripts/examples/04-Image-Processing/00-Drawing/image_drawing.py diff --git a/scripts/examples/Arduino/Nicla-Vision/03-Drawing/image_drawing_advanced.py b/scripts/examples/04-Image-Processing/00-Drawing/image_drawing_advanced.py similarity index 100% rename from scripts/examples/Arduino/Nicla-Vision/03-Drawing/image_drawing_advanced.py rename to scripts/examples/04-Image-Processing/00-Drawing/image_drawing_advanced.py diff --git a/scripts/examples/Arduino/Nicla-Vision/03-Drawing/image_drawing_alpha_blending_test.py b/scripts/examples/04-Image-Processing/00-Drawing/image_drawing_alpha_blending_test.py similarity index 100% rename from scripts/examples/Arduino/Nicla-Vision/03-Drawing/image_drawing_alpha_blending_test.py rename to scripts/examples/04-Image-Processing/00-Drawing/image_drawing_alpha_blending_test.py diff --git a/scripts/examples/Arduino/Nicla-Vision/03-Drawing/image_drawing_alpha_blending_with_color_table_test.py b/scripts/examples/04-Image-Processing/00-Drawing/image_drawing_alpha_blending_with_color_table_test.py similarity index 100% rename from scripts/examples/Arduino/Nicla-Vision/03-Drawing/image_drawing_alpha_blending_with_color_table_test.py rename to scripts/examples/04-Image-Processing/00-Drawing/image_drawing_alpha_blending_with_color_table_test.py diff --git a/scripts/examples/Arduino/Nicla-Vision/03-Drawing/image_drawing_alpha_table_test.py b/scripts/examples/04-Image-Processing/00-Drawing/image_drawing_alpha_table_test.py similarity index 100% rename from scripts/examples/Arduino/Nicla-Vision/03-Drawing/image_drawing_alpha_table_test.py rename to scripts/examples/04-Image-Processing/00-Drawing/image_drawing_alpha_table_test.py diff --git a/scripts/examples/Arduino/Nicla-Vision/03-Drawing/image_drawing_alpha_table_with_color_table_test.py b/scripts/examples/04-Image-Processing/00-Drawing/image_drawing_alpha_table_with_color_table_test.py similarity index 100% rename from 
scripts/examples/Arduino/Nicla-Vision/03-Drawing/image_drawing_alpha_table_with_color_table_test.py rename to scripts/examples/04-Image-Processing/00-Drawing/image_drawing_alpha_table_with_color_table_test.py diff --git a/scripts/examples/Arduino/Nicla-Vision/03-Drawing/image_drawing_scale_down_test.py b/scripts/examples/04-Image-Processing/00-Drawing/image_drawing_scale_down_test.py similarity index 100% rename from scripts/examples/Arduino/Nicla-Vision/03-Drawing/image_drawing_scale_down_test.py rename to scripts/examples/04-Image-Processing/00-Drawing/image_drawing_scale_down_test.py diff --git a/scripts/examples/Arduino/Nicla-Vision/03-Drawing/image_drawing_scale_up_test.py b/scripts/examples/04-Image-Processing/00-Drawing/image_drawing_scale_up_test.py similarity index 100% rename from scripts/examples/Arduino/Nicla-Vision/03-Drawing/image_drawing_scale_up_test.py rename to scripts/examples/04-Image-Processing/00-Drawing/image_drawing_scale_up_test.py diff --git a/scripts/examples/Arduino/Nicla-Vision/03-Drawing/image_drawing_with_custom_palette.py b/scripts/examples/04-Image-Processing/00-Drawing/image_drawing_with_custom_palette.py similarity index 100% rename from scripts/examples/Arduino/Nicla-Vision/03-Drawing/image_drawing_with_custom_palette.py rename to scripts/examples/04-Image-Processing/00-Drawing/image_drawing_with_custom_palette.py diff --git a/scripts/examples/Arduino/Nicla-Vision/03-Drawing/keypoints_drawing.py b/scripts/examples/04-Image-Processing/00-Drawing/keypoints_drawing.py similarity index 100% rename from scripts/examples/Arduino/Nicla-Vision/03-Drawing/keypoints_drawing.py rename to scripts/examples/04-Image-Processing/00-Drawing/keypoints_drawing.py diff --git a/scripts/examples/Arduino/Nicla-Vision/03-Drawing/line_drawing.py b/scripts/examples/04-Image-Processing/00-Drawing/line_drawing.py similarity index 100% rename from scripts/examples/Arduino/Nicla-Vision/03-Drawing/line_drawing.py rename to scripts/examples/04-Image-Processing/00-Drawing/line_drawing.py diff --git a/scripts/examples/Arduino/Nicla-Vision/03-Drawing/rectangle_drawing.py b/scripts/examples/04-Image-Processing/00-Drawing/rectangle_drawing.py similarity index 100% rename from scripts/examples/Arduino/Nicla-Vision/03-Drawing/rectangle_drawing.py rename to scripts/examples/04-Image-Processing/00-Drawing/rectangle_drawing.py diff --git a/scripts/examples/Arduino/Nicla-Vision/03-Drawing/text_drawing.py b/scripts/examples/04-Image-Processing/00-Drawing/text_drawing.py similarity index 100% rename from scripts/examples/Arduino/Nicla-Vision/03-Drawing/text_drawing.py rename to scripts/examples/04-Image-Processing/00-Drawing/text_drawing.py diff --git a/scripts/examples/Arduino/Nicla-Vision/04-Image-Filters/adaptive_histogram_equalization.py b/scripts/examples/04-Image-Processing/01-Image-Filters/adaptive_histogram_equalization.py similarity index 100% rename from scripts/examples/Arduino/Nicla-Vision/04-Image-Filters/adaptive_histogram_equalization.py rename to scripts/examples/04-Image-Processing/01-Image-Filters/adaptive_histogram_equalization.py diff --git a/scripts/examples/Arduino/Nicla-Vision/04-Image-Filters/blur_filter.py b/scripts/examples/04-Image-Processing/01-Image-Filters/blur_filter.py similarity index 100% rename from scripts/examples/Arduino/Nicla-Vision/04-Image-Filters/blur_filter.py rename to scripts/examples/04-Image-Processing/01-Image-Filters/blur_filter.py diff --git a/scripts/examples/Arduino/Nicla-Vision/04-Image-Filters/cartoon_filter.py 
b/scripts/examples/04-Image-Processing/01-Image-Filters/cartoon_filter.py similarity index 100% rename from scripts/examples/Arduino/Nicla-Vision/04-Image-Filters/cartoon_filter.py rename to scripts/examples/04-Image-Processing/01-Image-Filters/cartoon_filter.py diff --git a/scripts/examples/Arduino/Nicla-Vision/04-Image-Filters/color_bilateral_filter.py b/scripts/examples/04-Image-Processing/01-Image-Filters/color_bilateral_filter.py similarity index 100% rename from scripts/examples/Arduino/Nicla-Vision/04-Image-Filters/color_bilateral_filter.py rename to scripts/examples/04-Image-Processing/01-Image-Filters/color_bilateral_filter.py diff --git a/scripts/examples/Arduino/Nicla-Vision/04-Image-Filters/color_binary_filter.py b/scripts/examples/04-Image-Processing/01-Image-Filters/color_binary_filter.py similarity index 100% rename from scripts/examples/Arduino/Nicla-Vision/04-Image-Filters/color_binary_filter.py rename to scripts/examples/04-Image-Processing/01-Image-Filters/color_binary_filter.py diff --git a/scripts/examples/Arduino/Nicla-Vision/04-Image-Filters/color_light_removal.py b/scripts/examples/04-Image-Processing/01-Image-Filters/color_light_removal.py similarity index 100% rename from scripts/examples/Arduino/Nicla-Vision/04-Image-Filters/color_light_removal.py rename to scripts/examples/04-Image-Processing/01-Image-Filters/color_light_removal.py diff --git a/scripts/examples/Arduino/Nicla-Vision/04-Image-Filters/edge_filter.py b/scripts/examples/04-Image-Processing/01-Image-Filters/edge_filter.py similarity index 100% rename from scripts/examples/Arduino/Nicla-Vision/04-Image-Filters/edge_filter.py rename to scripts/examples/04-Image-Processing/01-Image-Filters/edge_filter.py diff --git a/scripts/examples/Arduino/Nicla-Vision/04-Image-Filters/erode_and_dilate.py b/scripts/examples/04-Image-Processing/01-Image-Filters/erode_and_dilate.py similarity index 100% rename from scripts/examples/Arduino/Nicla-Vision/04-Image-Filters/erode_and_dilate.py rename to scripts/examples/04-Image-Processing/01-Image-Filters/erode_and_dilate.py diff --git a/scripts/examples/Arduino/Nicla-Vision/04-Image-Filters/gamma_correction.py b/scripts/examples/04-Image-Processing/01-Image-Filters/gamma_correction.py similarity index 100% rename from scripts/examples/Arduino/Nicla-Vision/04-Image-Filters/gamma_correction.py rename to scripts/examples/04-Image-Processing/01-Image-Filters/gamma_correction.py diff --git a/scripts/examples/Arduino/Nicla-Vision/04-Image-Filters/grayscale_bilateral_filter.py b/scripts/examples/04-Image-Processing/01-Image-Filters/grayscale_bilateral_filter.py similarity index 100% rename from scripts/examples/Arduino/Nicla-Vision/04-Image-Filters/grayscale_bilateral_filter.py rename to scripts/examples/04-Image-Processing/01-Image-Filters/grayscale_bilateral_filter.py diff --git a/scripts/examples/Arduino/Nicla-Vision/04-Image-Filters/grayscale_binary_filter.py b/scripts/examples/04-Image-Processing/01-Image-Filters/grayscale_binary_filter.py similarity index 100% rename from scripts/examples/Arduino/Nicla-Vision/04-Image-Filters/grayscale_binary_filter.py rename to scripts/examples/04-Image-Processing/01-Image-Filters/grayscale_binary_filter.py diff --git a/scripts/examples/Arduino/Nicla-Vision/04-Image-Filters/grayscale_light_removal.py b/scripts/examples/04-Image-Processing/01-Image-Filters/grayscale_light_removal.py similarity index 100% rename from scripts/examples/Arduino/Nicla-Vision/04-Image-Filters/grayscale_light_removal.py rename to 
scripts/examples/04-Image-Processing/01-Image-Filters/grayscale_light_removal.py diff --git a/scripts/examples/Arduino/Nicla-Vision/04-Image-Filters/histogram_equalization.py b/scripts/examples/04-Image-Processing/01-Image-Filters/histogram_equalization.py similarity index 100% rename from scripts/examples/Arduino/Nicla-Vision/04-Image-Filters/histogram_equalization.py rename to scripts/examples/04-Image-Processing/01-Image-Filters/histogram_equalization.py diff --git a/scripts/examples/Arduino/Nicla-Vision/04-Image-Filters/kernel_filters.py b/scripts/examples/04-Image-Processing/01-Image-Filters/kernel_filters.py similarity index 100% rename from scripts/examples/Arduino/Nicla-Vision/04-Image-Filters/kernel_filters.py rename to scripts/examples/04-Image-Processing/01-Image-Filters/kernel_filters.py diff --git a/scripts/examples/Arduino/Nicla-Vision/04-Image-Filters/lens_correction.py b/scripts/examples/04-Image-Processing/01-Image-Filters/lens_correction.py similarity index 100% rename from scripts/examples/Arduino/Nicla-Vision/04-Image-Filters/lens_correction.py rename to scripts/examples/04-Image-Processing/01-Image-Filters/lens_correction.py diff --git a/scripts/examples/Arduino/Nicla-Vision/04-Image-Filters/linear_polar.py b/scripts/examples/04-Image-Processing/01-Image-Filters/linear_polar.py similarity index 100% rename from scripts/examples/Arduino/Nicla-Vision/04-Image-Filters/linear_polar.py rename to scripts/examples/04-Image-Processing/01-Image-Filters/linear_polar.py diff --git a/scripts/examples/Arduino/Nicla-Vision/04-Image-Filters/log_polar.py b/scripts/examples/04-Image-Processing/01-Image-Filters/log_polar.py similarity index 100% rename from scripts/examples/Arduino/Nicla-Vision/04-Image-Filters/log_polar.py rename to scripts/examples/04-Image-Processing/01-Image-Filters/log_polar.py diff --git a/scripts/examples/Arduino/Nicla-Vision/04-Image-Filters/mean_adaptive_threshold_filter.py b/scripts/examples/04-Image-Processing/01-Image-Filters/mean_adaptive_threshold_filter.py similarity index 100% rename from scripts/examples/Arduino/Nicla-Vision/04-Image-Filters/mean_adaptive_threshold_filter.py rename to scripts/examples/04-Image-Processing/01-Image-Filters/mean_adaptive_threshold_filter.py diff --git a/scripts/examples/Arduino/Nicla-Vision/04-Image-Filters/mean_filter.py b/scripts/examples/04-Image-Processing/01-Image-Filters/mean_filter.py similarity index 100% rename from scripts/examples/Arduino/Nicla-Vision/04-Image-Filters/mean_filter.py rename to scripts/examples/04-Image-Processing/01-Image-Filters/mean_filter.py diff --git a/scripts/examples/Arduino/Nicla-Vision/04-Image-Filters/median_adaptive_threshold_filter.py b/scripts/examples/04-Image-Processing/01-Image-Filters/median_adaptive_threshold_filter.py similarity index 100% rename from scripts/examples/Arduino/Nicla-Vision/04-Image-Filters/median_adaptive_threshold_filter.py rename to scripts/examples/04-Image-Processing/01-Image-Filters/median_adaptive_threshold_filter.py diff --git a/scripts/examples/Arduino/Nicla-Vision/04-Image-Filters/median_filter.py b/scripts/examples/04-Image-Processing/01-Image-Filters/median_filter.py similarity index 100% rename from scripts/examples/Arduino/Nicla-Vision/04-Image-Filters/median_filter.py rename to scripts/examples/04-Image-Processing/01-Image-Filters/median_filter.py diff --git a/scripts/examples/Arduino/Nicla-Vision/04-Image-Filters/midpoint_adaptive_threshold_filter.py b/scripts/examples/04-Image-Processing/01-Image-Filters/midpoint_adaptive_threshold_filter.py 
similarity index 100% rename from scripts/examples/Arduino/Nicla-Vision/04-Image-Filters/midpoint_adaptive_threshold_filter.py rename to scripts/examples/04-Image-Processing/01-Image-Filters/midpoint_adaptive_threshold_filter.py diff --git a/scripts/examples/Arduino/Nicla-Vision/04-Image-Filters/midpoint_filter.py b/scripts/examples/04-Image-Processing/01-Image-Filters/midpoint_filter.py similarity index 100% rename from scripts/examples/Arduino/Nicla-Vision/04-Image-Filters/midpoint_filter.py rename to scripts/examples/04-Image-Processing/01-Image-Filters/midpoint_filter.py diff --git a/scripts/examples/Arduino/Nicla-Vision/04-Image-Filters/mode_adaptive_threshold_filter.py b/scripts/examples/04-Image-Processing/01-Image-Filters/mode_adaptive_threshold_filter.py similarity index 100% rename from scripts/examples/Arduino/Nicla-Vision/04-Image-Filters/mode_adaptive_threshold_filter.py rename to scripts/examples/04-Image-Processing/01-Image-Filters/mode_adaptive_threshold_filter.py diff --git a/scripts/examples/Arduino/Nicla-Vision/04-Image-Filters/mode_filter.py b/scripts/examples/04-Image-Processing/01-Image-Filters/mode_filter.py similarity index 100% rename from scripts/examples/Arduino/Nicla-Vision/04-Image-Filters/mode_filter.py rename to scripts/examples/04-Image-Processing/01-Image-Filters/mode_filter.py diff --git a/scripts/examples/Arduino/Nicla-Vision/04-Image-Filters/negative.py b/scripts/examples/04-Image-Processing/01-Image-Filters/negative.py similarity index 100% rename from scripts/examples/Arduino/Nicla-Vision/04-Image-Filters/negative.py rename to scripts/examples/04-Image-Processing/01-Image-Filters/negative.py diff --git a/scripts/examples/Arduino/Nicla-Vision/04-Image-Filters/perspective_and_rotation_correction.py b/scripts/examples/04-Image-Processing/01-Image-Filters/perspective_and_rotation_correction.py similarity index 100% rename from scripts/examples/Arduino/Nicla-Vision/04-Image-Filters/perspective_and_rotation_correction.py rename to scripts/examples/04-Image-Processing/01-Image-Filters/perspective_and_rotation_correction.py diff --git a/scripts/examples/Arduino/Nicla-Vision/04-Image-Filters/perspective_correction.py b/scripts/examples/04-Image-Processing/01-Image-Filters/perspective_correction.py similarity index 100% rename from scripts/examples/Arduino/Nicla-Vision/04-Image-Filters/perspective_correction.py rename to scripts/examples/04-Image-Processing/01-Image-Filters/perspective_correction.py diff --git a/scripts/examples/Arduino/Nicla-Vision/04-Image-Filters/rotation_correction.py b/scripts/examples/04-Image-Processing/01-Image-Filters/rotation_correction.py similarity index 100% rename from scripts/examples/Arduino/Nicla-Vision/04-Image-Filters/rotation_correction.py rename to scripts/examples/04-Image-Processing/01-Image-Filters/rotation_correction.py diff --git a/scripts/examples/Arduino/Nicla-Vision/04-Image-Filters/sharpen_filter.py b/scripts/examples/04-Image-Processing/01-Image-Filters/sharpen_filter.py similarity index 100% rename from scripts/examples/Arduino/Nicla-Vision/04-Image-Filters/sharpen_filter.py rename to scripts/examples/04-Image-Processing/01-Image-Filters/sharpen_filter.py diff --git a/scripts/examples/Arduino/Nicla-Vision/04-Image-Filters/ulab.py b/scripts/examples/04-Image-Processing/01-Image-Filters/ulab.py similarity index 100% rename from scripts/examples/Arduino/Nicla-Vision/04-Image-Filters/ulab.py rename to scripts/examples/04-Image-Processing/01-Image-Filters/ulab.py diff --git 
a/scripts/examples/Arduino/Nicla-Vision/04-Image-Filters/unsharp_filter.py b/scripts/examples/04-Image-Processing/01-Image-Filters/unsharp_filter.py similarity index 100% rename from scripts/examples/Arduino/Nicla-Vision/04-Image-Filters/unsharp_filter.py rename to scripts/examples/04-Image-Processing/01-Image-Filters/unsharp_filter.py diff --git a/scripts/examples/Arduino/Nicla-Vision/04-Image-Filters/vflip_hmirror_transpose.py b/scripts/examples/04-Image-Processing/01-Image-Filters/vflip_hmirror_transpose.py similarity index 100% rename from scripts/examples/Arduino/Nicla-Vision/04-Image-Filters/vflip_hmirror_transpose.py rename to scripts/examples/04-Image-Processing/01-Image-Filters/vflip_hmirror_transpose.py diff --git a/scripts/examples/OpenMV/10-Color-Tracking/automatic_grayscale_color_tracking.py b/scripts/examples/04-Image-Processing/02-Color-Tracking/automatic_grayscale_color_tracking.py similarity index 100% rename from scripts/examples/OpenMV/10-Color-Tracking/automatic_grayscale_color_tracking.py rename to scripts/examples/04-Image-Processing/02-Color-Tracking/automatic_grayscale_color_tracking.py diff --git a/scripts/examples/OpenMV/10-Color-Tracking/automatic_rgb565_color_tracking.py b/scripts/examples/04-Image-Processing/02-Color-Tracking/automatic_rgb565_color_tracking.py similarity index 100% rename from scripts/examples/OpenMV/10-Color-Tracking/automatic_rgb565_color_tracking.py rename to scripts/examples/04-Image-Processing/02-Color-Tracking/automatic_rgb565_color_tracking.py diff --git a/scripts/examples/OpenMV/10-Color-Tracking/black_grayscale_line_following.py b/scripts/examples/04-Image-Processing/02-Color-Tracking/black_grayscale_line_following.py similarity index 100% rename from scripts/examples/OpenMV/10-Color-Tracking/black_grayscale_line_following.py rename to scripts/examples/04-Image-Processing/02-Color-Tracking/black_grayscale_line_following.py diff --git a/scripts/examples/OpenMV/10-Color-Tracking/image_histogram_info.py b/scripts/examples/04-Image-Processing/02-Color-Tracking/image_histogram_info.py similarity index 100% rename from scripts/examples/OpenMV/10-Color-Tracking/image_histogram_info.py rename to scripts/examples/04-Image-Processing/02-Color-Tracking/image_histogram_info.py diff --git a/scripts/examples/OpenMV/10-Color-Tracking/image_statistics_info.py b/scripts/examples/04-Image-Processing/02-Color-Tracking/image_statistics_info.py similarity index 100% rename from scripts/examples/OpenMV/10-Color-Tracking/image_statistics_info.py rename to scripts/examples/04-Image-Processing/02-Color-Tracking/image_statistics_info.py diff --git a/scripts/examples/OpenMV/10-Color-Tracking/ir_beacon_grayscale_tracking.py b/scripts/examples/04-Image-Processing/02-Color-Tracking/ir_beacon_grayscale_tracking.py similarity index 100% rename from scripts/examples/OpenMV/10-Color-Tracking/ir_beacon_grayscale_tracking.py rename to scripts/examples/04-Image-Processing/02-Color-Tracking/ir_beacon_grayscale_tracking.py diff --git a/scripts/examples/OpenMV/10-Color-Tracking/ir_beacon_rgb565_tracking.py b/scripts/examples/04-Image-Processing/02-Color-Tracking/ir_beacon_rgb565_tracking.py similarity index 100% rename from scripts/examples/OpenMV/10-Color-Tracking/ir_beacon_rgb565_tracking.py rename to scripts/examples/04-Image-Processing/02-Color-Tracking/ir_beacon_rgb565_tracking.py diff --git a/scripts/examples/OpenMV/10-Color-Tracking/multi_color_blob_tracking.py b/scripts/examples/04-Image-Processing/02-Color-Tracking/multi_color_blob_tracking.py similarity index 100% 
rename from scripts/examples/OpenMV/10-Color-Tracking/multi_color_blob_tracking.py rename to scripts/examples/04-Image-Processing/02-Color-Tracking/multi_color_blob_tracking.py diff --git a/scripts/examples/OpenMV/10-Color-Tracking/multi_color_code_tracking.py b/scripts/examples/04-Image-Processing/02-Color-Tracking/multi_color_code_tracking.py similarity index 100% rename from scripts/examples/OpenMV/10-Color-Tracking/multi_color_code_tracking.py rename to scripts/examples/04-Image-Processing/02-Color-Tracking/multi_color_code_tracking.py diff --git a/scripts/examples/OpenMV/10-Color-Tracking/single_color_code_tracking.py b/scripts/examples/04-Image-Processing/02-Color-Tracking/single_color_code_tracking.py similarity index 100% rename from scripts/examples/OpenMV/10-Color-Tracking/single_color_code_tracking.py rename to scripts/examples/04-Image-Processing/02-Color-Tracking/single_color_code_tracking.py diff --git a/scripts/examples/OpenMV/10-Color-Tracking/single_color_grayscale_blob_tracking.py b/scripts/examples/04-Image-Processing/02-Color-Tracking/single_color_grayscale_blob_tracking.py similarity index 100% rename from scripts/examples/OpenMV/10-Color-Tracking/single_color_grayscale_blob_tracking.py rename to scripts/examples/04-Image-Processing/02-Color-Tracking/single_color_grayscale_blob_tracking.py diff --git a/scripts/examples/OpenMV/10-Color-Tracking/single_color_rgb565_blob_tracking.py b/scripts/examples/04-Image-Processing/02-Color-Tracking/single_color_rgb565_blob_tracking.py similarity index 100% rename from scripts/examples/OpenMV/10-Color-Tracking/single_color_rgb565_blob_tracking.py rename to scripts/examples/04-Image-Processing/02-Color-Tracking/single_color_rgb565_blob_tracking.py diff --git a/scripts/examples/OpenMV/20-Frame-Differencing/in_memory_advanced_frame_differencing.py b/scripts/examples/04-Image-Processing/03-Frame-Differencing/in_memory_advanced_frame_differencing.py similarity index 100% rename from scripts/examples/OpenMV/20-Frame-Differencing/in_memory_advanced_frame_differencing.py rename to scripts/examples/04-Image-Processing/03-Frame-Differencing/in_memory_advanced_frame_differencing.py diff --git a/scripts/examples/OpenMV/20-Frame-Differencing/in_memory_basic_frame_differencing.py b/scripts/examples/04-Image-Processing/03-Frame-Differencing/in_memory_basic_frame_differencing.py similarity index 100% rename from scripts/examples/OpenMV/20-Frame-Differencing/in_memory_basic_frame_differencing.py rename to scripts/examples/04-Image-Processing/03-Frame-Differencing/in_memory_basic_frame_differencing.py diff --git a/scripts/examples/OpenMV/20-Frame-Differencing/in_memory_shadow_removal.py b/scripts/examples/04-Image-Processing/03-Frame-Differencing/in_memory_shadow_removal.py similarity index 100% rename from scripts/examples/OpenMV/20-Frame-Differencing/in_memory_shadow_removal.py rename to scripts/examples/04-Image-Processing/03-Frame-Differencing/in_memory_shadow_removal.py diff --git a/scripts/examples/OpenMV/20-Frame-Differencing/in_memory_structural_similarity.py b/scripts/examples/04-Image-Processing/03-Frame-Differencing/in_memory_structural_similarity.py similarity index 100% rename from scripts/examples/OpenMV/20-Frame-Differencing/in_memory_structural_similarity.py rename to scripts/examples/04-Image-Processing/03-Frame-Differencing/in_memory_structural_similarity.py diff --git a/scripts/examples/OpenMV/20-Frame-Differencing/on_disk_advanced_frame_differencing.py 
b/scripts/examples/04-Image-Processing/03-Frame-Differencing/on_disk_advanced_frame_differencing.py similarity index 100% rename from scripts/examples/OpenMV/20-Frame-Differencing/on_disk_advanced_frame_differencing.py rename to scripts/examples/04-Image-Processing/03-Frame-Differencing/on_disk_advanced_frame_differencing.py diff --git a/scripts/examples/OpenMV/20-Frame-Differencing/on_disk_basic_frame_differencing.py b/scripts/examples/04-Image-Processing/03-Frame-Differencing/on_disk_basic_frame_differencing.py similarity index 100% rename from scripts/examples/OpenMV/20-Frame-Differencing/on_disk_basic_frame_differencing.py rename to scripts/examples/04-Image-Processing/03-Frame-Differencing/on_disk_basic_frame_differencing.py diff --git a/scripts/examples/OpenMV/20-Frame-Differencing/on_disk_shadow_removal.py b/scripts/examples/04-Image-Processing/03-Frame-Differencing/on_disk_shadow_removal.py similarity index 100% rename from scripts/examples/OpenMV/20-Frame-Differencing/on_disk_shadow_removal.py rename to scripts/examples/04-Image-Processing/03-Frame-Differencing/on_disk_shadow_removal.py diff --git a/scripts/examples/OpenMV/20-Frame-Differencing/on_disk_structural_similarity.py b/scripts/examples/04-Image-Processing/03-Frame-Differencing/on_disk_structural_similarity.py similarity index 100% rename from scripts/examples/OpenMV/20-Frame-Differencing/on_disk_structural_similarity.py rename to scripts/examples/04-Image-Processing/03-Frame-Differencing/on_disk_structural_similarity.py diff --git a/scripts/examples/OpenMV/25-Machine-Learning/tf_face_collection.py b/scripts/examples/05-Machine-Learning/00-TensorFlow/tf_face_collection.py similarity index 100% rename from scripts/examples/OpenMV/25-Machine-Learning/tf_face_collection.py rename to scripts/examples/05-Machine-Learning/00-TensorFlow/tf_face_collection.py diff --git a/scripts/examples/OpenMV/25-Machine-Learning/tf_face_recognition.py b/scripts/examples/05-Machine-Learning/00-TensorFlow/tf_face_recognition.py similarity index 100% rename from scripts/examples/OpenMV/25-Machine-Learning/tf_face_recognition.py rename to scripts/examples/05-Machine-Learning/00-TensorFlow/tf_face_recognition.py diff --git a/scripts/examples/OpenMV/25-Machine-Learning/tf_mobilenet_search_just_center.py b/scripts/examples/05-Machine-Learning/00-TensorFlow/tf_mobilenet_search_just_center.py similarity index 100% rename from scripts/examples/OpenMV/25-Machine-Learning/tf_mobilenet_search_just_center.py rename to scripts/examples/05-Machine-Learning/00-TensorFlow/tf_mobilenet_search_just_center.py diff --git a/scripts/examples/OpenMV/25-Machine-Learning/tf_mobilenet_search_whole_window.py b/scripts/examples/05-Machine-Learning/00-TensorFlow/tf_mobilenet_search_whole_window.py similarity index 100% rename from scripts/examples/OpenMV/25-Machine-Learning/tf_mobilenet_search_whole_window.py rename to scripts/examples/05-Machine-Learning/00-TensorFlow/tf_mobilenet_search_whole_window.py diff --git a/scripts/examples/Arduino/Nicla-Vision/25-Machine-Learning/tf_object_detection.py b/scripts/examples/05-Machine-Learning/00-TensorFlow/tf_object_detection.py similarity index 100% rename from scripts/examples/Arduino/Nicla-Vision/25-Machine-Learning/tf_object_detection.py rename to scripts/examples/05-Machine-Learning/00-TensorFlow/tf_object_detection.py diff --git a/scripts/examples/Arduino/Nicla-Vision/25-Machine-Learning/tf_person_detection_search_just_center.py b/scripts/examples/05-Machine-Learning/00-TensorFlow/tf_person_detection_search_just_center.py 
similarity index 100% rename from scripts/examples/Arduino/Nicla-Vision/25-Machine-Learning/tf_person_detection_search_just_center.py rename to scripts/examples/05-Machine-Learning/00-TensorFlow/tf_person_detection_search_just_center.py diff --git a/scripts/examples/Arduino/Nicla-Vision/25-Machine-Learning/tf_person_detection_search_whole_window.py b/scripts/examples/05-Machine-Learning/00-TensorFlow/tf_person_detection_search_whole_window.py similarity index 100% rename from scripts/examples/Arduino/Nicla-Vision/25-Machine-Learning/tf_person_detection_search_whole_window.py rename to scripts/examples/05-Machine-Learning/00-TensorFlow/tf_person_detection_search_whole_window.py diff --git a/scripts/examples/Arduino/Portenta-H7/25-Machine-Learning/nn_stm32cubeai.py b/scripts/examples/05-Machine-Learning/01-ST-CubeAI/nn_stm32cubeai.py similarity index 100% rename from scripts/examples/Arduino/Portenta-H7/25-Machine-Learning/nn_stm32cubeai.py rename to scripts/examples/05-Machine-Learning/01-ST-CubeAI/nn_stm32cubeai.py diff --git a/scripts/examples/Arduino/Portenta-H7/07-Face-Detection/face_detection.py b/scripts/examples/05-Machine-Learning/02-Haar-Cascade/face_detection.py similarity index 100% rename from scripts/examples/Arduino/Portenta-H7/07-Face-Detection/face_detection.py rename to scripts/examples/05-Machine-Learning/02-Haar-Cascade/face_detection.py diff --git a/scripts/examples/Arduino/Portenta-H7/08-Eye-Tracking/face_eye_detection.py b/scripts/examples/05-Machine-Learning/02-Haar-Cascade/face_eye_detection.py similarity index 100% rename from scripts/examples/Arduino/Portenta-H7/08-Eye-Tracking/face_eye_detection.py rename to scripts/examples/05-Machine-Learning/02-Haar-Cascade/face_eye_detection.py diff --git a/scripts/examples/Arduino/Nicla-Vision/07-Face-Detection/face_recognition.py b/scripts/examples/05-Machine-Learning/02-Haar-Cascade/face_recognition.py similarity index 100% rename from scripts/examples/Arduino/Nicla-Vision/07-Face-Detection/face_recognition.py rename to scripts/examples/05-Machine-Learning/02-Haar-Cascade/face_recognition.py diff --git a/scripts/examples/Arduino/Portenta-H7/07-Face-Detection/face_tracking.py b/scripts/examples/05-Machine-Learning/02-Haar-Cascade/face_tracking.py similarity index 100% rename from scripts/examples/Arduino/Portenta-H7/07-Face-Detection/face_tracking.py rename to scripts/examples/05-Machine-Learning/02-Haar-Cascade/face_tracking.py diff --git a/scripts/examples/Arduino/Portenta-H7/08-Eye-Tracking/iris_detection.py b/scripts/examples/05-Machine-Learning/02-Haar-Cascade/iris_detection.py similarity index 100% rename from scripts/examples/Arduino/Portenta-H7/08-Eye-Tracking/iris_detection.py rename to scripts/examples/05-Machine-Learning/02-Haar-Cascade/iris_detection.py diff --git a/scripts/examples/Arduino/Portenta-H7/09-Feature-Detection/edges.py b/scripts/examples/06-Feature-Detection/edges.py similarity index 100% rename from scripts/examples/Arduino/Portenta-H7/09-Feature-Detection/edges.py rename to scripts/examples/06-Feature-Detection/edges.py diff --git a/scripts/examples/Arduino/Nicla-Vision/09-Feature-Detection/find_circles.py b/scripts/examples/06-Feature-Detection/find_circles.py similarity index 100% rename from scripts/examples/Arduino/Nicla-Vision/09-Feature-Detection/find_circles.py rename to scripts/examples/06-Feature-Detection/find_circles.py diff --git a/scripts/examples/Arduino/Nicla-Vision/09-Feature-Detection/find_line_segments.py b/scripts/examples/06-Feature-Detection/find_line_segments.py similarity 
index 100% rename from scripts/examples/Arduino/Nicla-Vision/09-Feature-Detection/find_line_segments.py rename to scripts/examples/06-Feature-Detection/find_line_segments.py diff --git a/scripts/examples/Arduino/Nicla-Vision/09-Feature-Detection/find_lines.py b/scripts/examples/06-Feature-Detection/find_lines.py similarity index 100% rename from scripts/examples/Arduino/Nicla-Vision/09-Feature-Detection/find_lines.py rename to scripts/examples/06-Feature-Detection/find_lines.py diff --git a/scripts/examples/Arduino/Nicla-Vision/09-Feature-Detection/find_rects.py b/scripts/examples/06-Feature-Detection/find_rects.py similarity index 100% rename from scripts/examples/Arduino/Nicla-Vision/09-Feature-Detection/find_rects.py rename to scripts/examples/06-Feature-Detection/find_rects.py diff --git a/scripts/examples/Arduino/Portenta-H7/09-Feature-Detection/hog.py b/scripts/examples/06-Feature-Detection/hog.py similarity index 100% rename from scripts/examples/Arduino/Portenta-H7/09-Feature-Detection/hog.py rename to scripts/examples/06-Feature-Detection/hog.py diff --git a/scripts/examples/Arduino/Portenta-H7/09-Feature-Detection/keypoints.py b/scripts/examples/06-Feature-Detection/keypoints.py similarity index 100% rename from scripts/examples/Arduino/Portenta-H7/09-Feature-Detection/keypoints.py rename to scripts/examples/06-Feature-Detection/keypoints.py diff --git a/scripts/examples/Arduino/Portenta-H7/09-Feature-Detection/keypoints_save.py b/scripts/examples/06-Feature-Detection/keypoints_save.py similarity index 100% rename from scripts/examples/Arduino/Portenta-H7/09-Feature-Detection/keypoints_save.py rename to scripts/examples/06-Feature-Detection/keypoints_save.py diff --git a/scripts/examples/Arduino/Portenta-H7/09-Feature-Detection/lbp.py b/scripts/examples/06-Feature-Detection/lbp.py similarity index 100% rename from scripts/examples/Arduino/Portenta-H7/09-Feature-Detection/lbp.py rename to scripts/examples/06-Feature-Detection/lbp.py diff --git a/scripts/examples/Arduino/Nicla-Vision/09-Feature-Detection/linear_regression_fast.py b/scripts/examples/06-Feature-Detection/linear_regression_fast.py similarity index 100% rename from scripts/examples/Arduino/Nicla-Vision/09-Feature-Detection/linear_regression_fast.py rename to scripts/examples/06-Feature-Detection/linear_regression_fast.py diff --git a/scripts/examples/Arduino/Nicla-Vision/09-Feature-Detection/linear_regression_robust.py b/scripts/examples/06-Feature-Detection/linear_regression_robust.py similarity index 100% rename from scripts/examples/Arduino/Nicla-Vision/09-Feature-Detection/linear_regression_robust.py rename to scripts/examples/06-Feature-Detection/linear_regression_robust.py diff --git a/scripts/examples/OpenMV/09-Feature-Detection/selective_search.py b/scripts/examples/06-Feature-Detection/selective_search.py similarity index 100% rename from scripts/examples/OpenMV/09-Feature-Detection/selective_search.py rename to scripts/examples/06-Feature-Detection/selective_search.py diff --git a/scripts/examples/Arduino/Portenta-H7/09-Feature-Detection/template_matching.py b/scripts/examples/06-Feature-Detection/template_matching.py similarity index 100% rename from scripts/examples/Arduino/Portenta-H7/09-Feature-Detection/template_matching.py rename to scripts/examples/06-Feature-Detection/template_matching.py diff --git a/scripts/examples/OpenMV/16-Codes/find_barcodes.py b/scripts/examples/07-Barcodes/find_barcodes.py similarity index 100% rename from scripts/examples/OpenMV/16-Codes/find_barcodes.py rename to 
scripts/examples/07-Barcodes/find_barcodes.py diff --git a/scripts/examples/OpenMV/16-Codes/find_datamatrices.py b/scripts/examples/07-Barcodes/find_datamatrices.py similarity index 100% rename from scripts/examples/OpenMV/16-Codes/find_datamatrices.py rename to scripts/examples/07-Barcodes/find_datamatrices.py diff --git a/scripts/examples/OpenMV/16-Codes/find_datamatrices_w_lens_zoom.py b/scripts/examples/07-Barcodes/find_datamatrices_w_lens_zoom.py similarity index 100% rename from scripts/examples/OpenMV/16-Codes/find_datamatrices_w_lens_zoom.py rename to scripts/examples/07-Barcodes/find_datamatrices_w_lens_zoom.py diff --git a/scripts/examples/OpenMV/16-Codes/qrcodes_with_lens_corr.py b/scripts/examples/07-Barcodes/qrcodes_with_lens_corr.py similarity index 100% rename from scripts/examples/OpenMV/16-Codes/qrcodes_with_lens_corr.py rename to scripts/examples/07-Barcodes/qrcodes_with_lens_corr.py diff --git a/scripts/examples/Arduino/Portenta-H7/16-Codes/qrcodes_with_lens_zoom.py b/scripts/examples/07-Barcodes/qrcodes_with_lens_zoom.py similarity index 100% rename from scripts/examples/Arduino/Portenta-H7/16-Codes/qrcodes_with_lens_zoom.py rename to scripts/examples/07-Barcodes/qrcodes_with_lens_zoom.py diff --git a/scripts/examples/OpenMV/26-April-Tags/find_apriltags.py b/scripts/examples/08-April-Tags/find_apriltags.py similarity index 100% rename from scripts/examples/OpenMV/26-April-Tags/find_apriltags.py rename to scripts/examples/08-April-Tags/find_apriltags.py diff --git a/scripts/examples/OpenMV/26-April-Tags/find_apriltags_3d_pose.py b/scripts/examples/08-April-Tags/find_apriltags_3d_pose.py similarity index 100% rename from scripts/examples/OpenMV/26-April-Tags/find_apriltags_3d_pose.py rename to scripts/examples/08-April-Tags/find_apriltags_3d_pose.py diff --git a/scripts/examples/OpenMV/26-April-Tags/find_apriltags_max_res.py b/scripts/examples/08-April-Tags/find_apriltags_max_res.py similarity index 100% rename from scripts/examples/OpenMV/26-April-Tags/find_apriltags_max_res.py rename to scripts/examples/08-April-Tags/find_apriltags_max_res.py diff --git a/scripts/examples/OpenMV/26-April-Tags/find_apriltags_w_lens_zoom.py b/scripts/examples/08-April-Tags/find_apriltags_w_lens_zoom.py similarity index 100% rename from scripts/examples/OpenMV/26-April-Tags/find_apriltags_w_lens_zoom.py rename to scripts/examples/08-April-Tags/find_apriltags_w_lens_zoom.py diff --git a/scripts/examples/OpenMV/26-April-Tags/find_small_apriltags.py b/scripts/examples/08-April-Tags/find_small_apriltags.py similarity index 100% rename from scripts/examples/OpenMV/26-April-Tags/find_small_apriltags.py rename to scripts/examples/08-April-Tags/find_small_apriltags.py diff --git a/scripts/examples/Arduino/Portenta-H7/00-Arduino/arduino_i2c_slave.py b/scripts/examples/09-Interface-Library/00-Arduino/arduino_i2c_slave.py similarity index 100% rename from scripts/examples/Arduino/Portenta-H7/00-Arduino/arduino_i2c_slave.py rename to scripts/examples/09-Interface-Library/00-Arduino/arduino_i2c_slave.py diff --git a/scripts/examples/Arduino/Portenta-H7/00-Arduino/arduino_spi_slave.py b/scripts/examples/09-Interface-Library/00-Arduino/arduino_spi_slave.py similarity index 100% rename from scripts/examples/Arduino/Portenta-H7/00-Arduino/arduino_spi_slave.py rename to scripts/examples/09-Interface-Library/00-Arduino/arduino_spi_slave.py diff --git a/scripts/examples/Arduino/Portenta-H7/00-Arduino/arduino_uart.py b/scripts/examples/09-Interface-Library/00-Arduino/arduino_uart.py similarity index 100% 
rename from scripts/examples/Arduino/Portenta-H7/00-Arduino/arduino_uart.py rename to scripts/examples/09-Interface-Library/00-Arduino/arduino_uart.py diff --git a/scripts/examples/Arduino/Portenta-H7/17-Pixy-Emulation/apriltags_pixy_i2c_emulation.py b/scripts/examples/09-Interface-Library/01-Pixy-Emulation/apriltags_pixy_i2c_emulation.py similarity index 100% rename from scripts/examples/Arduino/Portenta-H7/17-Pixy-Emulation/apriltags_pixy_i2c_emulation.py rename to scripts/examples/09-Interface-Library/01-Pixy-Emulation/apriltags_pixy_i2c_emulation.py diff --git a/scripts/examples/Arduino/Portenta-H7/17-Pixy-Emulation/apriltags_pixy_spi_emulation.py b/scripts/examples/09-Interface-Library/01-Pixy-Emulation/apriltags_pixy_spi_emulation.py similarity index 100% rename from scripts/examples/Arduino/Portenta-H7/17-Pixy-Emulation/apriltags_pixy_spi_emulation.py rename to scripts/examples/09-Interface-Library/01-Pixy-Emulation/apriltags_pixy_spi_emulation.py diff --git a/scripts/examples/Arduino/Portenta-H7/17-Pixy-Emulation/apriltags_pixy_uart_emulation.py b/scripts/examples/09-Interface-Library/01-Pixy-Emulation/apriltags_pixy_uart_emulation.py similarity index 100% rename from scripts/examples/Arduino/Portenta-H7/17-Pixy-Emulation/apriltags_pixy_uart_emulation.py rename to scripts/examples/09-Interface-Library/01-Pixy-Emulation/apriltags_pixy_uart_emulation.py diff --git a/scripts/examples/OpenMV/17-Pixy-Emulation/pixy_i2c_emulation.py b/scripts/examples/09-Interface-Library/01-Pixy-Emulation/pixy_i2c_emulation.py similarity index 100% rename from scripts/examples/OpenMV/17-Pixy-Emulation/pixy_i2c_emulation.py rename to scripts/examples/09-Interface-Library/01-Pixy-Emulation/pixy_i2c_emulation.py diff --git a/scripts/examples/OpenMV/17-Pixy-Emulation/pixy_spi_emulation.py b/scripts/examples/09-Interface-Library/01-Pixy-Emulation/pixy_spi_emulation.py similarity index 100% rename from scripts/examples/OpenMV/17-Pixy-Emulation/pixy_spi_emulation.py rename to scripts/examples/09-Interface-Library/01-Pixy-Emulation/pixy_spi_emulation.py diff --git a/scripts/examples/OpenMV/17-Pixy-Emulation/pixy_uart_emulation.py b/scripts/examples/09-Interface-Library/01-Pixy-Emulation/pixy_uart_emulation.py similarity index 100% rename from scripts/examples/OpenMV/17-Pixy-Emulation/pixy_uart_emulation.py rename to scripts/examples/09-Interface-Library/01-Pixy-Emulation/pixy_uart_emulation.py diff --git a/scripts/examples/Arduino/Portenta-H7/18-MAVLink/mavlink_apriltags_landing_target.py b/scripts/examples/09-Interface-Library/02-MAVLink/mavlink_apriltags_landing_target.py similarity index 100% rename from scripts/examples/Arduino/Portenta-H7/18-MAVLink/mavlink_apriltags_landing_target.py rename to scripts/examples/09-Interface-Library/02-MAVLink/mavlink_apriltags_landing_target.py diff --git a/scripts/examples/OpenMV/18-MAVLink/mavlink_opticalflow.py b/scripts/examples/09-Interface-Library/02-MAVLink/mavlink_opticalflow.py similarity index 100% rename from scripts/examples/OpenMV/18-MAVLink/mavlink_opticalflow.py rename to scripts/examples/09-Interface-Library/02-MAVLink/mavlink_opticalflow.py diff --git a/scripts/examples/Arduino/Portenta-H7/32-modbus/modbus_apriltag.py b/scripts/examples/09-Interface-Library/03-Modbus/modbus_apriltag.py similarity index 100% rename from scripts/examples/Arduino/Portenta-H7/32-modbus/modbus_apriltag.py rename to scripts/examples/09-Interface-Library/03-Modbus/modbus_apriltag.py diff --git a/scripts/examples/Arduino/Portenta-H7/32-modbus/modbus_rtu_slave.py 
b/scripts/examples/09-Interface-Library/03-Modbus/modbus_rtu_slave.py similarity index 100% rename from scripts/examples/Arduino/Portenta-H7/32-modbus/modbus_rtu_slave.py rename to scripts/examples/09-Interface-Library/03-Modbus/modbus_rtu_slave.py diff --git a/scripts/examples/Arduino/Nicla-Vision/34-Remote-Control/image_transfer_jpg_as_the_remote_device_for_your_computer.py b/scripts/examples/10-RPC-Library/34-Remote-Control/image_transfer_jpg_as_the_remote_device_for_your_computer.py similarity index 100% rename from scripts/examples/Arduino/Nicla-Vision/34-Remote-Control/image_transfer_jpg_as_the_remote_device_for_your_computer.py rename to scripts/examples/10-RPC-Library/34-Remote-Control/image_transfer_jpg_as_the_remote_device_for_your_computer.py diff --git a/scripts/examples/Arduino/Nicla-Vision/34-Remote-Control/image_transfer_jpg_streaming_as_the_remote_device_for_your_computer.py b/scripts/examples/10-RPC-Library/34-Remote-Control/image_transfer_jpg_streaming_as_the_remote_device_for_your_computer.py similarity index 100% rename from scripts/examples/Arduino/Nicla-Vision/34-Remote-Control/image_transfer_jpg_streaming_as_the_remote_device_for_your_computer.py rename to scripts/examples/10-RPC-Library/34-Remote-Control/image_transfer_jpg_streaming_as_the_remote_device_for_your_computer.py diff --git a/scripts/examples/Arduino/Nicla-Vision/34-Remote-Control/image_transfer_raw_as_the_controller_device.py b/scripts/examples/10-RPC-Library/34-Remote-Control/image_transfer_raw_as_the_controller_device.py similarity index 100% rename from scripts/examples/Arduino/Nicla-Vision/34-Remote-Control/image_transfer_raw_as_the_controller_device.py rename to scripts/examples/10-RPC-Library/34-Remote-Control/image_transfer_raw_as_the_controller_device.py diff --git a/scripts/examples/Arduino/Nicla-Vision/34-Remote-Control/image_transfer_raw_as_the_remote_device.py b/scripts/examples/10-RPC-Library/34-Remote-Control/image_transfer_raw_as_the_remote_device.py similarity index 100% rename from scripts/examples/Arduino/Nicla-Vision/34-Remote-Control/image_transfer_raw_as_the_remote_device.py rename to scripts/examples/10-RPC-Library/34-Remote-Control/image_transfer_raw_as_the_remote_device.py diff --git a/scripts/examples/Arduino/Nicla-Vision/34-Remote-Control/popular_features_as_the_controller_device.py b/scripts/examples/10-RPC-Library/34-Remote-Control/popular_features_as_the_controller_device.py similarity index 100% rename from scripts/examples/Arduino/Nicla-Vision/34-Remote-Control/popular_features_as_the_controller_device.py rename to scripts/examples/10-RPC-Library/34-Remote-Control/popular_features_as_the_controller_device.py diff --git a/scripts/examples/OpenMV/34-Remote-Control/popular_features_as_the_remote_device.py b/scripts/examples/10-RPC-Library/34-Remote-Control/popular_features_as_the_remote_device.py similarity index 100% rename from scripts/examples/OpenMV/34-Remote-Control/popular_features_as_the_remote_device.py rename to scripts/examples/10-RPC-Library/34-Remote-Control/popular_features_as_the_remote_device.py diff --git a/scripts/examples/OpenMV/36-Web-Servers/rtsp_video_server_lan.py b/scripts/examples/10-RPC-Library/36-Web-Servers/rtsp_video_server_lan.py similarity index 100% rename from scripts/examples/OpenMV/36-Web-Servers/rtsp_video_server_lan.py rename to scripts/examples/10-RPC-Library/36-Web-Servers/rtsp_video_server_lan.py diff --git a/scripts/examples/OpenMV/36-Web-Servers/rtsp_video_server_wlan.py 
b/scripts/examples/10-RPC-Library/36-Web-Servers/rtsp_video_server_wlan.py
similarity index 100%
rename from scripts/examples/OpenMV/36-Web-Servers/rtsp_video_server_wlan.py
rename to scripts/examples/10-RPC-Library/36-Web-Servers/rtsp_video_server_wlan.py
diff --git a/scripts/examples/Arduino/Nicla-Vision/00-Basics/main.py b/scripts/examples/Arduino/Nicla-Vision/00-Basics/main.py
deleted file mode 100644
index 461b2b7dd..000000000
--- a/scripts/examples/Arduino/Nicla-Vision/00-Basics/main.py
+++ /dev/null
@@ -1,33 +0,0 @@
-# Main Module Example
-#
-# When your OpenMV Cam is disconnected from your computer it will either run the
-# main.py script on the SD card (if attached) or the main.py script on
-# your OpenMV Cam's internal flash drive.
-
-import time, pyb
-
-led = pyb.LED(3) # Red LED = 1, Green LED = 2, Blue LED = 3.
-usb = pyb.USB_VCP() # This is a serial port object that allows you to communicate
-                    # with your computer. While it is not open the code below runs.
-
-while(not usb.isconnected()):
-    led.on()
-    time.sleep_ms(150)
-    led.off()
-    time.sleep_ms(100)
-    led.on()
-    time.sleep_ms(150)
-    led.off()
-    time.sleep_ms(600)
-
-led = pyb.LED(2) # Switch to using the green LED.
-
-while(usb.isconnected()):
-    led.on()
-    time.sleep_ms(150)
-    led.off()
-    time.sleep_ms(100)
-    led.on()
-    time.sleep_ms(150)
-    led.off()
-    time.sleep_ms(600)
diff --git a/scripts/examples/Arduino/Nicla-Vision/02-Board-Control/native_emitters.py b/scripts/examples/Arduino/Nicla-Vision/02-Board-Control/native_emitters.py
deleted file mode 100644
index 3839297fd..000000000
--- a/scripts/examples/Arduino/Nicla-Vision/02-Board-Control/native_emitters.py
+++ /dev/null
@@ -1,19 +0,0 @@
-import time
-
-@micropython.asm_thumb
-def asm():  # inline ARM Thumb assembly; loads 42 into r0, the return register
-    movw(r0, 42)
-
-@micropython.viper
-def viper(a, b):  # viper emitter: typed code compiled to machine code
-    return a + b
-
-@micropython.native
-def native(a, b):  # native emitter: ordinary Python compiled to machine code
-    return a + b
-
-
-print(asm())
-print(viper(1, 2))
-print(native(1, 2))
-
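The native_emitters.py script above demonstrates MicroPython's three code emitters without measuring what they buy you. A minimal on-device timing sketch, assuming a MicroPython build with the native emitter enabled (the sum_* helper names are illustrative, not from the original script):

    import time

    def sum_bytecode(n):  # plain bytecode version, for comparison
        s = 0
        for i in range(n):
            s += i
        return s

    @micropython.native  # same function, compiled to machine code
    def sum_native(n):
        s = 0
        for i in range(n):
            s += i
        return s

    for f in (sum_bytecode, sum_native):
        t = time.ticks_us()
        f(100000)
        # ticks_diff() handles counter wrap-around correctly.
        print(time.ticks_diff(time.ticks_us(), t), "us")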
diff --git a/scripts/examples/Arduino/Nicla-Vision/03-Drawing/copy2fb.py b/scripts/examples/Arduino/Nicla-Vision/03-Drawing/copy2fb.py
deleted file mode 100644
index aad51253e..000000000
--- a/scripts/examples/Arduino/Nicla-Vision/03-Drawing/copy2fb.py
+++ /dev/null
@@ -1,15 +0,0 @@
-# Copy image to framebuffer.
-#
-# This example shows how to load and copy an image to the framebuffer for testing.
-
-import sensor, image, time
-
-sensor.reset()
-sensor.set_framesize(sensor.QQVGA)
-sensor.set_pixformat(sensor.GRAYSCALE)
-
-# Load image
-img = image.Image("/example.bmp", copy_to_fb=True)
-
-# Add a small delay to allow the IDE to read the loaded image.
-time.sleep_ms(500)
diff --git a/scripts/examples/Arduino/Nicla-Vision/05-Snapshot/snapshot_on_movement.py b/scripts/examples/Arduino/Nicla-Vision/05-Snapshot/snapshot_on_movement.py
deleted file mode 100644
index 4e6c67956..000000000
--- a/scripts/examples/Arduino/Nicla-Vision/05-Snapshot/snapshot_on_movement.py
+++ /dev/null
@@ -1,44 +0,0 @@
-# Snapshot on Movement Example
-#
-# Note: You will need an SD card to run this example.
-#
-# This example demonstrates using frame differencing with your OpenMV Cam to do
-# motion detection. After motion is detected your OpenMV Cam will take a picture.
-
-import sensor, image, pyb, os
-
-RED_LED_PIN = 1
-BLUE_LED_PIN = 3
-
-sensor.reset() # Initialize the camera sensor.
-sensor.set_pixformat(sensor.RGB565) # or sensor.GRAYSCALE
-sensor.set_framesize(sensor.QVGA) # or sensor.QQVGA (or others)
-sensor.skip_frames(time = 2000) # Let new settings take effect.
-
-if not "temp" in os.listdir(): os.mkdir("temp") # Make a temp directory
-
-while(True):
-
-    pyb.LED(RED_LED_PIN).on()
-    print("About to save background image...")
-    sensor.skip_frames(time = 2000) # Give the user time to get ready.
-
-    pyb.LED(RED_LED_PIN).off()
-    sensor.snapshot().save("temp/bg.bmp")
-    print("Saved background image - Now detecting motion!")
-    pyb.LED(BLUE_LED_PIN).on()
-
-    diff = 10 # We'll say we detected motion after 10 frames of motion.
-    while(diff):
-        img = sensor.snapshot()
-        img.difference("temp/bg.bmp")
-        stats = img.statistics()
-        # Stats 5 is the max of the lighting color channel. The code below
-        # triggers when the lighting max for the whole image goes above 20.
-        # The lighting difference maximum should be zero normally.
-        if (stats[5] > 20):
-            diff -= 1
-
-    pyb.LED(BLUE_LED_PIN).off()
-    print("Movement detected! Saving image...")
-    sensor.snapshot().save("temp/snapshot-%d.jpg" % pyb.rng()) # Save Pic.
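The stats[5] trigger in the loop above works because, after difference(), an unchanged scene is nearly black, so the maximum of the lightness channel sits near zero. The same check is easier to read with named accessors, assuming current OpenMV firmware where get_statistics() returns a statistics object:

    img = sensor.snapshot()
    img.difference("temp/bg.bmp")
    stats = img.get_statistics()
    # l_max() is the maximum of the LAB L (lightness) channel - i.e. stats[5].
    if stats.l_max() > 20:  # same threshold as the script above
        print("Movement detected!")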
diff --git a/scripts/examples/Arduino/Nicla-Vision/06-Video-Recording/gif_on_movement.py b/scripts/examples/Arduino/Nicla-Vision/06-Video-Recording/gif_on_movement.py
deleted file mode 100644
index 81e013053..000000000
--- a/scripts/examples/Arduino/Nicla-Vision/06-Video-Recording/gif_on_movement.py
+++ /dev/null
@@ -1,58 +0,0 @@
-# GIF Video Recording on Movement Example
-#
-# Note: You will need an SD card to run this example.
-#
-# You can use your OpenMV Cam to record gif files. You can either feed the
-# recorder object RGB565 frames or Grayscale frames. Use photo editing software
-# like GIMP to compress and optimize the Gif before uploading it to the web.
-#
-# This example demonstrates using frame differencing with your OpenMV Cam to do
-# motion detection. After motion is detected your OpenMV Cam will record video.
-
-import sensor, image, time, gif, pyb, os
-
-RED_LED_PIN = 1
-BLUE_LED_PIN = 3
-
-sensor.reset() # Initialize the camera sensor.
-sensor.set_pixformat(sensor.RGB565) # or sensor.GRAYSCALE
-sensor.set_framesize(sensor.QQVGA) # or sensor.QVGA (or others)
-sensor.skip_frames(time = 2000) # Let new settings take effect.
-
-if not "temp" in os.listdir(): os.mkdir("temp") # Make a temp directory
-
-while(True):
-
-    pyb.LED(RED_LED_PIN).on()
-    print("About to save background image...")
-    sensor.skip_frames(time = 2000) # Give the user time to get ready.
-
-    pyb.LED(RED_LED_PIN).off()
-    sensor.snapshot().save("temp/bg.bmp")
-    print("Saved background image - Now detecting motion!")
-    pyb.LED(BLUE_LED_PIN).on()
-
-    diff = 10 # We'll say we detected motion after 10 frames of motion.
-    while(diff):
-        img = sensor.snapshot()
-        img.difference("temp/bg.bmp")
-        stats = img.statistics()
-        # Stats 5 is the max of the lighting color channel. The code below
-        # triggers when the lighting max for the whole image goes above 20.
-        # The lighting difference maximum should be zero normally.
-        if (stats[5] > 20):
-            diff -= 1
-
-    g = gif.Gif("example-%d.gif" % pyb.rng(), loop=True)
-
-    clock = time.clock() # Tracks FPS.
-    print("You're on camera!")
-    for i in range(100):
-        clock.tick()
-        # clock.avg() returns the milliseconds between frames - gif delay is in
-        g.add_frame(sensor.snapshot(), delay=int(clock.avg()/10)) # centiseconds.
-        print(clock.fps())
-
-    g.close()
-    pyb.LED(BLUE_LED_PIN).off()
-    print("Restarting...")
diff --git a/scripts/examples/Arduino/Nicla-Vision/06-Video-Recording/mjpeg_on_movement.py b/scripts/examples/Arduino/Nicla-Vision/06-Video-Recording/mjpeg_on_movement.py
deleted file mode 100644
index f74b0a8ae..000000000
--- a/scripts/examples/Arduino/Nicla-Vision/06-Video-Recording/mjpeg_on_movement.py
+++ /dev/null
@@ -1,58 +0,0 @@
-# MJPEG Video Recording on Movement Example
-#
-# Note: You will need an SD card to run this example.
-#
-# You can use your OpenMV Cam to record mjpeg files. You can either feed the
-# recorder object JPEG frames or RGB565/Grayscale frames. Once you've finished
-# recording an Mjpeg file you can use VLC to play it. If you are on Ubuntu then
-# the built-in video player will work too.
-#
-# This example demonstrates using frame differencing with your OpenMV Cam to do
-# motion detection. After motion is detected your OpenMV Cam will record video.
-
-import sensor, image, time, mjpeg, pyb, os
-
-RED_LED_PIN = 1
-BLUE_LED_PIN = 3
-
-sensor.reset() # Initialize the camera sensor.
-sensor.set_pixformat(sensor.RGB565) # or sensor.GRAYSCALE
-sensor.set_framesize(sensor.QVGA) # or sensor.QQVGA (or others)
-sensor.skip_frames(time = 2000) # Let new settings take effect.
-
-if not "temp" in os.listdir(): os.mkdir("temp") # Make a temp directory
-
-while(True):
-
-    pyb.LED(RED_LED_PIN).on()
-    print("About to save background image...")
-    sensor.skip_frames(time = 2000) # Give the user time to get ready.
-
-    pyb.LED(RED_LED_PIN).off()
-    sensor.snapshot().save("temp/bg.bmp")
-    print("Saved background image - Now detecting motion!")
-    pyb.LED(BLUE_LED_PIN).on()
-
-    diff = 10 # We'll say we detected motion after 10 frames of motion.
-    while(diff):
-        img = sensor.snapshot()
-        img.difference("temp/bg.bmp")
-        stats = img.statistics()
-        # Stats 5 is the max of the lighting color channel. The code below
-        # triggers when the lighting max for the whole image goes above 20.
-        # The lighting difference maximum should be zero normally.
-        if (stats[5] > 20):
-            diff -= 1
-
-    m = mjpeg.Mjpeg("example-%d.mjpeg" % pyb.rng())
-
-    clock = time.clock() # Tracks FPS.
-    print("You're on camera!")
-    for i in range(200):
-        clock.tick()
-        m.add_frame(sensor.snapshot())
-        print(clock.fps())
-
-    m.close(clock.fps())
-    pyb.LED(BLUE_LED_PIN).off()
-    print("Restarting...")
diff --git a/scripts/examples/Arduino/Nicla-Vision/07-Face-Detection/face_detection.py b/scripts/examples/Arduino/Nicla-Vision/07-Face-Detection/face_detection.py
deleted file mode 100644
index 7648ea0c7..000000000
--- a/scripts/examples/Arduino/Nicla-Vision/07-Face-Detection/face_detection.py
+++ /dev/null
@@ -1,46 +0,0 @@
-# Face Detection Example
-#
-# This example shows off the built-in face detection feature of the OpenMV Cam.
-#
-# Face detection works by using the Haar Cascade feature detector on an image. A
-# Haar Cascade is a series of simple area contrast checks. For the built-in
-# frontalface detector there are 25 stages of checks with each stage having
-# hundreds of checks apiece. Haar Cascades run fast because later stages are
-# only evaluated if previous stages pass. Additionally, your OpenMV Cam uses
-# a data structure called the integral image to quickly execute each area
-# contrast check in constant time (the reason feature detection is grayscale
-# only is the space requirement for the integral image).
-
-import sensor, time, image
-
-# Reset sensor
-sensor.reset()
-sensor.set_framesize(sensor.QVGA)
-sensor.set_pixformat(sensor.GRAYSCALE)
-
-# Load Haar Cascade
-# By default this will use all stages; fewer stages is faster but less accurate.
-face_cascade = image.HaarCascade("frontalface", stages=25)
-print(face_cascade)
-
-# FPS clock
-clock = time.clock()
-
-while (True):
-    clock.tick()
-
-    # Capture snapshot
-    img = sensor.snapshot()
-
-    # Find objects.
-    # Note: A lower scale factor scales down the image more and detects smaller objects.
-    # A higher threshold results in a higher detection rate, with more false positives.
-    objects = img.find_features(face_cascade, threshold=0.75, scale_factor=1.25)
-
-    # Draw objects
-    for r in objects:
-        img.draw_rectangle(r)
-
-    # Print FPS.
-    # Note: Actual FPS is higher; streaming the FB makes it slower.
-    print(clock.fps())
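The stages=25 argument above is the knob the header comment describes: each stage is another round of area contrast checks, and candidates are rejected as soon as one stage fails. A hedged sketch of trading accuracy for frame rate (17 is an arbitrary illustrative value, not a recommendation from the original script):

    import sensor, image

    sensor.reset()
    sensor.set_framesize(sensor.QVGA)
    sensor.set_pixformat(sensor.GRAYSCALE)

    # Fewer stages run faster but let more false positives through.
    cascade = image.HaarCascade("frontalface", stages=17)

    while True:
        img = sensor.snapshot()
        for r in img.find_features(cascade, threshold=0.75, scale_factor=1.25):
            img.draw_rectangle(r)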
diff --git a/scripts/examples/Arduino/Nicla-Vision/07-Face-Detection/face_tracking.py b/scripts/examples/Arduino/Nicla-Vision/07-Face-Detection/face_tracking.py
deleted file mode 100644
index c3bc6e481..000000000
--- a/scripts/examples/Arduino/Nicla-Vision/07-Face-Detection/face_tracking.py
+++ /dev/null
@@ -1,63 +0,0 @@
-# Face Tracking Example
-#
-# This example shows off using the keypoints feature of your OpenMV Cam to track
-# a face after it has been detected by a Haar Cascade. The first part of this
-# script finds a face in the image using the frontalface Haar Cascade.
-# After which the script uses the keypoints feature to automatically learn your
-# face and track it. Keypoints can be used to automatically track anything.
-import sensor, time, image
-
-# Reset sensor
-sensor.reset()
-sensor.set_pixformat(sensor.GRAYSCALE)
-sensor.set_framesize(sensor.QVGA)
-sensor.skip_frames(time = 2000)
-
-# Load Haar Cascade
-# By default this will use all stages; fewer stages is faster but less accurate.
-face_cascade = image.HaarCascade("frontalface", stages=25)
-print(face_cascade)
-
-# First set of keypoints
-kpts1 = None
-
-# Find a face!
-while (kpts1 == None):
-    img = sensor.snapshot()
-    img.draw_string(0, 0, "Looking for a face...")
-    # Find faces
-    objects = img.find_features(face_cascade, threshold=0.5, scale=1.25)
-    if objects:
-        # Expand the ROI by 31 pixels in every direction
-        face = (objects[0][0]-31, objects[0][1]-31, objects[0][2]+31*2, objects[0][3]+31*2)
-        # Extract keypoints using the detected face size as the ROI
-        kpts1 = img.find_keypoints(threshold=10, scale_factor=1.1, max_keypoints=100, roi=face)
-        # Draw a rectangle around the first face
-        img.draw_rectangle(objects[0])
-
-# Draw keypoints
-print(kpts1)
-img.draw_keypoints(kpts1, size=24)
-img = sensor.snapshot()
-time.sleep_ms(2000)
-
-# FPS clock
-clock = time.clock()
-
-while (True):
-    clock.tick()
-    img = sensor.snapshot()
-    # Extract keypoints from the whole frame
-    kpts2 = img.find_keypoints(threshold=10, scale_factor=1.1, max_keypoints=100, normalized=True)
-
-    if (kpts2):
-        # Match the first set of keypoints with the second one
-        c = image.match_descriptor(kpts1, kpts2, threshold=85)
-        match = c[6] # c[6] contains the number of matches.
-        if (match > 5):
-            img.draw_rectangle(c[2:6])
-            img.draw_cross(c[0], c[1], size=10)
-            print(kpts2, "matched:%d dt:%d" % (match, c[7]))
-
-    # Draw FPS
-    img.draw_string(0, 0, "FPS:%.2f" % (clock.fps()))
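face_tracking.py above indexes the match result positionally: c[0]/c[1] are the match center, c[2:6] the bounding rectangle, c[6] the match count, and c[7] the angle. On firmware where match_descriptor() returns a kptmatch object, as keypoints.py further below already assumes, the same block reads:

    c = image.match_descriptor(kpts1, kpts2, threshold=85)
    if c.count() > 5:                                   # was c[6]
        img.draw_rectangle(c.rect())                    # was c[2:6]
        img.draw_cross(c.cx(), c.cy(), size=10)         # was c[0], c[1]
        print("matched:%d dt:%d" % (c.count(), c.theta()))  # theta() was c[7]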
diff --git a/scripts/examples/Arduino/Nicla-Vision/09-Feature-Detection/edges.py b/scripts/examples/Arduino/Nicla-Vision/09-Feature-Detection/edges.py
deleted file mode 100644
index 6faa6651f..000000000
--- a/scripts/examples/Arduino/Nicla-Vision/09-Feature-Detection/edges.py
+++ /dev/null
@@ -1,19 +0,0 @@
-# Edge detection with Canny:
-#
-# This example demonstrates the Canny edge detector.
-import sensor, image, time
-
-sensor.reset() # Initialize the camera sensor.
-sensor.set_pixformat(sensor.GRAYSCALE) # or sensor.RGB565
-sensor.set_framesize(sensor.QQVGA) # or sensor.QVGA (or others)
-sensor.skip_frames(time = 2000) # Let new settings take effect.
-
-clock = time.clock() # Tracks FPS.
-while(True):
-    clock.tick() # Track elapsed milliseconds between snapshots().
-    img = sensor.snapshot() # Take a picture and return the image.
-    # Use the Canny edge detector.
-    img.find_edges(image.EDGE_CANNY, threshold=(50, 80))
-    # Faster, simpler edge detection:
-    #img.find_edges(image.EDGE_SIMPLE, threshold=(100, 255))
-    print(clock.fps()) # Note: Your OpenMV Cam runs about half as fast while connected to the IDE.
diff --git a/scripts/examples/Arduino/Nicla-Vision/09-Feature-Detection/hog.py b/scripts/examples/Arduino/Nicla-Vision/09-Feature-Detection/hog.py
deleted file mode 100644
index 32fa2c5ab..000000000
--- a/scripts/examples/Arduino/Nicla-Vision/09-Feature-Detection/hog.py
+++ /dev/null
@@ -1,25 +0,0 @@
-# Histogram of Oriented Gradients (HoG) Example
-#
-# This example demonstrates HoG visualization.
-#
-# Note: Due to JPEG artifacts, the HoG visualization looks blurry. To see the
-# image without JPEG artifacts, uncomment the lines that save the image to uSD.
-
-import sensor, image, time
-
-sensor.reset()
-sensor.set_framesize(sensor.QVGA)
-sensor.set_pixformat(sensor.GRAYSCALE)
-sensor.skip_frames(time = 2000)
-
-clock = time.clock() # Tracks FPS.
-while (True):
-    clock.tick()
-    img = sensor.snapshot()
-    img.find_hog()
-
-    # Uncomment to save the raw FB to a file and exit the loop.
-    #img.save("/hog.pgm")
-    #break
-
-    print(clock.fps())
diff --git a/scripts/examples/Arduino/Nicla-Vision/09-Feature-Detection/keypoints.py b/scripts/examples/Arduino/Nicla-Vision/09-Feature-Detection/keypoints.py
deleted file mode 100644
index 1d2ef1be6..000000000
--- a/scripts/examples/Arduino/Nicla-Vision/09-Feature-Detection/keypoints.py
+++ /dev/null
@@ -1,51 +0,0 @@
-# Object tracking with keypoints example.
-# Show the camera an object and then run the script. A set of keypoints will be extracted
-# once and then tracked in the following frames. If you want a new set of keypoints, re-run
-# the script. NOTE: see the docs for arguments to tune find_keypoints and match_keypoints.
-import sensor, time, image
-
-# Reset sensor
-sensor.reset()
-sensor.set_pixformat(sensor.GRAYSCALE)
-sensor.set_framesize(sensor.QVGA)
-sensor.skip_frames(time = 2000)
-
-def draw_keypoints(img, kpts):
-    if kpts:
-        print(kpts)
-        img.draw_keypoints(kpts)
-        img = sensor.snapshot()
-        time.sleep_ms(1000)
-
-kpts1 = None
-# NOTE: uncomment to load a keypoints descriptor from file
-#kpts1 = image.load_descriptor("/desc.orb")
-#img = sensor.snapshot()
-#draw_keypoints(img, kpts1)
-
-clock = time.clock()
-while (True):
-    clock.tick()
-    img = sensor.snapshot()
-    if (kpts1 == None):
-        # NOTE: By default find_keypoints returns multi-scale keypoints extracted from an image pyramid.
-        kpts1 = img.find_keypoints(max_keypoints=150, threshold=10, scale_factor=1.2)
-        draw_keypoints(img, kpts1)
-    else:
-        # NOTE: When extracting keypoints to match the first descriptor, we use normalized=True to extract
-        # keypoints from the first scale only, which will match one of the scales in the first descriptor.
-        kpts2 = img.find_keypoints(max_keypoints=150, threshold=10, normalized=True)
-        if (kpts2):
-            match = image.match_descriptor(kpts1, kpts2, threshold=85)
-            if (match.count() > 10):
-                # If we have at least n "good matches",
-                # draw the bounding rectangle and cross.
-                img.draw_rectangle(match.rect())
-                img.draw_cross(match.cx(), match.cy(), size=10)
-
-            print(kpts2, "matched:%d dt:%d" % (match.count(), match.theta()))
-            # NOTE: uncomment if you want to draw the keypoints
-            #img.draw_keypoints(kpts2, size=KEYPOINTS_SIZE, matched=True)
-
-    # Draw FPS
-    img.draw_string(0, 0, "FPS:%.2f" % (clock.fps()))
diff --git a/scripts/examples/Arduino/Nicla-Vision/09-Feature-Detection/keypoints_save.py b/scripts/examples/Arduino/Nicla-Vision/09-Feature-Detection/keypoints_save.py
deleted file mode 100644
index f5436f79d..000000000
--- a/scripts/examples/Arduino/Nicla-Vision/09-Feature-Detection/keypoints_save.py
+++ /dev/null
@@ -1,30 +0,0 @@
-# Keypoints descriptor example.
-# This example shows how to save a keypoints descriptor to file. Show the camera an object
-# and then run the script. The script will extract and save a keypoints descriptor and the image.
-# You can use the keypoints_editor.py util to remove unwanted keypoints.
-#
-# NOTE: Please reset the camera after running this script to see the new file.
-import sensor, time, image
-
-# Reset sensor
-sensor.reset()
-sensor.set_framesize(sensor.QVGA)
-sensor.set_pixformat(sensor.GRAYSCALE)
-sensor.skip_frames(time = 2000)
-
-FILE_NAME = "desc"
-img = sensor.snapshot()
-# NOTE: See the docs for other arguments
-# NOTE: By default find_keypoints returns multi-scale keypoints extracted from an image pyramid.
-kpts = img.find_keypoints(max_keypoints=150, threshold=10, scale_factor=1.2)
-
-if (kpts == None):
-    raise(Exception("Couldn't find any keypoints!"))
-
-image.save_descriptor(kpts, "/%s.orb" % (FILE_NAME))
-img.save("/%s.pgm" % (FILE_NAME))
-
-img.draw_keypoints(kpts)
-sensor.snapshot()
-time.sleep_ms(1000)
-raise(Exception("Done! Please reset the camera"))
diff --git a/scripts/examples/Arduino/Nicla-Vision/09-Feature-Detection/lbp.py b/scripts/examples/Arduino/Nicla-Vision/09-Feature-Detection/lbp.py
deleted file mode 100644
index 3894285c5..000000000
--- a/scripts/examples/Arduino/Nicla-Vision/09-Feature-Detection/lbp.py
+++ /dev/null
@@ -1,49 +0,0 @@
-# Local Binary Patterns (LBP) Example
-#
-# This example shows off how to use the local binary pattern feature descriptor
-# on your OpenMV Cam. LBP descriptors work like Freak feature descriptors.
-#
-# WARNING: LBP support needs to be reworked! As of right now this feature needs
-# a lot of work to be made into something useful. This script will remain to show
-# that the functionality exists, but, in its current state, is inadequate.
-
-import sensor, time, image
-sensor.reset()
-
-# Reset sensor
-sensor.reset()
-sensor.set_framesize(sensor.HQVGA)
-sensor.set_pixformat(sensor.GRAYSCALE)
-
-# Load Haar Cascade
-# By default this will use all stages; fewer stages is faster but less accurate.
-face_cascade = image.HaarCascade("frontalface", stages=25)
-print(face_cascade)
-
-# Skip a few frames to allow the sensor to settle down.
-# Note: This takes more time when exec'd from the IDE.
-for i in range(0, 30):
-    img = sensor.snapshot()
-    img.draw_string(0, 0, "Please wait...")
-
-d0 = None
-#d0 = image.load_descriptor("/desc.lbp")
-clock = time.clock()
-
-while (True):
-    clock.tick()
-    img = sensor.snapshot()
-
-    objects = img.find_features(face_cascade, threshold=0.5, scale_factor=1.25)
-    if objects:
-        face = objects[0]
-        d1 = img.find_lbp(face)
-        if (d0 == None):
-            d0 = d1
-        else:
-            dist = image.match_descriptor(d0, d1) # distance: lower is more similar
-            img.draw_string(0, 10, "Match %d%%" % (dist))
-
-        img.draw_rectangle(face)
-    # Draw FPS
-    img.draw_string(0, 0, "FPS:%.2f" % (clock.fps()))
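One caveat about lbp.py above: match_descriptor() on LBP descriptors returns a distance, where lower means more similar, so printing it as "Match %d%%" overstates what the number is. A sketch of an actual same/different decision, with an admittedly arbitrary cut-off:

    d1 = img.find_lbp(face)  # face is the detected (x, y, w, h) ROI
    dist = image.match_descriptor(d0, d1)
    # 0 would mean identical descriptors; calibrate the cut-off empirically
    # for your sensor and lighting (70 here is only a placeholder).
    print("same face" if dist < 70 else "different face")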
- r = img.find_template(template, 0.70, step=4, search=SEARCH_EX) #, roi=(10, 0, 60, 60)) - if r: - img.draw_rectangle(r) - - print(clock.fps()) diff --git a/scripts/examples/Arduino/Nicla-Vision/16-Codes/find_barcodes.py b/scripts/examples/Arduino/Nicla-Vision/16-Codes/find_barcodes.py deleted file mode 100644 index 241b5560a..000000000 --- a/scripts/examples/Arduino/Nicla-Vision/16-Codes/find_barcodes.py +++ /dev/null @@ -1,63 +0,0 @@ -# Barcode Example -# -# This example shows off how easy it is to detect bar codes using the -# OpenMV Cam M7. Barcode detection does not work on the M4 Camera. - -import sensor, image, time, math - -sensor.reset() -sensor.set_pixformat(sensor.GRAYSCALE) -sensor.set_framesize(sensor.VGA) -sensor.set_windowing((640, 80)) # V Res of 80 == less work (40 for 2X the speed). -sensor.skip_frames(time = 2000) -clock = time.clock() - -# Barcode detection can run at the full 640x480 resolution of your OpenMV Cam's -# OV7725 camera module. Barcode detection will also work in RGB565 mode but at -# a lower resolution. That said, barcode detection requires a higher resolution -# to work well so it should always be run at 640x480 in grayscale... - -def barcode_name(code): - if(code.type() == image.EAN2): - return "EAN2" - if(code.type() == image.EAN5): - return "EAN5" - if(code.type() == image.EAN8): - return "EAN8" - if(code.type() == image.UPCE): - return "UPCE" - if(code.type() == image.ISBN10): - return "ISBN10" - if(code.type() == image.UPCA): - return "UPCA" - if(code.type() == image.EAN13): - return "EAN13" - if(code.type() == image.ISBN13): - return "ISBN13" - if(code.type() == image.I25): - return "I25" - if(code.type() == image.DATABAR): - return "DATABAR" - if(code.type() == image.DATABAR_EXP): - return "DATABAR_EXP" - if(code.type() == image.CODABAR): - return "CODABAR" - if(code.type() == image.CODE39): - return "CODE39" - if(code.type() == image.PDF417): - return "PDF417" - if(code.type() == image.CODE93): - return "CODE93" - if(code.type() == image.CODE128): - return "CODE128" - -while(True): - clock.tick() - img = sensor.snapshot() - codes = img.find_barcodes() - for code in codes: - img.draw_rectangle(code.rect()) - print_args = (barcode_name(code), code.payload(), (180 * code.rotation()) / math.pi, code.quality(), clock.fps()) - print("Barcode %s, Payload \"%s\", rotation %f (degrees), quality %d, FPS %f" % print_args) - if not codes: - print("FPS %f" % clock.fps()) diff --git a/scripts/examples/Arduino/Nicla-Vision/16-Codes/find_datamatrices.py b/scripts/examples/Arduino/Nicla-Vision/16-Codes/find_datamatrices.py deleted file mode 100644 index 5a6cd8714..000000000 --- a/scripts/examples/Arduino/Nicla-Vision/16-Codes/find_datamatrices.py +++ /dev/null @@ -1,25 +0,0 @@ -# Find Data Matrices Example -# -# This example shows off how easy it is to detect data matrices using the -# OpenMV Cam M7. Data matrices detection does not work on the M4 Camera. - -import sensor, image, time, math - -sensor.reset() -sensor.set_pixformat(sensor.RGB565) -sensor.set_framesize(sensor.QVGA) -sensor.skip_frames(time = 2000) -clock = time.clock() - -while(True): - clock.tick() - img = sensor.snapshot() - img.lens_corr(1.8) # strength of 1.8 is good for the 2.8mm lens. 
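    # (Hedged tuning note, not in the original: lens_corr() un-warps barrel
    # distortion in place, and wider-angle lenses generally need a larger
    # strength value. For a different lens, sweep the value visually, e.g.:
    #
    #   img.lens_corr(1.5)  # hypothetical value for a narrower lens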
-
-    matrices = img.find_datamatrices()
-    for matrix in matrices:
-        img.draw_rectangle(matrix.rect(), color = (255, 0, 0))
-        print_args = (matrix.rows(), matrix.columns(), matrix.payload(), (180 * matrix.rotation()) / math.pi, clock.fps())
-        print("Matrix [%d:%d], Payload \"%s\", rotation %f (degrees), FPS %f" % print_args)
-    if not matrices:
-        print("FPS %f" % clock.fps())
diff --git a/scripts/examples/Arduino/Nicla-Vision/16-Codes/find_datamatrices_w_lens_zoom.py b/scripts/examples/Arduino/Nicla-Vision/16-Codes/find_datamatrices_w_lens_zoom.py
deleted file mode 100644
index bce6bc8d3..000000000
--- a/scripts/examples/Arduino/Nicla-Vision/16-Codes/find_datamatrices_w_lens_zoom.py
+++ /dev/null
@@ -1,25 +0,0 @@
-# Find Data Matrices w/ Lens Zoom Example
-#
-# This example shows off how easy it is to detect Data Matrices using the
-# OpenMV Cam M7. Data Matrix detection does not work on the M4 Camera.
-
-import sensor, image, time, math
-
-sensor.reset()
-sensor.set_pixformat(sensor.RGB565)
-sensor.set_framesize(sensor.VGA)
-sensor.set_windowing((320, 240)) # 2x Zoom
-sensor.skip_frames(time = 2000)
-clock = time.clock()
-
-while(True):
-    clock.tick()
-    img = sensor.snapshot()
-
-    matrices = img.find_datamatrices()
-    for matrix in matrices:
-        img.draw_rectangle(matrix.rect(), color = (255, 0, 0))
-        print_args = (matrix.rows(), matrix.columns(), matrix.payload(), (180 * matrix.rotation()) / math.pi, clock.fps())
-        print("Matrix [%d:%d], Payload \"%s\", rotation %f (degrees), FPS %f" % print_args)
-    if not matrices:
-        print("FPS %f" % clock.fps())
diff --git a/scripts/examples/Arduino/Nicla-Vision/16-Codes/qrcodes_with_lens_corr.py b/scripts/examples/Arduino/Nicla-Vision/16-Codes/qrcodes_with_lens_corr.py
deleted file mode 100644
index 55899e509..000000000
--- a/scripts/examples/Arduino/Nicla-Vision/16-Codes/qrcodes_with_lens_corr.py
+++ /dev/null
@@ -1,21 +0,0 @@
-# QRCode Example
-#
-# This example shows the power of the OpenMV Cam to detect QR Codes
-# using lens correction (see the qrcodes_with_lens_zoom.py script for higher performance).
-
-import sensor, image, time
-
-sensor.reset()
-sensor.set_pixformat(sensor.RGB565)
-sensor.set_framesize(sensor.QVGA)
-sensor.skip_frames(time = 2000)
-clock = time.clock()
-
-while(True):
-    clock.tick()
-    img = sensor.snapshot()
-    img.lens_corr(1.8) # strength of 1.8 is good for the 2.8mm lens.
-    for code in img.find_qrcodes():
-        img.draw_rectangle(code.rect(), color = (255, 0, 0))
-        print(code)
-    print(clock.fps())
diff --git a/scripts/examples/Arduino/Nicla-Vision/16-Codes/qrcodes_with_lens_zoom.py b/scripts/examples/Arduino/Nicla-Vision/16-Codes/qrcodes_with_lens_zoom.py
deleted file mode 100644
index 5fbb51d2a..000000000
--- a/scripts/examples/Arduino/Nicla-Vision/16-Codes/qrcodes_with_lens_zoom.py
+++ /dev/null
@@ -1,21 +0,0 @@
-# QRCode Example
-#
-# This example shows the power of the OpenMV Cam to detect QR Codes
-# without needing lens correction.
-
-import sensor, image, time
-
-sensor.reset()
-sensor.set_pixformat(sensor.GRAYSCALE)
-sensor.set_framesize(sensor.VGA)
-sensor.set_windowing((240, 240)) # Look at the center 240x240 pixels of the VGA resolution.
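# (Worked zoom math, inferred from the comments in these scripts: cropping a
# 640x480 VGA frame to a centered window acts like a digital zoom of
# 640 / window_width, so (320, 240) gives ~2x and this (240, 240) window
# gives ~2.7x horizontally.)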
-sensor.skip_frames(time = 2000)
-clock = time.clock()
-
-while(True):
-    clock.tick()
-    img = sensor.snapshot()
-    for code in img.find_qrcodes():
-        img.draw_rectangle(code.rect(), color = 127)
-        print(code)
-    print(clock.fps())
diff --git a/scripts/examples/Arduino/Nicla-Vision/20-Frame-Differencing/in_memory_basic_frame_differencing.py b/scripts/examples/Arduino/Nicla-Vision/20-Frame-Differencing/in_memory_basic_frame_differencing.py
deleted file mode 100644
index 138955f86..000000000
--- a/scripts/examples/Arduino/Nicla-Vision/20-Frame-Differencing/in_memory_basic_frame_differencing.py
+++ /dev/null
@@ -1,46 +0,0 @@
-# In Memory Basic Frame Differencing Example
-#
-# This example demonstrates using frame differencing with your OpenMV Cam. It's
-# called basic frame differencing because there's no background image update.
-# So, as time passes, the background image may change, resulting in issues.
-
-import sensor, image, pyb, os, time
-
-TRIGGER_THRESHOLD = 5
-
-sensor.reset() # Initialize the camera sensor.
-sensor.set_pixformat(sensor.RGB565) # or sensor.GRAYSCALE
-sensor.set_framesize(sensor.QVGA) # or sensor.QQVGA (or others)
-sensor.skip_frames(time = 2000) # Let new settings take effect.
-clock = time.clock() # Tracks FPS.
-
-# Borrow RAM from the main frame buffer to allocate a second frame buffer.
-# There's a lot more RAM in the frame buffer than in the MicroPython heap.
-# However, after doing this you have a lot less RAM for some algorithms...
-# So, be aware that it's a lot easier to run into RAM issues now. However,
-# frame differencing doesn't use a lot of the extra space in the frame buffer.
-# But things like AprilTags do, and they won't work if you do this...
-extra_fb = sensor.alloc_extra_fb(sensor.width(), sensor.height(), sensor.RGB565)
-
-print("About to save background image...")
-sensor.skip_frames(time = 2000) # Give the user time to get ready.
-extra_fb.replace(sensor.snapshot())
-print("Saved background image - Now frame differencing!")
-
-while(True):
-    clock.tick() # Track elapsed milliseconds between snapshots.
-    img = sensor.snapshot() # Take a picture and return the image.
-
-    # Replace the image with the "abs(NEW-OLD)" frame difference.
-    img.difference(extra_fb)
-
-    hist = img.get_histogram()
-    # The code below works by comparing the 99th percentile value (i.e. the
-    # non-outlier max value) against the 90th percentile value (i.e. a non-max
-    # value). The difference between the two values will grow as more pixels
-    # in the difference image change.
-    diff = hist.get_percentile(0.99).l_value() - hist.get_percentile(0.90).l_value()
-    triggered = diff > TRIGGER_THRESHOLD
-
-    print(clock.fps(), triggered) # Note: Your OpenMV Cam runs about half as fast while
-    # connected to your computer. The FPS should increase once disconnected.
diff --git a/scripts/examples/Arduino/Nicla-Vision/20-Frame-Differencing/in_memory_structural_similarity.py b/scripts/examples/Arduino/Nicla-Vision/20-Frame-Differencing/in_memory_structural_similarity.py
deleted file mode 100644
index 53a32c4ff..000000000
--- a/scripts/examples/Arduino/Nicla-Vision/20-Frame-Differencing/in_memory_structural_similarity.py
+++ /dev/null
@@ -1,38 +0,0 @@
-# Structural Similarity (SSIM) Example
-#
-# This example shows off how to use the SSIM algorithm on your OpenMV Cam
-# to detect differences between two images. The SSIM algorithm compares
-# 8x8 blocks of pixels between the two images to determine a similarity
-# score.
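# (Hedged usage note: get_similarity() is assumed here to return a similarity
# object exposing per-block statistics; these scripts key off the worst block:
#
#   sim = img.get_similarity(extra_fb)
#   print(sim.mean(), sim.min())  # min() drives the change trigger below)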
-
-import sensor, image, pyb, os, time
-
-# The image has likely changed if sim.min() is lower than this.
-MIN_TRIGGER_THRESHOLD = -0.4
-
-sensor.reset() # Initialize the camera sensor.
-sensor.set_pixformat(sensor.RGB565) # or sensor.GRAYSCALE
-sensor.set_framesize(sensor.QVGA) # or sensor.QQVGA (or others)
-sensor.skip_frames(time = 2000) # Let new settings take effect.
-clock = time.clock() # Tracks FPS.
-
-# Borrow RAM from the main frame buffer to allocate a second frame buffer.
-# There's a lot more RAM in the frame buffer than in the MicroPython heap.
-# However, after doing this you have a lot less RAM for some algorithms...
-# So, be aware that it's a lot easier to run into RAM issues now. However,
-# frame differencing doesn't use a lot of the extra space in the frame buffer.
-# But things like AprilTags do, and they won't work if you do this...
-extra_fb = sensor.alloc_extra_fb(sensor.width(), sensor.height(), sensor.RGB565)
-
-print("About to save background image...")
-sensor.skip_frames(time = 2000) # Give the user time to get ready.
-extra_fb.replace(sensor.snapshot())
-print("Saved background image!")
-
-while(True):
-    clock.tick() # Track elapsed milliseconds between snapshots.
-    img = sensor.snapshot() # Take a picture and return the image.
-    sim = img.get_similarity(extra_fb)
-    change = "- Change -" if sim.min() < MIN_TRIGGER_THRESHOLD else "- No Change -"
-
-    print(clock.fps(), change, sim)
diff --git a/scripts/examples/Arduino/Nicla-Vision/20-Frame-Differencing/on_disk_basic_frame_differencing.py b/scripts/examples/Arduino/Nicla-Vision/20-Frame-Differencing/on_disk_basic_frame_differencing.py
deleted file mode 100644
index a168a0c50..000000000
--- a/scripts/examples/Arduino/Nicla-Vision/20-Frame-Differencing/on_disk_basic_frame_differencing.py
+++ /dev/null
@@ -1,42 +0,0 @@
-# Basic Frame Differencing Example
-#
-# Note: You will need an SD card to run this example.
-#
-# This example demonstrates using frame differencing with your OpenMV Cam. It's
-# called basic frame differencing because there's no background image update.
-# So, as time passes, the background image may change, resulting in issues.
-
-import sensor, image, pyb, os, time
-
-TRIGGER_THRESHOLD = 5
-
-sensor.reset() # Initialize the camera sensor.
-sensor.set_pixformat(sensor.RGB565) # or sensor.GRAYSCALE
-sensor.set_framesize(sensor.QVGA) # or sensor.QQVGA (or others)
-sensor.skip_frames(time = 2000) # Let new settings take effect.
-clock = time.clock() # Tracks FPS.
-
-if "temp" not in os.listdir(): os.mkdir("temp") # Make a temp directory
-
-print("About to save background image...")
-sensor.skip_frames(time = 2000) # Give the user time to get ready.
-sensor.snapshot().save("temp/bg.bmp")
-print("Saved background image - Now frame differencing!")
-
-while(True):
-    clock.tick() # Track elapsed milliseconds between snapshots.
-    img = sensor.snapshot() # Take a picture and return the image.
-
-    # Replace the image with the "abs(NEW-OLD)" frame difference.
-    img.difference("temp/bg.bmp")
-
-    hist = img.get_histogram()
-    # The code below works by comparing the 99th percentile value (i.e. the
-    # non-outlier max value) against the 90th percentile value (i.e. a non-max
-    # value). The difference between the two values will grow as more pixels
-    # in the difference image change.
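    # (Worked example with assumed numbers: on a static scene both percentiles
    # of the difference image sit near 0, so diff ~= 0. If a large object
    # moves, the brightest ~1% of pixels jumps first, e.g. 99th = 60 vs
    # 90th = 12 gives diff = 48 > TRIGGER_THRESHOLD, flagging the frame.)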
-    diff = hist.get_percentile(0.99).l_value() - hist.get_percentile(0.90).l_value()
-    triggered = diff > TRIGGER_THRESHOLD
-
-    print(clock.fps(), triggered) # Note: Your OpenMV Cam runs about half as fast while
-    # connected to your computer. The FPS should increase once disconnected.
diff --git a/scripts/examples/Arduino/Nicla-Vision/20-Frame-Differencing/on_disk_structural_similarity.py b/scripts/examples/Arduino/Nicla-Vision/20-Frame-Differencing/on_disk_structural_similarity.py
deleted file mode 100644
index bff55ecc7..000000000
--- a/scripts/examples/Arduino/Nicla-Vision/20-Frame-Differencing/on_disk_structural_similarity.py
+++ /dev/null
@@ -1,34 +0,0 @@
-# Structural Similarity (SSIM) Example
-#
-# Note: You will need an SD card to run this example.
-#
-# This example shows off how to use the SSIM algorithm on your OpenMV Cam
-# to detect differences between two images. The SSIM algorithm compares
-# 8x8 blocks of pixels between the two images to determine a similarity
-# score.
-
-import sensor, image, pyb, os, time
-
-# The image has likely changed if sim.min() is lower than this.
-MIN_TRIGGER_THRESHOLD = -0.4
-
-sensor.reset() # Initialize the camera sensor.
-sensor.set_pixformat(sensor.RGB565) # or sensor.GRAYSCALE
-sensor.set_framesize(sensor.QVGA) # or sensor.QQVGA (or others)
-sensor.skip_frames(time = 2000) # Let new settings take effect.
-clock = time.clock() # Tracks FPS.
-
-if "temp" not in os.listdir(): os.mkdir("temp") # Make a temp directory
-
-print("About to save background image...")
-sensor.skip_frames(time = 2000) # Give the user time to get ready.
-sensor.snapshot().save("temp/bg.bmp")
-print("Saved background image!")
-
-while(True):
-    clock.tick() # Track elapsed milliseconds between snapshots.
-    img = sensor.snapshot() # Take a picture and return the image.
-    sim = img.get_similarity("temp/bg.bmp")
-    change = "- Change -" if sim.min() < MIN_TRIGGER_THRESHOLD else "- No Change -"
-
-    print(clock.fps(), change, sim)
diff --git a/scripts/examples/Arduino/Nicla-Vision/25-Machine-Learning/nn_stm32cubeai.py b/scripts/examples/Arduino/Nicla-Vision/25-Machine-Learning/nn_stm32cubeai.py
deleted file mode 100644
index a1e260969..000000000
--- a/scripts/examples/Arduino/Nicla-Vision/25-Machine-Learning/nn_stm32cubeai.py
+++ /dev/null
@@ -1,34 +0,0 @@
-# STM32 CUBE.AI on OpenMV MNIST Example
-# See https://github.com/openmv/openmv/blob/master/src/stm32cubeai/README.MD
-
-import sensor, image, time, nn_st
-
-sensor.reset() # Reset and initialize the sensor.
-sensor.set_pixformat(sensor.GRAYSCALE) # Set pixel format to grayscale
-sensor.set_framesize(sensor.QQQVGA) # Set frame size to 80x60
-sensor.skip_frames(time = 2000) # Wait for settings to take effect.
-clock = time.clock() # Create a clock object to track the FPS.
-
-# [CUBE.AI] Initialize the network
-net = nn_st.loadnnst('network')
-
-nn_input_sz = 28 # The NN input is 28x28
-
-while(True):
-    clock.tick() # Update the FPS clock.
-    img = sensor.snapshot() # Take a picture and return the image.
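    # (Worked crop math from the values above: QQQVGA is 80x60 and nn_input_sz
    # is 28, so the crop below resolves to (80//2 - 14, 60//2 - 14, 28, 28) =
    # (26, 16, 28, 28), i.e. the centered 28x28 patch the network expects.)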
- - # Crop in the middle (avoids vignetting) - img.crop((img.width()//2-nn_input_sz//2, - img.height()//2-nn_input_sz//2, - nn_input_sz, - nn_input_sz)) - - # Binarize the image - img.midpoint(2, bias=0.5, threshold=True, offset=5, invert=True) - - # [CUBE.AI] Run the inference - out = net.predict(img) - print('Network argmax output: {}'.format( out.index(max(out)) )) - img.draw_string(0, 0, str(out.index(max(out)))) - print('FPS {}'.format(clock.fps())) # Note: OpenMV Cam runs about half as fast when connected diff --git a/scripts/examples/Arduino/Nicla-Vision/25-Machine-Learning/tf_face_collection.py b/scripts/examples/Arduino/Nicla-Vision/25-Machine-Learning/tf_face_collection.py deleted file mode 100644 index 7ae684ded..000000000 --- a/scripts/examples/Arduino/Nicla-Vision/25-Machine-Learning/tf_face_collection.py +++ /dev/null @@ -1,31 +0,0 @@ -# Face Collection -# -# Use this script to gather face images for building a TensorFlow dataset. This script automatically -# zooms in the largest face in the field of view which you can then save using the data set editor. - -import sensor, image, time - -sensor.reset() -sensor.set_pixformat(sensor.GRAYSCALE) -sensor.set_framesize(sensor.QVGA) -sensor.skip_frames(time = 2000) - -clock = time.clock() - -largest_face = None -largest_face_timeout = 0 - -while(True): - clock.tick() - - faces = sensor.snapshot().gamma_corr(contrast=1.5).find_features(image.HaarCascade("frontalface")) - - if faces: - largest_face = max(faces, key = lambda f: f[2] * f[3]) - largest_face_timeout = 20 - - if largest_face_timeout > 0: - sensor.get_fb().crop(roi=largest_face) - largest_face_timeout -= 1 - - print(clock.fps()) diff --git a/scripts/examples/Arduino/Nicla-Vision/25-Machine-Learning/tf_face_recognition.py b/scripts/examples/Arduino/Nicla-Vision/25-Machine-Learning/tf_face_recognition.py deleted file mode 100644 index b31bc16b9..000000000 --- a/scripts/examples/Arduino/Nicla-Vision/25-Machine-Learning/tf_face_recognition.py +++ /dev/null @@ -1,41 +0,0 @@ -# Face Recognition -# -# Use this script to run a TensorFlow lite image classifier on faces detected within an image. -# The classifier is free to do facial recognition, expression detection, or whatever. - -import sensor, image, time, tf - -sensor.reset() -sensor.set_pixformat(sensor.GRAYSCALE) -sensor.set_framesize(sensor.QVGA) -sensor.skip_frames(time = 2000) - -clock = time.clock() - -net = tf.load("trained.tflite", load_to_fb=True) -labels = [l.rstrip('\n') for l in open("labels.txt")] - -while(True): - clock.tick() - - # Take a picture and brighten things up for the frontal face detector. - img = sensor.snapshot().gamma_corr(contrast=1.5) - - # Returns a list of rects (x, y, w, h) where faces are. 
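    # (Sketch: find_features() also takes the tuning knobs used by lbp.py
    # earlier in this patch; a lower threshold finds more (and more false)
    # faces, and a smaller scale_factor searches more scales at lower FPS:
    #
    #   faces = img.find_features(image.HaarCascade("frontalface"),
    #                             threshold=0.5, scale_factor=1.25)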
-    faces = img.find_features(image.HaarCascade("frontalface"))
-
-    for f in faces:
-
-        # Classify a face and get the class scores list
-        scores = net.classify(img, roi=f)[0].output()
-
-        # Find the highest class score and look up the label for it
-        label = labels[scores.index(max(scores))]
-
-        # Draw a box around the face
-        img.draw_rectangle(f)
-
-        # Draw the label above the face
-        img.draw_string(f[0]+3, f[1]-1, label, mono_space=False)
-
-    print(clock.fps())
diff --git a/scripts/examples/Arduino/Nicla-Vision/25-Machine-Learning/tf_mobilenet_search_just_center.py b/scripts/examples/Arduino/Nicla-Vision/25-Machine-Learning/tf_mobilenet_search_just_center.py
deleted file mode 100644
index e8f12f924..000000000
--- a/scripts/examples/Arduino/Nicla-Vision/25-Machine-Learning/tf_mobilenet_search_just_center.py
+++ /dev/null
@@ -1,66 +0,0 @@
-# TensorFlow Lite Mobilenet V1 Example
-#
-# Google's Mobilenet V1 detects 1000 classes of objects
-#
-# WARNING: Mobilenet is trained on ImageNet and isn't meant to classify anything
-# in the real world. It's just designed to score well on the ImageNet dataset.
-# This example just shows off running mobilenet on the OpenMV Cam. However, the
-# default model is not really usable for anything. You have to use transfer
-# learning to apply the model to a target problem by re-training the model.
-#
-# NOTE: This example only works on the OpenMV Cam H7 Pro (that has SDRAM) and better!
-# To get the models please see the CNN Network library in OpenMV IDE under
-# Tools -> Machine Vision. The labels are there too.
-# You should insert a microSD card into your camera and copy-paste the mobilenet_labels.txt
-# file and your chosen model into the root folder for this script to work.
-#
-# In this example we slide the detector window over the image and get a list
-# of activations. Note that using a CNN with a sliding window is extremely
-# computationally expensive, so for an exhaustive search do not expect the CNN to be real-time.
-
-import sensor, image, time, os, tf
-
-sensor.reset() # Reset and initialize the sensor.
-sensor.set_pixformat(sensor.GRAYSCALE) # Set pixel format to GRAYSCALE (or RGB565)
-sensor.set_framesize(sensor.QVGA) # Set frame size to QVGA (320x240)
-sensor.set_windowing((240, 240)) # Set 240x240 window.
-sensor.skip_frames(time=2000) # Let the camera adjust.
-
-mobilenet_version = "1" # 1
-mobilenet_width = "0.5" # 1.0, 0.75, 0.50, 0.25
-mobilenet_resolution = "128" # 224, 192, 160, 128
-
-mobilenet = "mobilenet_v%s_%s_%s_quant.tflite" % (mobilenet_version, mobilenet_width, mobilenet_resolution)
-labels = [line.rstrip('\n') for line in open("mobilenet_labels.txt")]
-
-clock = time.clock()
-while(True):
-    clock.tick()
-
-    img = sensor.snapshot()
-
-    # net.classify() will run the network on an roi in the image (or on the whole image if the roi is not
-    # specified). A classification score output vector will be generated for each location. At each scale the
-    # detection window is moved around in the ROI using x_overlap (0-1) and y_overlap (0-1) as a guide.
-    # If you set the overlap to 0.5 then each detection window will overlap the previous one by 50%. Note
-    # that the computational workload goes WAY up with more overlap. Finally, for multi-scale matching, after
-    # sliding the network around in the x/y dimensions the detection window will shrink by scale_mul (0-1)
-    # down to min_scale (0-1). For example, if scale_mul is 0.5 the detection window will shrink by 50%.
-    # Note that at a lower scale there's even more area to search if x_overlap and y_overlap are small...
-
-    # Setting x_overlap=-1 forces the window to stay centered in the ROI in the x direction always. If
-    # y_overlap is not -1 the method will search in all vertical positions.
-
-    # Setting y_overlap=-1 forces the window to stay centered in the ROI in the y direction always. If
-    # x_overlap is not -1 the method will search in all horizontal positions.
-
-    # The default settings just do one detection... change them to search the image...
-    for obj in tf.classify(mobilenet, img, min_scale=1.0, scale_mul=0.5, x_overlap=-1, y_overlap=-1):
-        print("**********\nTop 5 Detections at [x=%d,y=%d,w=%d,h=%d]" % obj.rect())
-        img.draw_rectangle(obj.rect())
-        # This combines the labels and confidence values into a list of tuples
-        # and then sorts that list by the confidence values.
-        sorted_list = sorted(zip(labels, obj.output()), key = lambda x: x[1], reverse = True)
-        for i in range(5):
-            print("%s = %f" % (sorted_list[i][0], sorted_list[i][1]))
-    print(clock.fps(), "fps")
diff --git a/scripts/examples/Arduino/Nicla-Vision/25-Machine-Learning/tf_mobilenet_search_whole_window.py b/scripts/examples/Arduino/Nicla-Vision/25-Machine-Learning/tf_mobilenet_search_whole_window.py
deleted file mode 100644
index da7869c31..000000000
--- a/scripts/examples/Arduino/Nicla-Vision/25-Machine-Learning/tf_mobilenet_search_whole_window.py
+++ /dev/null
@@ -1,60 +0,0 @@
-# TensorFlow Lite Mobilenet V1 Example
-#
-# Google's Mobilenet V1 detects 1000 classes of objects
-#
-# WARNING: Mobilenet is trained on ImageNet and isn't meant to classify anything
-# in the real world. It's just designed to score well on the ImageNet dataset.
-# This example just shows off running mobilenet on the OpenMV Cam. However, the
-# default model is not really usable for anything. You have to use transfer
-# learning to apply the model to a target problem by re-training the model.
-#
-# NOTE: This example only works on the OpenMV Cam H7 Pro (that has SDRAM) and better!
-# To get the models please see the CNN Network library in OpenMV IDE under
-# Tools -> Machine Vision. The labels are there too.
-# You should insert a microSD card into your camera and copy-paste the mobilenet_labels.txt
-# file and your chosen model into the root folder for this script to work.
-#
-# In this example we slide the detector window over the image and get a list
-# of activations. Note that using a CNN with a sliding window is extremely
-# computationally expensive, so for an exhaustive search do not expect the CNN to be real-time.
-
-import sensor, image, time, os, tf
-
-sensor.reset() # Reset and initialize the sensor.
-sensor.set_pixformat(sensor.GRAYSCALE) # Set pixel format to GRAYSCALE (or RGB565)
-sensor.set_framesize(sensor.QVGA) # Set frame size to QVGA (320x240)
-sensor.set_windowing((240, 240)) # Set 240x240 window.
-sensor.skip_frames(time=2000) # Let the camera adjust.
-
-mobilenet_version = "1" # 1
-mobilenet_width = "0.5" # 1.0, 0.75, 0.50, 0.25
-mobilenet_resolution = "128" # 224, 192, 160, 128
-
-mobilenet = "mobilenet_v%s_%s_%s_quant.tflite" % (mobilenet_version, mobilenet_width, mobilenet_resolution)
-labels = [line.rstrip('\n') for line in open("mobilenet_labels.txt")]
-
-clock = time.clock()
-while(True):
-    clock.tick()
-
-    img = sensor.snapshot()
-
-    # net.classify() will run the network on an roi in the image (or on the whole image if the roi is not
-    # specified). A classification score output vector will be generated for each location. At each scale the
-    # detection window is moved around in the ROI using x_overlap (0-1) and y_overlap (0-1) as a guide.
-    # If you set the overlap to 0.5 then each detection window will overlap the previous one by 50%. Note
-    # that the computational workload goes WAY up with more overlap. Finally, for multi-scale matching, after
-    # sliding the network around in the x/y dimensions the detection window will shrink by scale_mul (0-1)
-    # down to min_scale (0-1). For example, if scale_mul is 0.5 the detection window will shrink by 50%.
-    # Note that at a lower scale there's even more area to search if x_overlap and y_overlap are small...
-
-    # The default settings just do one detection... change them to search the image...
-    for obj in tf.classify(mobilenet, img, min_scale=1.0, scale_mul=0.5, x_overlap=0.0, y_overlap=0.0):
-        print("**********\nTop 5 Detections at [x=%d,y=%d,w=%d,h=%d]" % obj.rect())
-        img.draw_rectangle(obj.rect())
-        # This combines the labels and confidence values into a list of tuples
-        # and then sorts that list by the confidence values.
-        sorted_list = sorted(zip(labels, obj.output()), key = lambda x: x[1], reverse = True)
-        for i in range(5):
-            print("%s = %f" % (sorted_list[i][0], sorted_list[i][1]))
-    print(clock.fps(), "fps")
diff --git a/scripts/examples/Arduino/Nicla-Vision/26-April-Tags/find_apriltags.py b/scripts/examples/Arduino/Nicla-Vision/26-April-Tags/find_apriltags.py
deleted file mode 100644
index bbb274f81..000000000
--- a/scripts/examples/Arduino/Nicla-Vision/26-April-Tags/find_apriltags.py
+++ /dev/null
@@ -1,55 +0,0 @@
-# AprilTags Example
-#
-# This example shows the power of the OpenMV Cam to detect April Tags
-# on the OpenMV Cam M7. The M4 versions cannot detect April Tags.
-
-import sensor, image, time, math
-
-sensor.reset()
-sensor.set_pixformat(sensor.RGB565)
-sensor.set_framesize(sensor.QQVGA) # we run out of memory if the resolution is much bigger...
-sensor.skip_frames(time = 2000)
-clock = time.clock()
-
-# Note! Unlike find_qrcodes, the find_apriltags method does not need lens correction on the image to work.
-
-# The apriltag code supports up to 6 tag families which can be processed at the same time.
-# Returned tag objects will have their tag family and id within the tag family.
-
-tag_families = 0
-tag_families |= image.TAG16H5 # comment out to disable this family
-tag_families |= image.TAG25H7 # comment out to disable this family
-tag_families |= image.TAG25H9 # comment out to disable this family
-tag_families |= image.TAG36H10 # comment out to disable this family
-tag_families |= image.TAG36H11 # comment out to disable this family (default family)
-tag_families |= image.ARTOOLKIT # comment out to disable this family
-
-# What's the difference between tag families? Well, for example, the TAG16H5 family is effectively
-# a 4x4 square tag. So, this means it can be seen at a longer distance than a TAG36H11 tag, which
-# is a 6x6 square tag. However, the lower H value (H5 versus H11) means that the false positive
-# rate for the 4x4 tag is much, much, much higher than for the 6x6 tag. So, unless you have a
-# reason to use the other tag families, just use TAG36H11, which is the default family.
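# (Background sketch, inferred from the family naming convention: TAGnHm
# encodes n payload bits with a minimum Hamming distance of m between valid
# codes. TAG16H5 has only 16 bits spaced 5 apart, so random texture decodes
# as a "valid" tag far more often than with TAG36H11's 36 bits spaced 11
# apart, which is why the comment above recommends TAG36H11.)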
- -def family_name(tag): - if(tag.family() == image.TAG16H5): - return "TAG16H5" - if(tag.family() == image.TAG25H7): - return "TAG25H7" - if(tag.family() == image.TAG25H9): - return "TAG25H9" - if(tag.family() == image.TAG36H10): - return "TAG36H10" - if(tag.family() == image.TAG36H11): - return "TAG36H11" - if(tag.family() == image.ARTOOLKIT): - return "ARTOOLKIT" - -while(True): - clock.tick() - img = sensor.snapshot() - for tag in img.find_apriltags(families=tag_families): # defaults to TAG36H11 without "families". - img.draw_rectangle(tag.rect(), color = (255, 0, 0)) - img.draw_cross(tag.cx(), tag.cy(), color = (0, 255, 0)) - print_args = (family_name(tag), tag.id(), (180 * tag.rotation()) / math.pi) - print("Tag Family %s, Tag ID %d, rotation %f (degrees)" % print_args) - print(clock.fps()) diff --git a/scripts/examples/Arduino/Nicla-Vision/26-April-Tags/find_apriltags_3d_pose.py b/scripts/examples/Arduino/Nicla-Vision/26-April-Tags/find_apriltags_3d_pose.py deleted file mode 100644 index c4cb289d6..000000000 --- a/scripts/examples/Arduino/Nicla-Vision/26-April-Tags/find_apriltags_3d_pose.py +++ /dev/null @@ -1,55 +0,0 @@ -# AprilTags Example -# -# This example shows the power of the OpenMV Cam to detect April Tags -# on the OpenMV Cam M7. The M4 versions cannot detect April Tags. - -import sensor, image, time, math - -sensor.reset() -sensor.set_pixformat(sensor.RGB565) -sensor.set_framesize(sensor.QQVGA) # we run out of memory if the resolution is much bigger... -sensor.skip_frames(time = 2000) -clock = time.clock() - -# Note! Unlike find_qrcodes the find_apriltags method does not need lens correction on the image to work. - -# What's the difference between tag families? Well, for example, the TAG16H5 family is effectively -# a 4x4 square tag. So, this means it can be seen at a longer distance than a TAG36H11 tag which -# is a 6x6 square tag. However, the lower H value (H5 versus H11) means that the false positve -# rate for the 4x4 tag is much, much, much, higher than the 6x6 tag. So, unless you have a -# reason to use the other tags families just use TAG36H11 which is the default family. - -# The AprilTags library outputs the pose information for tags. This is the x/y/z translation and -# x/y/z rotation. The x/y/z rotation is in radians and can be converted to degrees. As for -# translation the units are dimensionless and you must apply a conversion function. - -# f_x is the x focal length of the camera. It should be equal to the lens focal length in mm -# divided by the x sensor size in mm times the number of pixels in the image. -# The below values are for the OV7725 camera with a 2.8 mm lens. - -# f_y is the y focal length of the camera. It should be equal to the lens focal length in mm -# divided by the y sensor size in mm times the number of pixels in the image. -# The below values are for the OV7725 camera with a 2.8 mm lens. - -# c_x is the image x center position in pixels. -# c_y is the image y center position in pixels. 
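# (Worked numbers, assuming the 2.8 mm lens and OV7725 sensor cited above with
# a 3.984 mm x 2.952 mm active area at 160x120:
#
#   f_x = (2.8 / 3.984) * 160  ~= 112.4 pixels
#   f_y = (2.8 / 2.952) * 120  ~= 113.8 pixels
#
# To turn the dimensionless z_translation into real units, one common approach,
# assumed here rather than taken from this script, is to measure a tag once at
# a known distance and scale by known_distance / measured_z.)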
- -f_x = (2.8 / 3.984) * 160 # find_apriltags defaults to this if not set -f_y = (2.8 / 2.952) * 120 # find_apriltags defaults to this if not set -c_x = 160 * 0.5 # find_apriltags defaults to this if not set (the image.w * 0.5) -c_y = 120 * 0.5 # find_apriltags defaults to this if not set (the image.h * 0.5) - -def degrees(radians): - return (180 * radians) / math.pi - -while(True): - clock.tick() - img = sensor.snapshot() - for tag in img.find_apriltags(fx=f_x, fy=f_y, cx=c_x, cy=c_y): # defaults to TAG36H11 - img.draw_rectangle(tag.rect(), color = (255, 0, 0)) - img.draw_cross(tag.cx(), tag.cy(), color = (0, 255, 0)) - print_args = (tag.x_translation(), tag.y_translation(), tag.z_translation(), \ - degrees(tag.x_rotation()), degrees(tag.y_rotation()), degrees(tag.z_rotation())) - # Translation units are unknown. Rotation units are in degrees. - print("Tx: %f, Ty %f, Tz %f, Rx %f, Ry %f, Rz %f" % print_args) - print(clock.fps()) diff --git a/scripts/examples/Arduino/Nicla-Vision/26-April-Tags/find_apriltags_max_res.py b/scripts/examples/Arduino/Nicla-Vision/26-April-Tags/find_apriltags_max_res.py deleted file mode 100644 index 106650770..000000000 --- a/scripts/examples/Arduino/Nicla-Vision/26-April-Tags/find_apriltags_max_res.py +++ /dev/null @@ -1,56 +0,0 @@ -# AprilTags Max Res Example -# -# This example shows the power of the OpenMV Cam to detect April Tags -# on the OpenMV Cam M7. The M4 versions cannot detect April Tags. - -import sensor, image, time, math, omv - -sensor.reset() -sensor.set_pixformat(sensor.GRAYSCALE) -sensor.set_framesize(sensor.VGA) -sensor.set_windowing((240, 240)) -sensor.skip_frames(time = 2000) -clock = time.clock() - -# Note! Unlike find_qrcodes the find_apriltags method does not need lens correction on the image to work. - -# The apriltag code supports up to 6 tag families which can be processed at the same time. -# Returned tag objects will have their tag family and id within the tag family. - -tag_families = 0 -tag_families |= image.TAG16H5 # comment out to disable this family -tag_families |= image.TAG25H7 # comment out to disable this family -tag_families |= image.TAG25H9 # comment out to disable this family -tag_families |= image.TAG36H10 # comment out to disable this family -tag_families |= image.TAG36H11 # comment out to disable this family (default family) -tag_families |= image.ARTOOLKIT # comment out to disable this family - -# What's the difference between tag families? Well, for example, the TAG16H5 family is effectively -# a 4x4 square tag. So, this means it can be seen at a longer distance than a TAG36H11 tag which -# is a 6x6 square tag. However, the lower H value (H5 versus H11) means that the false positve -# rate for the 4x4 tag is much, much, much, higher than the 6x6 tag. So, unless you have a -# reason to use the other tags families just use TAG36H11 which is the default family. - -def family_name(tag): - if(tag.family() == image.TAG16H5): - return "TAG16H5" - if(tag.family() == image.TAG25H7): - return "TAG25H7" - if(tag.family() == image.TAG25H9): - return "TAG25H9" - if(tag.family() == image.TAG36H10): - return "TAG36H10" - if(tag.family() == image.TAG36H11): - return "TAG36H11" - if(tag.family() == image.ARTOOLKIT): - return "ARTOOLKIT" - -while(True): - clock.tick() - img = sensor.snapshot() - for tag in img.find_apriltags(families=tag_families): # defaults to TAG36H11 without "families". 
- img.draw_rectangle(tag.rect(), color = 127) - img.draw_cross(tag.cx(), tag.cy(), color = 127) - print_args = (family_name(tag), tag.id(), (180 * tag.rotation()) / math.pi) - print("Tag Family %s, Tag ID %d, rotation %f (degrees)" % print_args) - print(clock.fps()) diff --git a/scripts/examples/Arduino/Nicla-Vision/26-April-Tags/find_apriltags_w_lens_zoom.py b/scripts/examples/Arduino/Nicla-Vision/26-April-Tags/find_apriltags_w_lens_zoom.py deleted file mode 100644 index b15b05f4c..000000000 --- a/scripts/examples/Arduino/Nicla-Vision/26-April-Tags/find_apriltags_w_lens_zoom.py +++ /dev/null @@ -1,31 +0,0 @@ -# AprilTags Example -# -# This example shows the power of the OpenMV Cam to detect April Tags -# on the OpenMV Cam M7. The M4 versions cannot detect April Tags. - -import sensor, image, time, math - -sensor.reset() -sensor.set_pixformat(sensor.RGB565) -sensor.set_framesize(sensor.VGA) # we run out of memory if the resolution is much bigger... -sensor.set_windowing((160, 120)) # Look at center 160x120 pixels of the VGA resolution. -sensor.skip_frames(time = 2000) -clock = time.clock() - -# Note! Unlike find_qrcodes the find_apriltags method does not need lens correction on the image to work. - -# What's the difference between tag families? Well, for example, the TAG16H5 family is effectively -# a 4x4 square tag. So, this means it can be seen at a longer distance than a TAG36H11 tag which -# is a 6x6 square tag. However, the lower H value (H5 versus H11) means that the false positve -# rate for the 4x4 tag is much, much, much, higher than the 6x6 tag. So, unless you have a -# reason to use the other tags families just use TAG36H11 which is the default family. - -while(True): - clock.tick() - img = sensor.snapshot() - for tag in img.find_apriltags(): # defaults to TAG36H11 - img.draw_rectangle(tag.rect(), color = (255, 0, 0)) - img.draw_cross(tag.cx(), tag.cy(), color = (0, 255, 0)) - print_args = (tag.id(), (180 * tag.rotation()) / math.pi) - print("Tag Family TAG36H11, Tag ID %d, rotation %f (degrees)" % print_args) - print(clock.fps()) diff --git a/scripts/examples/Arduino/Nicla-Vision/26-April-Tags/find_small_apriltags.py b/scripts/examples/Arduino/Nicla-Vision/26-April-Tags/find_small_apriltags.py deleted file mode 100644 index d89db5046..000000000 --- a/scripts/examples/Arduino/Nicla-Vision/26-April-Tags/find_small_apriltags.py +++ /dev/null @@ -1,67 +0,0 @@ -# Find Small Apriltags -# -# This script shows off how to use blob tracking as a pre-filter to -# finding Apriltags in the image using blob tracking to find the -# area of where the tag is first and then calling find_apriltags -# on that blob. - -# Note, this script works well assuming most parts of the image do not -# pass the thresholding test... otherwise, you don't get a distance -# benefit. - -import sensor, image, time, math, omv - -# Set the thresholds to find a white object (i.e. tag border) -thresholds = (150, 255) - -sensor.reset() -sensor.set_pixformat(sensor.GRAYSCALE) -sensor.set_framesize(sensor.QVGA) -sensor.skip_frames(time = 200) # increase this to let the auto methods run for longer -clock = time.clock() - -# The apriltag code supports up to 6 tag families which can be processed at the same time. -# Returned tag objects will have their tag family and id within the tag family. 
-tag_families = 0
-tag_families |= image.TAG16H5 # comment out to disable this family
-tag_families |= image.TAG25H7 # comment out to disable this family
-tag_families |= image.TAG25H9 # comment out to disable this family
-tag_families |= image.TAG36H10 # comment out to disable this family
-tag_families |= image.TAG36H11 # comment out to disable this family (default family)
-tag_families |= image.ARTOOLKIT # comment out to disable this family
-
-while(True):
-    clock.tick()
-    img = sensor.snapshot()
-
-    # First, we find blobs that may be candidates for tags.
-    box_list = []
-
-    # AprilTags may fail due to not having enough RAM given the image size being passed.
-    tag_list = []
-
-    for blob in img.find_blobs([thresholds], pixels_threshold=100, area_threshold=100, merge=True):
-        # Next we look for a tag in an ROI that's bigger than the blob.
-        w = min(max(int(blob.w() * 1.2), 10), 160) # Not too small, not too big.
-        h = min(max(int(blob.h() * 1.2), 10), 160) # Not too small, not too big.
-        x = min(max(int(blob.x() + (blob.w()/4) - (w * 0.1)), 0), img.width()-1)
-        y = min(max(int(blob.y() + (blob.h()/4) - (h * 0.1)), 0), img.height()-1)
-
-        box_list.append((x, y, w, h)) # We'll draw these later.
-
-        # Since we constrain the ROI size, AprilTags shouldn't run out of RAM.
-        # But, if it does, we handle it...
-        try:
-            tag_list.extend(img.find_apriltags(roi=(x,y,w,h), families=tag_families))
-        except (MemoryError): # Don't catch all exceptions, otherwise you can't stop the script.
-            pass
-
-    for b in box_list:
-        img.draw_rectangle(b)
-    # Now print out the found tags
-    for tag in tag_list:
-        img.draw_rectangle(tag.rect())
-        img.draw_cross(tag.cx(), tag.cy())
-        for c in tag.corners():
-            img.draw_circle(c[0], c[1], 5)
-        print("Tag:", tag.cx(), tag.cy(), tag.rotation(), tag.id())
diff --git a/scripts/examples/Arduino/Nicla-Vision/34-Remote-Control/popular_features_as_the_remote_device.py b/scripts/examples/Arduino/Nicla-Vision/34-Remote-Control/popular_features_as_the_remote_device.py
deleted file mode 100644
index 583eb78fa..000000000
--- a/scripts/examples/Arduino/Nicla-Vision/34-Remote-Control/popular_features_as_the_remote_device.py
+++ /dev/null
@@ -1,271 +0,0 @@
-# Remote Control - As The Remote Device
-#
-# This script configures your OpenMV Cam as a co-processor that can be remotely controlled by
-# another microcontroller or computer such as an Arduino, ESP8266/ESP32, RaspberryPi, and
-# even another OpenMV Cam.
-#
-# This script is designed to pair with "popular_features_as_the_controller_device.py".
-
-import image, network, math, rpc, sensor, struct, tf
-
-sensor.reset()
-sensor.set_pixformat(sensor.RGB565)
-sensor.set_framesize(sensor.QVGA)
-sensor.skip_frames(time = 2000)
-
-# The RPC library above is installed on your OpenMV Cam and provides multiple classes for
-# allowing your OpenMV Cam to be controlled over CAN, I2C, SPI, UART, USB VCP, or LAN/WLAN.
-
-################################################################
-# Choose the interface you wish to control your OpenMV Cam over.
-################################################################
-
-# Uncomment the below line to set up your OpenMV Cam for control over CAN.
-#
-# * message_id - CAN message to use for data transport on the CAN bus (11-bit).
-# * bit_rate - CAN bit rate.
-# * sample_point - Tseg1/Tseg2 ratio. Typically 75%. (50.0, 62.5, 75, 87.5, etc.)
-#
-# NOTE: Master and slave message ids and CAN bit rates must match. Connect master can high to slave
-# can high and master can low to slave can low. The CAN bus must be terminated with 120 ohms.
-#
-# interface = rpc.rpc_can_slave(message_id=0x7FF, bit_rate=250000, sample_point=75)
-
-# Uncomment the below line to set up your OpenMV Cam for control over I2C.
-#
-# * slave_addr - I2C address.
-#
-# NOTE: Master and slave addresses must match. Connect master scl to slave scl and master sda
-# to slave sda. You must use external pull-ups. Finally, both devices must share a ground.
-#
-# interface = rpc.rpc_i2c_slave(slave_addr=0x12)
-
-# Uncomment the below line to set up your OpenMV Cam for control over SPI.
-#
-# * cs_pin - Slave Select Pin.
-# * clk_polarity - Idle clock level (0 or 1).
-# * clk_phase - Sample data on the first (0) or second edge (1) of the clock.
-#
-# NOTE: Master and slave settings must match. Connect CS, SCLK, MOSI, MISO to CS, SCLK, MOSI, MISO.
-# Finally, both devices must share a common ground.
-#
-# interface = rpc.rpc_spi_slave(cs_pin="P3", clk_polarity=1, clk_phase=0)
-
-# Uncomment the below line to set up your OpenMV Cam for control over UART.
-#
-# * baudrate - Serial Baudrate.
-#
-# NOTE: Master and slave baud rates must match. Connect master tx to slave rx and master rx to
-# slave tx. Finally, both devices must share a common ground.
-#
-interface = rpc.rpc_uart_slave(baudrate=115200)
-
-# Uncomment the below line to set up your OpenMV Cam for control over a USB VCP.
-#
-# interface = rpc.rpc_usb_vcp_slave()
-
-# Uncomment the below line to set up your OpenMV Cam for control over the LAN.
-#
-# network_if = network.LAN()
-# network_if.active(True)
-# network_if.ifconfig('dhcp')
-#
-# interface = rpc.rpc_network_slave(network_if)
-
-# Uncomment the below line to set up your OpenMV Cam for control over the WLAN.
-#
-# network_if = network.WLAN(network.STA_IF)
-# network_if.active(True)
-# network_if.connect('your-ssid', 'your-password')
-#
-# interface = rpc.rpc_network_slave(network_if)
-
-################################################################
-# Call Backs
-################################################################
-
-# Helper methods used by the callbacks below.
-
-def draw_detections(img, dects):
-    for d in dects:
-        c = d.corners()
-        l = len(c)
-        for i in range(l): img.draw_line(c[(i+0)%l] + c[(i+1)%l], color = (0, 255, 0))
-        img.draw_rectangle(d.rect(), color = (255, 0, 0))
-
-# Remote control works via callback methods that the controller
-# device calls via the rpc module on this device. Callbacks
-# are functions which take a bytes() object as their argument
-# and return a bytes() object as their result. The rpc module
-# takes care of moving the bytes() objects across the link.
-# bytes() objects may be up to the MicroPython int max in size.
-
-# When called, returns the x, y, w, and h of the largest face within view.
-#
-# data is unused
-def face_detection(data):
-    sensor.set_pixformat(sensor.GRAYSCALE)
-    sensor.set_framesize(sensor.QVGA)
-    faces = sensor.snapshot().gamma_corr(contrast=1.5).find_features(image.HaarCascade("frontalface"))
-    if not faces: return bytes() # No detections.
-    for f in faces: sensor.get_fb().draw_rectangle(f, color = (255, 255, 255))
-    out_face = max(faces, key = lambda f: f[2] * f[3])
-    return struct.pack("<HHHH", out_face[0], out_face[1], out_face[2], out_face[3])

[...]

-        image_format = (value_mixer >> 8) & 3
-        # To test combining different formats
-        if (image_format==1): small_img = small_img.to_bitmap(copy=True); status = 'bitmap '
-        if (image_format==2): small_img = small_img.to_grayscale(copy=True); status = 'grayscale '
-        if (image_format==3): small_img = small_img.to_rgb565(copy=True); status = 'rgb565 '
-
-    # update small image location
-    if BOUNCE:
-        x = x + xd
-        if (x < xmin or x > xmax):
-            xd = -xd
-
-        y = y + yd
-        if (y < ymin or y > ymax):
-            yd = -yd
-
-    # Update small image scale
-    if RESCALE:
-        rescale = rescale + rd
-        if (rescale < min_rescale or rescale > max_rescale):
-            rd = -rd
-
-    # Find the center of the image
-    scaled_width = int(small_img.width() * abs(rescale))
-    scaled_height = int(small_img.height() * abs(rescale))
-
-    apply_mask = CYCLE_MASK and ((value_mixer >> 9) & 1)
-    if apply_mask:
-        img.draw_image(small_img, int(x), int(y), mask=small_img.to_bitmap(copy=True), x_scale=rescale, y_scale=rescale, alpha=240, hint=image.IMAGE_HINT_BILINEAR | image.IMAGE_HINT_CENTER)
-        status += 'alpha:240 '
-        status += '+mask '
-    else:
-        img.draw_image(small_img, int(x), int(y), x_scale=rescale, y_scale=rescale, alpha=128, hint=image.IMAGE_HINT_BILINEAR | image.IMAGE_HINT_CENTER)
-        status += 'alpha:128 '
-
-    img.draw_string(8, 0, status, mono_space = False)
-
-    print(clock.fps())
diff --git a/scripts/examples/Arduino/Portenta-H7/03-Drawing/image_drawing_alpha_blending_test.py b/scripts/examples/Arduino/Portenta-H7/03-Drawing/image_drawing_alpha_blending_test.py
deleted file mode 100644
index e5a1da6f2..000000000
--- a/scripts/examples/Arduino/Portenta-H7/03-Drawing/image_drawing_alpha_blending_test.py
+++ /dev/null
@@ -1,71 +0,0 @@
-# Image Drawing Alpha Blending Test
-#
-# This script tests the performance and quality of the draw_image()
-# method which can perform nearest neighbor, bilinear, bicubic, and
-# area scaling along with color channel extraction, alpha blending,
-# color palette application, and alpha palette application.
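# (Hedged note on the alpha scale these drawing tests assume: alpha appears to
# run 0..256 with 256 fully opaque, which is why the ping-pong below reverses
# at alpha_value//alpha_div == 256; e.g.:
#
#   img.draw_image(big_img, 0, 0, alpha=128)  # roughly a 50% blend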
- -import sensor, image, time - -sensor.reset() -sensor.set_pixformat(sensor.GRAYSCALE) -sensor.set_framesize(sensor.QVGA) - -hint = image.BICUBIC # image.BILINEAR image.BICUBIC - -small_img = image.Image(4, 4, sensor.GRAYSCALE) -small_img.set_pixel(0, 0, (0, 0, 127)) -small_img.set_pixel(1, 0, (47, 255, 199)) -small_img.set_pixel(2, 0, (0, 188, 255)) -small_img.set_pixel(3, 0, (0, 0, 127)) -small_img.set_pixel(0, 1, (0, 176, 255)) -small_img.set_pixel(1, 1, (222, 0, 0 )) -small_img.set_pixel(2, 1, (50, 255, 195)) -small_img.set_pixel(3, 1, (86, 255, 160)) -small_img.set_pixel(0, 2, (255, 211, 0 )) -small_img.set_pixel(1, 2, (83, 255, 163)) -small_img.set_pixel(2, 2, (255, 211, 0)) -small_img.set_pixel(3, 2, (0, 80, 255)) -small_img.set_pixel(0, 3, (255, 118, 0 )) -small_img.set_pixel(1, 3, (127, 0, 0 )) -small_img.set_pixel(2, 3, (0, 144, 255)) -small_img.set_pixel(3, 3, (50, 255, 195)) -#small_img.to_grayscale() -#small_img.to_bitmap() - -big_img = image.Image(128, 128, sensor.GRAYSCALE) -big_img.draw_image(small_img, 0, 0, x_scale=32, y_scale=32, hint=hint) -#big_img.to_grayscale() -#big_img.to_bitmap() - -alpha_div = 1 -alpha_value = 0 -alpha_step = 2 - -x_bounce = sensor.width()//2 -x_bounce_toggle = 1 - -y_bounce = sensor.height()//2 -y_bounce_toggle = 1 - -clock = time.clock() -while(True): - clock.tick() - - img = sensor.snapshot() - #img.to_grayscale() - #img.to_bitmap() - img.draw_image(big_img, x_bounce, y_bounce, - rgb_channel=-1, alpha=alpha_value//alpha_div, - hint=hint|image.CENTER) - - x_bounce += x_bounce_toggle - if abs(x_bounce-(img.width()//2)) >= (img.width()//2): x_bounce_toggle = -x_bounce_toggle - - y_bounce += y_bounce_toggle - if abs(y_bounce-(img.height()//2)) >= (img.height()//2): y_bounce_toggle = -y_bounce_toggle - - alpha_value += alpha_step - if not alpha_value or alpha_value//alpha_div == 256: alpha_step = -alpha_step - - print(clock.fps()) diff --git a/scripts/examples/Arduino/Portenta-H7/03-Drawing/image_drawing_alpha_blending_with_color_table_test.py b/scripts/examples/Arduino/Portenta-H7/03-Drawing/image_drawing_alpha_blending_with_color_table_test.py deleted file mode 100644 index e7c0d8cbb..000000000 --- a/scripts/examples/Arduino/Portenta-H7/03-Drawing/image_drawing_alpha_blending_with_color_table_test.py +++ /dev/null @@ -1,81 +0,0 @@ -# Image Drawing Color Table with Alpha Blending Test -# -# This script tests the performance and quality of the draw_image() -# method which can perform nearest neighbor, bilinear, bicubic, and -# area scaling along with color channel extraction, alpha blending, -# color palette application, and alpha palette application. - -import sensor, image, time - -sensor.reset() -sensor.set_pixformat(sensor.GRAYSCALE) -sensor.set_framesize(sensor.QVGA) - -hint = image.BICUBIC # image.BILINEAR image.BICUBIC - -# RGB channel extraction is done after scaling normally, this -# may produce false colors. Set this flag to do it before. -# -hint |= 0 # image.EXTRACT_RGB_CHANNEL_FIRST - -# Color table application is done after scaling normally, this -# may produce false colors. Set this flag to do it before. 
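# (Sketch of the color-palette path, as assumed by these tests: a palette maps
# each 8-bit gray value through a 256-entry RGB565 lookup, so
# color_palette=sensor.PALETTE_IRONBOW renders the grayscale frame as a
# thermal-style false-color image; conceptually, out = palette[gray].)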
-# -hint |= 0 # image.APPLY_COLOR_PALETTE_FIRST - -small_img = image.Image(4, 4, sensor.GRAYSCALE) -small_img.set_pixel(0, 0, (0, 0, 127)) -small_img.set_pixel(1, 0, (47, 255, 199)) -small_img.set_pixel(2, 0, (0, 188, 255)) -small_img.set_pixel(3, 0, (0, 0, 127)) -small_img.set_pixel(0, 1, (0, 176, 255)) -small_img.set_pixel(1, 1, (222, 0, 0 )) -small_img.set_pixel(2, 1, (50, 255, 195)) -small_img.set_pixel(3, 1, (86, 255, 160)) -small_img.set_pixel(0, 2, (255, 211, 0 )) -small_img.set_pixel(1, 2, (83, 255, 163)) -small_img.set_pixel(2, 2, (255, 211, 0)) -small_img.set_pixel(3, 2, (0, 80, 255)) -small_img.set_pixel(0, 3, (255, 118, 0 )) -small_img.set_pixel(1, 3, (127, 0, 0 )) -small_img.set_pixel(2, 3, (0, 144, 255)) -small_img.set_pixel(3, 3, (50, 255, 195)) -#small_img.to_grayscale() -#small_img.to_bitmap() - -big_img = image.Image(128, 128, sensor.GRAYSCALE) -big_img.draw_image(small_img, 0, 0, x_scale=32, y_scale=32, hint=hint) -#big_img.to_grayscale() -#big_img.to_bitmap() - -alpha_div = 1 -alpha_value = 0 -alpha_step = 2 - -x_bounce = sensor.width()//2 -x_bounce_toggle = 1 - -y_bounce = sensor.height()//2 -y_bounce_toggle = 1 - -clock = time.clock() -while(True): - clock.tick() - - img = sensor.snapshot() - #img.to_grayscale() - #img.to_bitmap() - img.draw_image(big_img, x_bounce, y_bounce, - rgb_channel=-1, alpha=alpha_value//alpha_div, - color_palette=sensor.PALETTE_IRONBOW, hint=hint|image.CENTER) - - x_bounce += x_bounce_toggle - if abs(x_bounce-(img.width()//2)) >= (img.width()//2): x_bounce_toggle = -x_bounce_toggle - - y_bounce += y_bounce_toggle - if abs(y_bounce-(img.height()//2)) >= (img.height()//2): y_bounce_toggle = -y_bounce_toggle - - alpha_value += alpha_step - if not alpha_value or alpha_value//alpha_div == 256: alpha_step = -alpha_step - - print(clock.fps()) diff --git a/scripts/examples/Arduino/Portenta-H7/03-Drawing/image_drawing_alpha_table_test.py b/scripts/examples/Arduino/Portenta-H7/03-Drawing/image_drawing_alpha_table_test.py deleted file mode 100644 index 4262874d5..000000000 --- a/scripts/examples/Arduino/Portenta-H7/03-Drawing/image_drawing_alpha_table_test.py +++ /dev/null @@ -1,75 +0,0 @@ -# Image Drawing Alpha Table Test -# -# This script tests the performance and quality of the draw_image() -# method which can perform nearest neighbor, bilinear, bicubic, and -# area scaling along with color channel extraction, alpha blending, -# color palette application, and alpha palette application. 
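# (Sketch: an alpha palette gives each source gray value its own alpha, so
# blending can vary per pixel. The LUT built below, 255 if i > 127 else 0,
# makes bright source pixels opaque and dark ones fully transparent.)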
- -import sensor, image, time - -sensor.reset() -sensor.set_pixformat(sensor.GRAYSCALE) -sensor.set_framesize(sensor.QVGA) - -hint = image.BICUBIC # image.BILINEAR image.BICUBIC - -small_img = image.Image(4, 4, sensor.GRAYSCALE) -small_img.set_pixel(0, 0, (0, 0, 127)) -small_img.set_pixel(1, 0, (47, 255, 199)) -small_img.set_pixel(2, 0, (0, 188, 255)) -small_img.set_pixel(3, 0, (0, 0, 127)) -small_img.set_pixel(0, 1, (0, 176, 255)) -small_img.set_pixel(1, 1, (222, 0, 0 )) -small_img.set_pixel(2, 1, (50, 255, 195)) -small_img.set_pixel(3, 1, (86, 255, 160)) -small_img.set_pixel(0, 2, (255, 211, 0 )) -small_img.set_pixel(1, 2, (83, 255, 163)) -small_img.set_pixel(2, 2, (255, 211, 0)) -small_img.set_pixel(3, 2, (0, 80, 255)) -small_img.set_pixel(0, 3, (255, 118, 0 )) -small_img.set_pixel(1, 3, (127, 0, 0 )) -small_img.set_pixel(2, 3, (0, 144, 255)) -small_img.set_pixel(3, 3, (50, 255, 195)) -#small_img.to_grayscale() -#small_img.to_bitmap() - -big_img = image.Image(128, 128, sensor.GRAYSCALE) -big_img.draw_image(small_img, 0, 0, x_scale=32, y_scale=32, hint=hint) -#big_img.to_grayscale() -#big_img.to_bitmap() - -alpha_lut = image.Image(256, 1, sensor.GRAYSCALE) -for i in range(256): - alpha_lut.set_pixel(i, 0, 255 if i > 127 else 0) - -alpha_div = 1 -alpha_value = 0 -alpha_step = 2 - -x_bounce = sensor.width()//2 -x_bounce_toggle = 1 - -y_bounce = sensor.height()//2 -y_bounce_toggle = 1 - -clock = time.clock() -while(True): - clock.tick() - - img = sensor.snapshot() - #img.to_grayscale() - #img.to_bitmap() - img.draw_image(big_img, x_bounce, y_bounce, - rgb_channel=-1, alpha=alpha_value//alpha_div, - alpha_palette=alpha_lut, hint=hint|image.CENTER) - - x_bounce += x_bounce_toggle - if abs(x_bounce-(img.width()//2)) >= (img.width()//2): x_bounce_toggle = -x_bounce_toggle - - y_bounce += y_bounce_toggle - if abs(y_bounce-(img.height()//2)) >= (img.height()//2): y_bounce_toggle = -y_bounce_toggle - - alpha_value += alpha_step - if not alpha_value or alpha_value//alpha_div == 256: alpha_step = -alpha_step - - print(clock.fps()) diff --git a/scripts/examples/Arduino/Portenta-H7/03-Drawing/image_drawing_alpha_table_with_color_table_test.py b/scripts/examples/Arduino/Portenta-H7/03-Drawing/image_drawing_alpha_table_with_color_table_test.py deleted file mode 100644 index 8bb3096e3..000000000 --- a/scripts/examples/Arduino/Portenta-H7/03-Drawing/image_drawing_alpha_table_with_color_table_test.py +++ /dev/null @@ -1,85 +0,0 @@ -# Image Drawing Color Table with Alpha Table Test -# -# This script tests the performance and quality of the draw_image() -# method which can perform nearest neighbor, bilinear, bicubic, and -# area scaling along with color channel extraction, alpha blending, -# color palette application, and alpha palette application. - -import sensor, image, time - -sensor.reset() -sensor.set_pixformat(sensor.GRAYSCALE) -sensor.set_framesize(sensor.QVGA) - -hint = image.BICUBIC # image.BILINEAR image.BICUBIC - -# RGB channel extraction is done after scaling normally, this -# may produce false colors. Set this flag to do it before. -# -hint |= 0 # image.EXTRACT_RGB_CHANNEL_FIRST - -# Color table application is done after scaling normally, this -# may produce false colors. Set this flag to do it before. 
-# -hint |= 0 # image.APPLY_COLOR_PALETTE_FIRST - -small_img = image.Image(4, 4, sensor.GRAYSCALE) -small_img.set_pixel(0, 0, (0, 0, 127)) -small_img.set_pixel(1, 0, (47, 255, 199)) -small_img.set_pixel(2, 0, (0, 188, 255)) -small_img.set_pixel(3, 0, (0, 0, 127)) -small_img.set_pixel(0, 1, (0, 176, 255)) -small_img.set_pixel(1, 1, (222, 0, 0 )) -small_img.set_pixel(2, 1, (50, 255, 195)) -small_img.set_pixel(3, 1, (86, 255, 160)) -small_img.set_pixel(0, 2, (255, 211, 0 )) -small_img.set_pixel(1, 2, (83, 255, 163)) -small_img.set_pixel(2, 2, (255, 211, 0)) -small_img.set_pixel(3, 2, (0, 80, 255)) -small_img.set_pixel(0, 3, (255, 118, 0 )) -small_img.set_pixel(1, 3, (127, 0, 0 )) -small_img.set_pixel(2, 3, (0, 144, 255)) -small_img.set_pixel(3, 3, (50, 255, 195)) -#small_img.to_grayscale() -#small_img.to_bitmap() - -big_img = image.Image(128, 128, sensor.GRAYSCALE) -big_img.draw_image(small_img, 0, 0, x_scale=32, y_scale=32, hint=hint) -#big_img.to_grayscale() -#big_img.to_bitmap() - -alpha_lut = image.Image(256, 1, sensor.GRAYSCALE) -for i in range(256): - alpha_lut.set_pixel(i, 0, 255 if i > 127 else 0) - -alpha_div = 1 -alpha_value = 0 -alpha_step = 2 - -x_bounce = sensor.width()//2 -x_bounce_toggle = 1 - -y_bounce = sensor.height()//2 -y_bounce_toggle = 1 - -clock = time.clock() -while(True): - clock.tick() - - img = sensor.snapshot() - #img.to_grayscale() - #img.to_bitmap() - img.draw_image(big_img, x_bounce, y_bounce, - rgb_channel=-1, alpha=alpha_value//alpha_div, - color_palette=sensor.PALETTE_IRONBOW, alpha_palette=alpha_lut, hint=hint|image.CENTER) - - x_bounce += x_bounce_toggle - if abs(x_bounce-(img.width()//2)) >= (img.width()//2): x_bounce_toggle = -x_bounce_toggle - - y_bounce += y_bounce_toggle - if abs(y_bounce-(img.height()//2)) >= (img.height()//2): y_bounce_toggle = -y_bounce_toggle - - alpha_value += alpha_step - if not alpha_value or alpha_value//alpha_div == 256: alpha_step = -alpha_step - - print(clock.fps()) diff --git a/scripts/examples/Arduino/Portenta-H7/03-Drawing/image_drawing_scale_down_test.py b/scripts/examples/Arduino/Portenta-H7/03-Drawing/image_drawing_scale_down_test.py deleted file mode 100644 index 0d4b8e573..000000000 --- a/scripts/examples/Arduino/Portenta-H7/03-Drawing/image_drawing_scale_down_test.py +++ /dev/null @@ -1,69 +0,0 @@ -# Image Scaling Down Drawing Test -# -# This script tests the performance and quality of the draw_image() -# method which can perform nearest neighbor, bilinear, bicubic, and -# area scaling along with color channel extraction, alpha blending, -# color palette application, and alpha palette application. 
- -# DISABLE THE FRAME BUFFER TO SEE THE REAL FPS - -import sensor, image, time - -up_hint = 0 # image.BILINEAR image.BICUBIC -down_hint = image.AREA # image.BILINEAR image.BICUBIC image.AREA - -bounce_div = 128 - -medium_img = image.Image(32, 32, sensor.RGB565, copy_to_fb=True) -#medium_img.to_grayscale() -#medium_img.to_bitmap() - -small_img = image.Image(4, 4, sensor.GRAYSCALE) -small_img.set_pixel(0, 0, (0, 0, 127)) -small_img.set_pixel(1, 0, (47, 255, 199)) -small_img.set_pixel(2, 0, (0, 188, 255)) -small_img.set_pixel(3, 0, (0, 0, 127)) -small_img.set_pixel(0, 1, (0, 176, 255)) -small_img.set_pixel(1, 1, (222, 0, 0 )) -small_img.set_pixel(2, 1, (50, 255, 195)) -small_img.set_pixel(3, 1, (86, 255, 160)) -small_img.set_pixel(0, 2, (255, 211, 0 )) -small_img.set_pixel(1, 2, (83, 255, 163)) -small_img.set_pixel(2, 2, (255, 211, 0)) -small_img.set_pixel(3, 2, (0, 80, 255)) -small_img.set_pixel(0, 3, (255, 118, 0 )) -small_img.set_pixel(1, 3, (127, 0, 0 )) -small_img.set_pixel(2, 3, (0, 144, 255)) -small_img.set_pixel(3, 3, (50, 255, 195)) -#small_img.to_grayscale() -#small_img.to_bitmap() - -big_img = image.Image(128, 128, sensor.GRAYSCALE) -big_img.draw_image(small_img, 0, 0, x_scale=32, y_scale=32, hint=up_hint) -#big_img.to_grayscale() -#big_img.to_bitmap() - -x_bounce = 0 -x_bounce_toggle = 0 - -y_bounce = 0 -y_bounce_toggle = 0 - -clock = time.clock() -while(True): - clock.tick() - - medium_img.clear() - medium_img.draw_image(big_img, - x_bounce // bounce_div, y_bounce // bounce_div, - x_scale=0.25, y_scale=0.25, - hint=down_hint) - sensor.flush() - - x_bounce += x_bounce_toggle - if abs(x_bounce // bounce_div) >= (medium_img.width()*1.1): x_bounce_toggle = -x_bounce_toggle - - y_bounce += y_bounce_toggle - if abs(y_bounce // bounce_div) >= (medium_img.height()*1.1): y_bounce_toggle = -y_bounce_toggle - - print(clock.fps()) diff --git a/scripts/examples/Arduino/Portenta-H7/03-Drawing/image_drawing_scale_up_test.py b/scripts/examples/Arduino/Portenta-H7/03-Drawing/image_drawing_scale_up_test.py deleted file mode 100644 index 5f664a8c1..000000000 --- a/scripts/examples/Arduino/Portenta-H7/03-Drawing/image_drawing_scale_up_test.py +++ /dev/null @@ -1,63 +0,0 @@ -# Image Scaling Up Drawing Test -# -# This script tests the performance and quality of the draw_image() -# method which can perform nearest neighbor, bilinear, bicubic, and -# area scaling along with color channel extraction, alpha blending, -# color palette application, and alpha palette application. 
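As an editorial aside, the hint argument is what selects the interpolation draw_image() uses; the following minimal sketch shows the usual choices (buffer sizes and scales here are illustrative, and assume any board with the standard sensor/image modules):

import sensor, image

sensor.reset()
sensor.set_pixformat(sensor.GRAYSCALE)
sensor.set_framesize(sensor.QVGA)

small = image.Image(4, 4, sensor.GRAYSCALE)   # tiny source image
big = image.Image(64, 64, sensor.GRAYSCALE)   # upscale target

# hint=0 is nearest neighbor; BILINEAR/BICUBIC smooth an upscale.
big.draw_image(small, 0, 0, x_scale=16, y_scale=16, hint=image.BICUBIC)

# AREA averages all covered source pixels instead of skipping them,
# which is generally the right hint when scaling down.
small.draw_image(big, 0, 0, x_scale=1/16, y_scale=1/16, hint=image.AREA)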
-
-# DISABLE THE FRAME BUFFER TO SEE THE REAL FPS
-
-import sensor, image, time
-
-hint = 0 # image.BILINEAR image.BICUBIC
-
-bounce_div = 32
-
-big_img = image.Image(128, 128, sensor.RGB565, copy_to_fb=True)
-#big_img.to_grayscale()
-#big_img.to_bitmap()
-
-small_img = image.Image(4, 4, sensor.GRAYSCALE)
-small_img.set_pixel(0, 0, (0, 0, 127))
-small_img.set_pixel(1, 0, (47, 255, 199))
-small_img.set_pixel(2, 0, (0, 188, 255))
-small_img.set_pixel(3, 0, (0, 0, 127))
-small_img.set_pixel(0, 1, (0, 176, 255))
-small_img.set_pixel(1, 1, (222, 0, 0 ))
-small_img.set_pixel(2, 1, (50, 255, 195))
-small_img.set_pixel(3, 1, (86, 255, 160))
-small_img.set_pixel(0, 2, (255, 211, 0 ))
-small_img.set_pixel(1, 2, (83, 255, 163))
-small_img.set_pixel(2, 2, (255, 211, 0))
-small_img.set_pixel(3, 2, (0, 80, 255))
-small_img.set_pixel(0, 3, (255, 118, 0 ))
-small_img.set_pixel(1, 3, (127, 0, 0 ))
-small_img.set_pixel(2, 3, (0, 144, 255))
-small_img.set_pixel(3, 3, (50, 255, 195))
-#small_img.to_grayscale()
-#small_img.to_bitmap()
-
-x_bounce = 0
-x_bounce_toggle = 0
-
-y_bounce = 0
-y_bounce_toggle = 0
-
-clock = time.clock()
-while(True):
-    clock.tick()
-
-    big_img.clear()
-    big_img.draw_image(small_img,
-                       x_bounce // bounce_div, y_bounce // bounce_div,
-                       x_scale=32, y_scale=32,
-                       hint=hint)
-    sensor.flush()
-
-    x_bounce += x_bounce_toggle
-    if abs(x_bounce // bounce_div) >= (big_img.width()*1.1): x_bounce_toggle = -x_bounce_toggle
-
-    y_bounce += y_bounce_toggle
-    if abs(y_bounce // bounce_div) >= (big_img.height()*1.1): y_bounce_toggle = -y_bounce_toggle
-
-    print(clock.fps())
diff --git a/scripts/examples/Arduino/Portenta-H7/03-Drawing/image_drawing_with_custom_palette.py b/scripts/examples/Arduino/Portenta-H7/03-Drawing/image_drawing_with_custom_palette.py
deleted file mode 100644
index 2665e6f3a..000000000
--- a/scripts/examples/Arduino/Portenta-H7/03-Drawing/image_drawing_with_custom_palette.py
+++ /dev/null
@@ -1,43 +0,0 @@
-# Draw Image Example with custom color palette
-#
-# This example shows off how to draw images in the frame buffer with a custom generated color palette.
-
-import sensor, image, time, pyb
-
-sensor.reset()
-sensor.set_pixformat(sensor.GRAYSCALE)
-sensor.set_framesize(sensor.QQVGA) # or QVGA...
-sensor.skip_frames(time = 2000)
-clock = time.clock()
-
-# The color palette is actually an image; this allows you to use image ops to create palettes.
-# The image must have 256 entries, i.e. 256x1, 64x4, or 16x16, and have the format rgb565.
-
-# Initialise palette source colors into an image
-palette_source_colors = [(255, 0, 0), (0, 255, 0), (0, 0, 255), (255, 0, 255)]
-palette_source_color_image = image.Image(len(palette_source_colors), 1, sensor.RGB565)
-for i, color in enumerate(palette_source_colors):
-    palette_source_color_image[i] = color
-
-# Scale the image to the palette width and smooth it
-palette = image.Image(256, 1, sensor.RGB565)
-palette.draw_image(palette_source_color_image, 0, 0, x_scale=palette.width() / palette_source_color_image.width())
-palette.mean(int(palette.width() / palette_source_color_image.width() / 2))
-
-while(True):
-    clock.tick()
-
-    img = sensor.snapshot()
-    # Get a copy of the grayscale image before converting to color
-    img_copy = img.copy()
-
-    img.to_rgb565()
-
-    palette_boundary_inset = int(sensor.width() / 40)
-    palette_scale_x = (sensor.width() - palette_boundary_inset * 2) / palette.width()
-
-    img.draw_image(img_copy, 0, 0, color_palette=palette)
-    img.draw_image(palette, palette_boundary_inset, palette_boundary_inset, x_scale=palette_scale_x, y_scale=8)
-    img.draw_rectangle(palette_boundary_inset, palette_boundary_inset, int(palette.width()*palette_scale_x), 8, color=(255,255,255), thickness=1)
-
-    print(clock.fps())
diff --git a/scripts/examples/Arduino/Portenta-H7/03-Drawing/keypoints_drawing.py b/scripts/examples/Arduino/Portenta-H7/03-Drawing/keypoints_drawing.py
deleted file mode 100644
index 84c56562b..000000000
--- a/scripts/examples/Arduino/Portenta-H7/03-Drawing/keypoints_drawing.py
+++ /dev/null
@@ -1,31 +0,0 @@
-# Keypoints Drawing
-#
-# This example shows off drawing keypoints on the OpenMV Cam. Usually you call draw_keypoints()
-# on a keypoints object, but you can also call it on a list of 3-value tuples...
-
-import sensor, image, time, pyb
-
-sensor.reset()
-sensor.set_pixformat(sensor.GRAYSCALE) # or RGB565...
-sensor.set_framesize(sensor.QVGA) # or QQVGA...
-sensor.skip_frames(time = 2000)
-clock = time.clock()
-
-while(True):
-    clock.tick()
-
-    img = sensor.snapshot()
-
-    for i in range(20):
-        x = (pyb.rng() % (2*img.width())) - (img.width()//2)
-        y = (pyb.rng() % (2*img.height())) - (img.height()//2)
-        rot = pyb.rng() % 360
-
-        r = (pyb.rng() % 127) + 128
-        g = (pyb.rng() % 127) + 128
-        b = (pyb.rng() % 127) + 128
-
-        # This method draws a keypoints object or a list of (x, y, rot) tuples...
-        img.draw_keypoints([(x, y, rot)], color = (r, g, b), size = 20, thickness = 2, fill = False)
-
-    print(clock.fps())
diff --git a/scripts/examples/Arduino/Portenta-H7/03-Drawing/line_drawing.py b/scripts/examples/Arduino/Portenta-H7/03-Drawing/line_drawing.py
deleted file mode 100644
index 76ebad142..000000000
--- a/scripts/examples/Arduino/Portenta-H7/03-Drawing/line_drawing.py
+++ /dev/null
@@ -1,31 +0,0 @@
-# Line Drawing
-#
-# This example shows off drawing lines on the OpenMV Cam.
-
-import sensor, image, time, pyb
-
-sensor.reset()
-sensor.set_pixformat(sensor.GRAYSCALE) # or RGB565...
-sensor.set_framesize(sensor.QVGA) # or QQVGA...
-sensor.skip_frames(time = 2000)
-clock = time.clock()
-
-while(True):
-    clock.tick()
-
-    img = sensor.snapshot()
-
-    for i in range(10):
-        x0 = (pyb.rng() % (2*img.width())) - (img.width()//2)
-        y0 = (pyb.rng() % (2*img.height())) - (img.height()//2)
-        x1 = (pyb.rng() % (2*img.width())) - (img.width()//2)
-        y1 = (pyb.rng() % (2*img.height())) - (img.height()//2)
-        r = (pyb.rng() % 127) + 128
-        g = (pyb.rng() % 127) + 128
-        b = (pyb.rng() % 127) + 128
-
-        # If the first argument is a scalar then this method expects
-        # to see x0, y0, x1, and y1. Otherwise, it expects a (x0,y0,x1,y1) tuple.
-        img.draw_line(x0, y0, x1, y1, color = (r, g, b), thickness = 2)
-
-    print(clock.fps())
diff --git a/scripts/examples/Arduino/Portenta-H7/03-Drawing/rectangle_drawing.py b/scripts/examples/Arduino/Portenta-H7/03-Drawing/rectangle_drawing.py
deleted file mode 100644
index a268a29e5..000000000
--- a/scripts/examples/Arduino/Portenta-H7/03-Drawing/rectangle_drawing.py
+++ /dev/null
@@ -1,31 +0,0 @@
-# Rectangle Drawing
-#
-# This example shows off drawing rectangles on the OpenMV Cam.
-
-import sensor, image, time, pyb
-
-sensor.reset()
-sensor.set_pixformat(sensor.GRAYSCALE) # or RGB565...
-sensor.set_framesize(sensor.QVGA) # or QQVGA...
-sensor.skip_frames(time = 2000)
-clock = time.clock()
-
-while(True):
-    clock.tick()
-
-    img = sensor.snapshot()
-
-    for i in range(10):
-        x = (pyb.rng() % (2*img.width())) - (img.width()//2)
-        y = (pyb.rng() % (2*img.height())) - (img.height()//2)
-        w = (pyb.rng() % (img.width()//2))
-        h = (pyb.rng() % (img.height()//2))
-        r = (pyb.rng() % 127) + 128
-        g = (pyb.rng() % 127) + 128
-        b = (pyb.rng() % 127) + 128
-
-        # If the first argument is a scalar then this method expects
-        # to see x, y, w, and h. Otherwise, it expects a (x,y,w,h) tuple.
-        img.draw_rectangle(x, y, w, h, color = (r, g, b), thickness = 2, fill = False)
-
-    print(clock.fps())
diff --git a/scripts/examples/Arduino/Portenta-H7/03-Drawing/text_drawing.py b/scripts/examples/Arduino/Portenta-H7/03-Drawing/text_drawing.py
deleted file mode 100644
index 3bd016e10..000000000
--- a/scripts/examples/Arduino/Portenta-H7/03-Drawing/text_drawing.py
+++ /dev/null
@@ -1,33 +0,0 @@
-# Text Drawing
-#
-# This example shows off drawing text on the OpenMV Cam.
-
-import sensor, image, time, pyb
-
-sensor.reset()
-sensor.set_pixformat(sensor.GRAYSCALE) # or RGB565...
-sensor.set_framesize(sensor.QVGA) # or QQVGA...
-sensor.skip_frames(time = 2000)
-clock = time.clock()
-
-while(True):
-    clock.tick()
-
-    img = sensor.snapshot()
-
-    for i in range(10):
-        x = (pyb.rng() % (2*img.width())) - (img.width()//2)
-        y = (pyb.rng() % (2*img.height())) - (img.height()//2)
-        r = (pyb.rng() % 127) + 128
-        g = (pyb.rng() % 127) + 128
-        b = (pyb.rng() % 127) + 128
-
-        # If the first argument is a scalar then this method expects
-        # to see x, y, and text. Otherwise, it expects a (x,y,text) tuple.
-
-        # Character and string rotation can be done at 0, 90, 180, and 270 degrees.
-        img.draw_string(x, y, "Hello World!", color = (r, g, b), scale = 2, mono_space = False,
-                        char_rotation = 0, char_hmirror = False, char_vflip = False,
-                        string_rotation = 0, string_hmirror = False, string_vflip = False)
-
-    print(clock.fps())
diff --git a/scripts/examples/Arduino/Portenta-H7/04-Image-Filters/adaptive_histogram_equalization.py b/scripts/examples/Arduino/Portenta-H7/04-Image-Filters/adaptive_histogram_equalization.py
deleted file mode 100644
index cd3b46fd5..000000000
--- a/scripts/examples/Arduino/Portenta-H7/04-Image-Filters/adaptive_histogram_equalization.py
+++ /dev/null
@@ -1,29 +0,0 @@
-# Adaptive Histogram Equalization
-#
-# This example shows off how to use adaptive histogram equalization to improve
-# the contrast in the image. Adaptive histogram equalization splits the image
-# into regions and then equalizes the histogram in those regions to improve
-# the image contrast versus a global histogram equalization. Additionally,
-# you may specify a clip limit to prevent the contrast from going wild.
-
-import sensor, image, time
-
-sensor.reset()
-sensor.set_pixformat(sensor.GRAYSCALE)
-sensor.set_framesize(sensor.QQVGA)
-sensor.skip_frames(time = 2000)
-clock = time.clock()
-
-while(True):
-    clock.tick()
-
-    # A clip_limit of < 0 gives you normal adaptive histogram equalization
-    # which may result in huge amounts of contrast noise...
-
-    # A clip_limit of 1 does nothing. For best results go slightly higher
-    # than 1 like below. The higher you go the closer you get back to
-    # standard adaptive histogram equalization with huge contrast swings.
-
-    img = sensor.snapshot().histeq(adaptive=True, clip_limit=3)
-
-    print(clock.fps())
diff --git a/scripts/examples/Arduino/Portenta-H7/04-Image-Filters/blur_filter.py b/scripts/examples/Arduino/Portenta-H7/04-Image-Filters/blur_filter.py
deleted file mode 100644
index 6074d2f2b..000000000
--- a/scripts/examples/Arduino/Portenta-H7/04-Image-Filters/blur_filter.py
+++ /dev/null
@@ -1,21 +0,0 @@
-# Blur Filter Example
-#
-# This example shows off using the gaussian filter to blur images.
-
-import sensor, image, time
-
-sensor.reset() # Initialize the camera sensor.
-sensor.set_pixformat(sensor.GRAYSCALE) # or sensor.RGB565
-sensor.set_framesize(sensor.QQVGA) # or sensor.QVGA (or others)
-sensor.skip_frames(time = 2000) # Let new settings take effect.
-clock = time.clock() # Tracks FPS.
-
-while(True):
-    clock.tick() # Track elapsed milliseconds between snapshots().
-    img = sensor.snapshot() # Take a picture and return the image.
-
-    # Run the kernel on every pixel of the image.
-    img.gaussian(1)
-
-    print(clock.fps()) # Note: Your OpenMV Cam runs about half as fast while
-    # connected to your computer. The FPS should increase once disconnected.
diff --git a/scripts/examples/Arduino/Portenta-H7/04-Image-Filters/cartoon_filter.py b/scripts/examples/Arduino/Portenta-H7/04-Image-Filters/cartoon_filter.py
deleted file mode 100644
index 94ea4a18e..000000000
--- a/scripts/examples/Arduino/Portenta-H7/04-Image-Filters/cartoon_filter.py
+++ /dev/null
@@ -1,29 +0,0 @@
-# Cartoon Filter
-#
-# This example shows off a simple cartoon filter on images. The cartoon
-# filter works by joining similar pixel areas of an image and replacing
-# the pixels in those areas with the area mean.
-
-import sensor, image, time
-
-sensor.reset()
-sensor.set_pixformat(sensor.GRAYSCALE) # or RGB565...
-sensor.set_framesize(sensor.QVGA) # or QQVGA...
-sensor.skip_frames(time = 2000)
-clock = time.clock()
-
-while(True):
-    clock.tick()
-
-    # seed_threshold controls the maximum area growth of a colored
-    # region. Making this larger will merge more pixels.
-
-    # floating_thresholds controls the maximum pixel-to-pixel difference
-    # when growing a region. Setting this very high will quickly combine
-    # all pixels in the image. You should keep this small.
-
-    # cartoon() will grow regions while both thresholds are satisfied...
-
-    img = sensor.snapshot().cartoon(seed_threshold=0.05, floating_thresholds=0.05)
-
-    print(clock.fps())
diff --git a/scripts/examples/Arduino/Portenta-H7/04-Image-Filters/color_bilateral_filter.py b/scripts/examples/Arduino/Portenta-H7/04-Image-Filters/color_bilateral_filter.py
deleted file mode 100644
index 0345ede12..000000000
--- a/scripts/examples/Arduino/Portenta-H7/04-Image-Filters/color_bilateral_filter.py
+++ /dev/null
@@ -1,33 +0,0 @@
-# Color Bilateral Filter Example
-#
-# This example shows off using the bilateral filter on color images.
-
-import sensor, image, time
-
-sensor.reset() # Initialize the camera sensor.
-sensor.set_pixformat(sensor.RGB565) # or sensor.GRAYSCALE
-sensor.set_framesize(sensor.QQVGA) # or sensor.QVGA (or others)
-sensor.skip_frames(time = 2000) # Let new settings take effect.
-clock = time.clock() # Tracks FPS.
-
-while(True):
-    clock.tick() # Track elapsed milliseconds between snapshots().
-    img = sensor.snapshot() # Take a picture and return the image.
-
-    # color_sigma controls how close color-wise pixels have to be to each other
-    # to be blurred together. A smaller value means they have to be closer.
-    # A larger value is less strict.
-
-    # space_sigma controls how close space-wise pixels have to be to each other
-    # to be blurred together. A smaller value means they have to be closer.
-    # A larger value is less strict.
-
-    # Run the kernel on every pixel of the image.
-    img.bilateral(3, color_sigma=0.1, space_sigma=1)
-
-    # Note that the bilateral filter can introduce image defects if you set
-    # color_sigma/space_sigma too aggressively. Increase the sigma values until
-    # the defects go away if you see them.
-
-    print(clock.fps()) # Note: Your OpenMV Cam runs about half as fast while
-    # connected to your computer. The FPS should increase once disconnected.
diff --git a/scripts/examples/Arduino/Portenta-H7/04-Image-Filters/color_binary_filter.py b/scripts/examples/Arduino/Portenta-H7/04-Image-Filters/color_binary_filter.py
deleted file mode 100644
index 642347908..000000000
--- a/scripts/examples/Arduino/Portenta-H7/04-Image-Filters/color_binary_filter.py
+++ /dev/null
@@ -1,61 +0,0 @@
-# Color Binary Filter Example
-#
-# This script shows off the binary image filter. You may pass binary() any
-# number of thresholds to segment the image by.
-
-import sensor, image, time
-
-sensor.reset()
-sensor.set_framesize(sensor.QVGA)
-sensor.set_pixformat(sensor.RGB565)
-sensor.skip_frames(time = 2000)
-clock = time.clock()
-
-# Use the Tools -> Machine Vision -> Threshold Editor to pick better thresholds.
-red_threshold = (0,100, 0,127, 0,127) # L A B
-green_threshold = (0,100, -128,0, 0,127) # L A B
-blue_threshold = (0,100, -128,127, -128,0) # L A B
-
-while(True):
-
-    # Test red threshold
-    for i in range(100):
-        clock.tick()
-        img = sensor.snapshot()
-        img.binary([red_threshold])
-        print(clock.fps())
-
-    # Test green threshold
-    for i in range(100):
-        clock.tick()
-        img = sensor.snapshot()
-        img.binary([green_threshold])
-        print(clock.fps())
-
-    # Test blue threshold
-    for i in range(100):
-        clock.tick()
-        img = sensor.snapshot()
-        img.binary([blue_threshold])
-        print(clock.fps())
-
-    # Test not red threshold
-    for i in range(100):
-        clock.tick()
-        img = sensor.snapshot()
-        img.binary([red_threshold], invert = 1)
-        print(clock.fps())
-
-    # Test not green threshold
-    for i in range(100):
-        clock.tick()
-        img = sensor.snapshot()
-        img.binary([green_threshold], invert = 1)
-        print(clock.fps())
-
-    # Test not blue threshold
-    for i in range(100):
-        clock.tick()
-        img = sensor.snapshot()
-        img.binary([blue_threshold], invert = 1)
-        print(clock.fps())
diff --git a/scripts/examples/Arduino/Portenta-H7/04-Image-Filters/color_light_removal.py b/scripts/examples/Arduino/Portenta-H7/04-Image-Filters/color_light_removal.py
deleted file mode 100644
index 738e60168..000000000
--- a/scripts/examples/Arduino/Portenta-H7/04-Image-Filters/color_light_removal.py
+++ /dev/null
@@ -1,25 +0,0 @@
-# Color Light Removal
-#
-# This example shows off how to remove bright lights from the image.
-# You can do this using the binary() method with the "zero=" argument.
-#
-# Removing bright lights from the image allows you to now use
-# histeq() on the image without outliers from oversaturated
-# parts of the image breaking the algorithm...
-
-import sensor, image, time
-
-sensor.reset() # Initialize the camera sensor.
-sensor.set_pixformat(sensor.RGB565) # or sensor.GRAYSCALE
-sensor.set_framesize(sensor.QQVGA) # or sensor.QVGA (or others)
-sensor.skip_frames(time = 2000) # Let new settings take effect.
-clock = time.clock() # Tracks FPS.
-
-thresholds = (90, 100, -128, 127, -128, 127)
-
-while(True):
-    clock.tick() # Track elapsed milliseconds between snapshots().
-    img = sensor.snapshot().binary([thresholds], invert=False, zero=True)
-
-    print(clock.fps()) # Note: Your OpenMV Cam runs about half as fast while
-    # connected to your computer. The FPS should increase once disconnected.
diff --git a/scripts/examples/Arduino/Portenta-H7/04-Image-Filters/edge_filter.py b/scripts/examples/Arduino/Portenta-H7/04-Image-Filters/edge_filter.py
deleted file mode 100644
index cdc03ddf3..000000000
--- a/scripts/examples/Arduino/Portenta-H7/04-Image-Filters/edge_filter.py
+++ /dev/null
@@ -1,21 +0,0 @@
-# Edge Filter Example
-#
-# This example shows off using the laplacian filter to detect edges.
-
-import sensor, image, time
-
-sensor.reset() # Initialize the camera sensor.
-sensor.set_pixformat(sensor.GRAYSCALE) # or sensor.RGB565
-sensor.set_framesize(sensor.QQVGA) # or sensor.QVGA (or others)
-sensor.skip_frames(time = 2000) # Let new settings take effect.
-clock = time.clock() # Tracks FPS.
-
-while(True):
-    clock.tick() # Track elapsed milliseconds between snapshots().
-    img = sensor.snapshot() # Take a picture and return the image.
-
-    # Run the kernel on every pixel of the image.
-    img.laplacian(1)
-
-    print(clock.fps()) # Note: Your OpenMV Cam runs about half as fast while
-    # connected to your computer. The FPS should increase once disconnected.
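The light-removal examples above pair naturally with histogram equalization, since that is exactly the use case their headers describe. A minimal sketch of the combined pipeline (grayscale; the threshold value is illustrative):

import sensor, time

sensor.reset()
sensor.set_pixformat(sensor.GRAYSCALE)
sensor.set_framesize(sensor.QQVGA)
sensor.skip_frames(time=2000)
clock = time.clock()

while True:
    clock.tick()
    img = sensor.snapshot()
    # Zero out oversaturated highlights first so they don't skew the histogram...
    img.binary([(220, 255)], invert=False, zero=True)
    # ...then equalize what remains for better contrast.
    img.histeq()
    print(clock.fps())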
diff --git a/scripts/examples/Arduino/Portenta-H7/04-Image-Filters/erode_and_dilate.py b/scripts/examples/Arduino/Portenta-H7/04-Image-Filters/erode_and_dilate.py
deleted file mode 100644
index db755b2ae..000000000
--- a/scripts/examples/Arduino/Portenta-H7/04-Image-Filters/erode_and_dilate.py
+++ /dev/null
@@ -1,35 +0,0 @@
-# Erode and Dilate Example
-#
-# This example shows off the erode and dilate functions which you can run on
-# a binary image to remove noise. This example was originally a test but it's
-# useful for showing off how these functions work.
-
-import pyb, sensor, image
-
-sensor.reset()
-sensor.set_framesize(sensor.QVGA)
-
-grayscale_thres = (170, 255)
-rgb565_thres = (70, 100, -128, 127, -128, 127)
-
-while(True):
-
-    sensor.set_pixformat(sensor.GRAYSCALE)
-    for i in range(20):
-        img = sensor.snapshot()
-        img.binary([grayscale_thres])
-        img.erode(2)
-    for i in range(20):
-        img = sensor.snapshot()
-        img.binary([grayscale_thres])
-        img.dilate(2)
-
-    sensor.set_pixformat(sensor.RGB565)
-    for i in range(20):
-        img = sensor.snapshot()
-        img.binary([rgb565_thres])
-        img.erode(2)
-    for i in range(20):
-        img = sensor.snapshot()
-        img.binary([rgb565_thres])
-        img.dilate(2)
diff --git a/scripts/examples/Arduino/Portenta-H7/04-Image-Filters/gamma_correction.py b/scripts/examples/Arduino/Portenta-H7/04-Image-Filters/gamma_correction.py
deleted file mode 100644
index 7c735fd1b..000000000
--- a/scripts/examples/Arduino/Portenta-H7/04-Image-Filters/gamma_correction.py
+++ /dev/null
@@ -1,21 +0,0 @@
-# Gamma Correction
-#
-# This example shows off gamma correction to make the image brighter. The gamma
-# correction method also lets you adjust contrast and brightness.
-
-import sensor, image, time
-
-sensor.reset()
-sensor.set_pixformat(sensor.GRAYSCALE)
-sensor.set_framesize(sensor.QVGA)
-sensor.skip_frames(time = 2000)
-clock = time.clock()
-
-while(True):
-    clock.tick()
-
-    # Gamma, contrast, and brightness correction are applied to each color channel. The
-    # values are scaled to the range per color channel per image type...
-    img = sensor.snapshot().gamma_corr(gamma = 0.5, contrast = 1.0, brightness = 0.0)
-
-    print(clock.fps())
diff --git a/scripts/examples/Arduino/Portenta-H7/04-Image-Filters/grayscale_bilateral_filter.py b/scripts/examples/Arduino/Portenta-H7/04-Image-Filters/grayscale_bilateral_filter.py
deleted file mode 100644
index 6b3a67b21..000000000
--- a/scripts/examples/Arduino/Portenta-H7/04-Image-Filters/grayscale_bilateral_filter.py
+++ /dev/null
@@ -1,33 +0,0 @@
-# Grayscale Bilateral Filter Example
-#
-# This example shows off using the bilateral filter on grayscale images.
-
-import sensor, image, time
-
-sensor.reset() # Initialize the camera sensor.
-sensor.set_pixformat(sensor.GRAYSCALE) # or sensor.RGB565
-sensor.set_framesize(sensor.QQVGA) # or sensor.QVGA (or others)
-sensor.skip_frames(time = 2000) # Let new settings take effect.
-clock = time.clock() # Tracks FPS.
-
-while(True):
-    clock.tick() # Track elapsed milliseconds between snapshots().
-    img = sensor.snapshot() # Take a picture and return the image.
-
-    # color_sigma controls how close color-wise pixels have to be to each other
-    # to be blurred together. A smaller value means they have to be closer.
-    # A larger value is less strict.
-
-    # space_sigma controls how close space-wise pixels have to be to each other
-    # to be blurred together. A smaller value means they have to be closer.
-    # A larger value is less strict.
-
-    # Run the kernel on every pixel of the image.
-    img.bilateral(3, color_sigma=0.1, space_sigma=1)
-
-    # Note that the bilateral filter can introduce image defects if you set
-    # color_sigma/space_sigma too aggressively. Increase the sigma values until
-    # the defects go away if you see them.
-
-    print(clock.fps()) # Note: Your OpenMV Cam runs about half as fast while
-    # connected to your computer. The FPS should increase once disconnected.
diff --git a/scripts/examples/Arduino/Portenta-H7/04-Image-Filters/grayscale_binary_filter.py b/scripts/examples/Arduino/Portenta-H7/04-Image-Filters/grayscale_binary_filter.py
deleted file mode 100644
index dfaed5012..000000000
--- a/scripts/examples/Arduino/Portenta-H7/04-Image-Filters/grayscale_binary_filter.py
+++ /dev/null
@@ -1,45 +0,0 @@
-# Grayscale Binary Filter Example
-#
-# This script shows off the binary image filter. You may pass binary() any
-# number of thresholds to segment the image by.
-
-import sensor, image, time
-
-sensor.reset()
-sensor.set_framesize(sensor.QVGA)
-sensor.set_pixformat(sensor.GRAYSCALE)
-sensor.skip_frames(time = 2000)
-clock = time.clock()
-
-low_threshold = (0, 50)
-high_threshold = (205, 255)
-
-while(True):
-
-    # Test low threshold
-    for i in range(100):
-        clock.tick()
-        img = sensor.snapshot()
-        img.binary([low_threshold])
-        print(clock.fps())
-
-    # Test high threshold
-    for i in range(100):
-        clock.tick()
-        img = sensor.snapshot()
-        img.binary([high_threshold])
-        print(clock.fps())
-
-    # Test not low threshold
-    for i in range(100):
-        clock.tick()
-        img = sensor.snapshot()
-        img.binary([low_threshold], invert = 1)
-        print(clock.fps())
-
-    # Test not high threshold
-    for i in range(100):
-        clock.tick()
-        img = sensor.snapshot()
-        img.binary([high_threshold], invert = 1)
-        print(clock.fps())
diff --git a/scripts/examples/Arduino/Portenta-H7/04-Image-Filters/grayscale_light_removal.py b/scripts/examples/Arduino/Portenta-H7/04-Image-Filters/grayscale_light_removal.py
deleted file mode 100644
index d42b8a8e9..000000000
--- a/scripts/examples/Arduino/Portenta-H7/04-Image-Filters/grayscale_light_removal.py
+++ /dev/null
@@ -1,25 +0,0 @@
-# Grayscale Light Removal
-#
-# This example shows off how to remove bright lights from the image.
-# You can do this using the binary() method with the "zero=" argument.
-#
-# Removing bright lights from the image allows you to now use
-# histeq() on the image without outliers from oversaturated
-# parts of the image breaking the algorithm...
-
-import sensor, image, time
-
-sensor.reset() # Initialize the camera sensor.
-sensor.set_pixformat(sensor.GRAYSCALE) # or sensor.RGB565
-sensor.set_framesize(sensor.QQVGA) # or sensor.QVGA (or others)
-sensor.skip_frames(time = 2000) # Let new settings take effect.
-clock = time.clock() # Tracks FPS.
-
-thresholds = (220, 255)
-
-while(True):
-    clock.tick() # Track elapsed milliseconds between snapshots().
-    img = sensor.snapshot().binary([thresholds], invert=False, zero=True)
-
-    print(clock.fps()) # Note: Your OpenMV Cam runs about half as fast while
-    # connected to your computer. The FPS should increase once disconnected.
diff --git a/scripts/examples/Arduino/Portenta-H7/04-Image-Filters/histogram_equalization.py b/scripts/examples/Arduino/Portenta-H7/04-Image-Filters/histogram_equalization.py
deleted file mode 100644
index 00ea2f319..000000000
--- a/scripts/examples/Arduino/Portenta-H7/04-Image-Filters/histogram_equalization.py
+++ /dev/null
@@ -1,19 +0,0 @@
-# Histogram Equalization
-#
-# This example shows off how to use histogram equalization to improve
-# the contrast in the image.
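Since binary() takes a list of thresholds, the two bands from the grayscale binary example above can also be segmented in a single call rather than in separate test loops; a minimal sketch (same illustrative threshold values):

import sensor

sensor.reset()
sensor.set_pixformat(sensor.GRAYSCALE)
sensor.set_framesize(sensor.QVGA)
sensor.skip_frames(time=2000)

while True:
    img = sensor.snapshot()
    # Pixels matching either band (very dark or very bright) become white;
    # everything else becomes black.
    img.binary([(0, 50), (205, 255)])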
-
-import sensor, image, time
-
-sensor.reset()
-sensor.set_pixformat(sensor.GRAYSCALE)
-sensor.set_framesize(sensor.QQVGA)
-sensor.skip_frames(time = 2000)
-clock = time.clock()
-
-while(True):
-    clock.tick()
-
-    img = sensor.snapshot().histeq()
-
-    print(clock.fps())
diff --git a/scripts/examples/Arduino/Portenta-H7/04-Image-Filters/kernel_filters.py b/scripts/examples/Arduino/Portenta-H7/04-Image-Filters/kernel_filters.py
deleted file mode 100644
index 9b9dd565f..000000000
--- a/scripts/examples/Arduino/Portenta-H7/04-Image-Filters/kernel_filters.py
+++ /dev/null
@@ -1,27 +0,0 @@
-# Kernel Filtering Example
-#
-# This example shows off how to use a generic kernel filter.
-
-import sensor, image, time
-
-sensor.reset() # Initialize the camera sensor.
-sensor.set_pixformat(sensor.GRAYSCALE) # or sensor.RGB565
-sensor.set_framesize(sensor.QVGA) # or sensor.QQVGA (or others)
-sensor.skip_frames(time = 2000) # Let new settings take effect.
-clock = time.clock() # Tracks FPS.
-
-kernel_size = 1 # 3x3==1, 5x5==2, 7x7==3, etc.
-
-kernel = [-2, -1,  0, \
-          -1,  1,  1, \
-           0,  1,  2]
-
-while(True):
-    clock.tick() # Track elapsed milliseconds between snapshots().
-    img = sensor.snapshot() # Take a picture and return the image.
-
-    # Run the kernel on every pixel of the image.
-    img.morph(kernel_size, kernel)
-
-    print(clock.fps()) # Note: Your OpenMV Cam runs about half as fast while
-    # connected to your computer. The FPS should increase once disconnected.
diff --git a/scripts/examples/Arduino/Portenta-H7/04-Image-Filters/lens_correction.py b/scripts/examples/Arduino/Portenta-H7/04-Image-Filters/lens_correction.py
deleted file mode 100644
index b4945cafa..000000000
--- a/scripts/examples/Arduino/Portenta-H7/04-Image-Filters/lens_correction.py
+++ /dev/null
@@ -1,21 +0,0 @@
-# Lens Correction
-#
-# This example shows off how to use the lens correction method to fix lens
-# distortion in an image. You need to do this for qrcode / barcode / data matrix
-# detection. Increase the strength below until lines are straight in the view.
-# Zoom in (higher) or out (lower) until you see enough of the image.
-
-import sensor, image, time
-
-sensor.reset()
-sensor.set_pixformat(sensor.GRAYSCALE)
-sensor.set_framesize(sensor.QVGA)
-sensor.skip_frames(time = 2000)
-clock = time.clock()
-
-while(True):
-    clock.tick()
-
-    img = sensor.snapshot().lens_corr(strength = 1.8, zoom = 1.0)
-
-    print(clock.fps())
diff --git a/scripts/examples/Arduino/Portenta-H7/04-Image-Filters/linear_polar.py b/scripts/examples/Arduino/Portenta-H7/04-Image-Filters/linear_polar.py
deleted file mode 100644
index 239c6a1bc..000000000
--- a/scripts/examples/Arduino/Portenta-H7/04-Image-Filters/linear_polar.py
+++ /dev/null
@@ -1,21 +0,0 @@
-# Linear Polar Mapping Example
-#
-# This example shows off re-projecting the image using a linear polar
-# transformation. Linear polar images are useful in that rotations
-# become translations in the X direction and linear changes
-# in scale become linear translations in the Y direction.
-
-import sensor, image, time
-
-sensor.reset() # Initialize the camera sensor.
-sensor.set_pixformat(sensor.GRAYSCALE) # or sensor.RGB565
-sensor.set_framesize(sensor.QQVGA) # or sensor.QVGA (or others)
-sensor.skip_frames(time = 2000) # Let new settings take effect.
-clock = time.clock() # Tracks FPS.
-
-while(True):
-    clock.tick() # Track elapsed milliseconds between snapshots().
-    img = sensor.snapshot().linpolar(reverse=False)
-
-    print(clock.fps()) # Note: Your OpenMV Cam runs about half as fast while
-    # connected to your computer. The FPS should increase once disconnected.
diff --git a/scripts/examples/Arduino/Portenta-H7/04-Image-Filters/log_polar.py b/scripts/examples/Arduino/Portenta-H7/04-Image-Filters/log_polar.py
deleted file mode 100644
index 0bac70bf4..000000000
--- a/scripts/examples/Arduino/Portenta-H7/04-Image-Filters/log_polar.py
+++ /dev/null
@@ -1,21 +0,0 @@
-# Log Polar Mapping Example
-#
-# This example shows off re-projecting the image using a log polar
-# transformation. Log polar images are useful in that rotations
-# become translations in the X direction and exponential changes
-# in scale (x2, x4, etc.) become linear translations in the Y direction.
-
-import sensor, image, time
-
-sensor.reset() # Initialize the camera sensor.
-sensor.set_pixformat(sensor.GRAYSCALE) # or sensor.RGB565
-sensor.set_framesize(sensor.QQVGA) # or sensor.QVGA (or others)
-sensor.skip_frames(time = 2000) # Let new settings take effect.
-clock = time.clock() # Tracks FPS.
-
-while(True):
-    clock.tick() # Track elapsed milliseconds between snapshots().
-    img = sensor.snapshot().logpolar(reverse=False)
-
-    print(clock.fps()) # Note: Your OpenMV Cam runs about half as fast while
-    # connected to your computer. The FPS should increase once disconnected.
diff --git a/scripts/examples/Arduino/Portenta-H7/04-Image-Filters/mean_adaptive_threshold_filter.py b/scripts/examples/Arduino/Portenta-H7/04-Image-Filters/mean_adaptive_threshold_filter.py
deleted file mode 100644
index e33eae788..000000000
--- a/scripts/examples/Arduino/Portenta-H7/04-Image-Filters/mean_adaptive_threshold_filter.py
+++ /dev/null
@@ -1,25 +0,0 @@
-# Mean Adaptive Threshold Filter Example
-#
-# This example shows off mean filtering with adaptive thresholding.
-# When mean(threshold=True) the mean() method adaptive thresholds the image
-# by comparing the mean of the pixels around a pixel, minus an offset, with that pixel.
-
-import sensor, image, time
-
-sensor.reset() # Initialize the camera sensor.
-sensor.set_pixformat(sensor.GRAYSCALE) # or sensor.RGB565
-sensor.set_framesize(sensor.QQVGA) # or sensor.QVGA (or others)
-sensor.skip_frames(time = 2000) # Let new settings take effect.
-clock = time.clock() # Tracks FPS.
-
-while(True):
-    clock.tick() # Track elapsed milliseconds between snapshots().
-    img = sensor.snapshot() # Take a picture and return the image.
-
-    # The first argument is the kernel size. N corresponds to a ((N*2)+1)^2
-    # kernel size. E.g. 1 == 3x3 kernel, 2 == 5x5 kernel, etc. Note: You
-    # shouldn't ever need to use a value bigger than 2.
-    img.mean(1, threshold=True, offset=5, invert=True)
-
-    print(clock.fps()) # Note: Your OpenMV Cam runs about half as fast while
-    # connected to your computer. The FPS should increase once disconnected.
diff --git a/scripts/examples/Arduino/Portenta-H7/04-Image-Filters/mean_filter.py b/scripts/examples/Arduino/Portenta-H7/04-Image-Filters/mean_filter.py
deleted file mode 100644
index 7e6c4833e..000000000
--- a/scripts/examples/Arduino/Portenta-H7/04-Image-Filters/mean_filter.py
+++ /dev/null
@@ -1,25 +0,0 @@
-# Mean Filter Example
-#
-# This example shows off mean filtering. Mean filtering is your standard average
-# filter in an NxN neighborhood. Mean filtering removes noise in the image by
-# blurring everything. But it's the fastest kernel filter operation.
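The N -> ((N*2)+1)^2 kernel-size convention described above applies across these statistics filters. A minimal sketch contrasting the plain mean filter with its adaptive-threshold mode (the offset value is illustrative):

import sensor, time

sensor.reset()
sensor.set_pixformat(sensor.GRAYSCALE)
sensor.set_framesize(sensor.QQVGA)
sensor.skip_frames(time=2000)

while True:
    img = sensor.snapshot()
    # Plain blur would be img.mean(1): every pixel becomes its 3x3
    # neighborhood average (img.mean(2) would use a 5x5 neighborhood).
    # With threshold=True, each pixel is instead compared against its
    # local mean minus an offset, yielding a binary image that tracks
    # uneven lighting better than a single global threshold.
    img.mean(1, threshold=True, offset=5, invert=True)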
-
-import sensor, image, time
-
-sensor.reset() # Initialize the camera sensor.
-sensor.set_pixformat(sensor.GRAYSCALE) # or sensor.RGB565
-sensor.set_framesize(sensor.QQVGA) # or sensor.QVGA (or others)
-sensor.skip_frames(time = 2000) # Let new settings take effect.
-clock = time.clock() # Tracks FPS.
-
-while(True):
-    clock.tick() # Track elapsed milliseconds between snapshots().
-    img = sensor.snapshot() # Take a picture and return the image.
-
-    # The only argument is the kernel size. N corresponds to a ((N*2)+1)^2
-    # kernel size. E.g. 1 == 3x3 kernel, 2 == 5x5 kernel, etc. Note: You
-    # shouldn't ever need to use a value bigger than 2.
-    img.mean(1)
-
-    print(clock.fps()) # Note: Your OpenMV Cam runs about half as fast while
-    # connected to your computer. The FPS should increase once disconnected.
diff --git a/scripts/examples/Arduino/Portenta-H7/04-Image-Filters/median_adaptive_threshold_filter.py b/scripts/examples/Arduino/Portenta-H7/04-Image-Filters/median_adaptive_threshold_filter.py
deleted file mode 100644
index c13a95a6d..000000000
--- a/scripts/examples/Arduino/Portenta-H7/04-Image-Filters/median_adaptive_threshold_filter.py
+++ /dev/null
@@ -1,27 +0,0 @@
-# Median Adaptive Threshold Filter Example
-#
-# This example shows off median filtering with adaptive thresholding.
-# When median(threshold=True) the median() method adaptive thresholds the image
-# by comparing the median of the pixels around a pixel, minus an offset, with that pixel.
-
-import sensor, image, time
-
-sensor.reset() # Initialize the camera sensor.
-sensor.set_pixformat(sensor.GRAYSCALE) # or sensor.RGB565
-sensor.set_framesize(sensor.QQVGA) # or sensor.QVGA (or others)
-sensor.skip_frames(time = 2000) # Let new settings take effect.
-clock = time.clock() # Tracks FPS.
-
-while(True):
-    clock.tick() # Track elapsed milliseconds between snapshots().
-    img = sensor.snapshot() # Take a picture and return the image.
-
-    # The first argument to the median filter is the kernel size; it can be
-    # either 0, 1, or 2 for a 1x1, 3x3, or 5x5 kernel respectively. The second
-    # argument "percentile" is the percentile number to choose from the NxN
-    # neighborhood. 0.5 is the median, 0.25 is the lower quartile, and 0.75
-    # would be the upper quartile.
-    img.median(1, percentile=0.5, threshold=True, offset=5, invert=True)
-
-    print(clock.fps()) # Note: Your OpenMV Cam runs about half as fast while
-    # connected to your computer. The FPS should increase once disconnected.
diff --git a/scripts/examples/Arduino/Portenta-H7/04-Image-Filters/median_filter.py b/scripts/examples/Arduino/Portenta-H7/04-Image-Filters/median_filter.py
deleted file mode 100644
index dcd144910..000000000
--- a/scripts/examples/Arduino/Portenta-H7/04-Image-Filters/median_filter.py
+++ /dev/null
@@ -1,27 +0,0 @@
-# Median Filter Example
-#
-# This example shows off median filtering. Median filtering replaces every pixel
-# with the median value of its NxN neighborhood. Median filtering is good for
-# removing noise in the image while preserving edges.
-
-import sensor, image, time
-
-sensor.reset() # Initialize the camera sensor.
-sensor.set_pixformat(sensor.GRAYSCALE) # or sensor.RGB565
-sensor.set_framesize(sensor.QQVGA) # or sensor.QVGA (or others)
-sensor.skip_frames(time = 2000) # Let new settings take effect.
-clock = time.clock() # Tracks FPS.
-
-while(True):
-    clock.tick() # Track elapsed milliseconds between snapshots().
-    img = sensor.snapshot() # Take a picture and return the image.
-
-    # The first argument to the median filter is the kernel size; it can be
-    # either 0, 1, or 2 for a 1x1, 3x3, or 5x5 kernel respectively. The second
-    # argument "percentile" is the percentile number to choose from the NxN
-    # neighborhood. 0.5 is the median, 0.25 is the lower quartile, and 0.75
-    # would be the upper quartile.
-    img.median(1, percentile=0.5)
-
-    print(clock.fps()) # Note: Your OpenMV Cam runs about half as fast while
-    # connected to your computer. The FPS should increase once disconnected.
diff --git a/scripts/examples/Arduino/Portenta-H7/04-Image-Filters/midpoint_adaptive_threshold_filter.py b/scripts/examples/Arduino/Portenta-H7/04-Image-Filters/midpoint_adaptive_threshold_filter.py
deleted file mode 100644
index 12fa15b71..000000000
--- a/scripts/examples/Arduino/Portenta-H7/04-Image-Filters/midpoint_adaptive_threshold_filter.py
+++ /dev/null
@@ -1,28 +0,0 @@
-# Midpoint Adaptive Threshold Filter Example
-#
-# This example shows off midpoint filtering with adaptive thresholding.
-# When midpoint(threshold=True) the midpoint() method adaptive thresholds the image
-# by comparing the midpoint of the pixels around a pixel, minus an offset, with that pixel.
-
-import sensor, image, time
-
-sensor.reset() # Initialize the camera sensor.
-sensor.set_pixformat(sensor.GRAYSCALE) # or sensor.RGB565
-sensor.set_framesize(sensor.QQVGA) # or sensor.QVGA (or others)
-sensor.skip_frames(time = 2000) # Let new settings take effect.
-clock = time.clock() # Tracks FPS.
-
-while(True):
-    clock.tick() # Track elapsed milliseconds between snapshots().
-    img = sensor.snapshot() # Take a picture and return the image.
-
-    # The first argument is the kernel size. N corresponds to a ((N*2)+1)^2
-    # kernel size. E.g. 1 == 3x3 kernel, 2 == 5x5 kernel, etc. Note: You
-    # shouldn't ever need to use a value bigger than 2. The "bias" argument
-    # lets you select between min and max blending. 0.5 == midpoint filter,
-    # 0.0 == min filter, and 1.0 == max filter. Note that the min filter
-    # makes images darker while the max filter makes images lighter.
-    img.midpoint(1, bias=0.5, threshold=True, offset=5, invert=True)
-
-    print(clock.fps()) # Note: Your OpenMV Cam runs about half as fast while
-    # connected to your computer. The FPS should increase once disconnected.
diff --git a/scripts/examples/Arduino/Portenta-H7/04-Image-Filters/midpoint_filter.py b/scripts/examples/Arduino/Portenta-H7/04-Image-Filters/midpoint_filter.py
deleted file mode 100644
index 09313e192..000000000
--- a/scripts/examples/Arduino/Portenta-H7/04-Image-Filters/midpoint_filter.py
+++ /dev/null
@@ -1,27 +0,0 @@
-# Midpoint Filter Example
-#
-# This example shows off midpoint filtering. Midpoint filtering replaces each
-# pixel by the average of the min and max pixel values for an NxN neighborhood.
-
-import sensor, image, time
-
-sensor.reset() # Initialize the camera sensor.
-sensor.set_pixformat(sensor.GRAYSCALE) # or sensor.RGB565
-sensor.set_framesize(sensor.QQVGA) # or sensor.QVGA (or others)
-sensor.skip_frames(time = 2000) # Let new settings take effect.
-clock = time.clock() # Tracks FPS.
-
-while(True):
-    clock.tick() # Track elapsed milliseconds between snapshots().
-    img = sensor.snapshot() # Take a picture and return the image.
-
-    # The first argument is the kernel size. N corresponds to a ((N*2)+1)^2
-    # kernel size. E.g. 1 == 3x3 kernel, 2 == 5x5 kernel, etc. Note: You
-    # shouldn't ever need to use a value bigger than 2. The "bias" argument
-    # lets you select between min and max blending. 0.5 == midpoint filter,
-    # 0.0 == min filter, and 1.0 == max filter. Note that the min filter
-    # makes images darker while the max filter makes images lighter.
-    img.midpoint(1, bias=0.5)
-
-    print(clock.fps()) # Note: Your OpenMV Cam runs about half as fast while
-    # connected to your computer. The FPS should increase once disconnected.
diff --git a/scripts/examples/Arduino/Portenta-H7/04-Image-Filters/mode_adaptive_threshold_filter.py b/scripts/examples/Arduino/Portenta-H7/04-Image-Filters/mode_adaptive_threshold_filter.py
deleted file mode 100644
index 8ab9a0675..000000000
--- a/scripts/examples/Arduino/Portenta-H7/04-Image-Filters/mode_adaptive_threshold_filter.py
+++ /dev/null
@@ -1,25 +0,0 @@
-# Mode Adaptive Threshold Filter Example
-#
-# This example shows off mode filtering with adaptive thresholding.
-# When mode(threshold=True) the mode() method adaptive thresholds the image
-# by comparing the mode of the pixels around a pixel, minus an offset, with that pixel.
-# Avoid using the mode filter on RGB565 images. It will cause artifacts on image edges...
-
-import sensor, image, time
-
-sensor.reset() # Initialize the camera sensor.
-sensor.set_pixformat(sensor.GRAYSCALE) # or sensor.RGB565
-sensor.set_framesize(sensor.QQVGA) # or sensor.QVGA (or others)
-sensor.skip_frames(time = 2000) # Let new settings take effect.
-clock = time.clock() # Tracks FPS.
-
-while(True):
-    clock.tick() # Track elapsed milliseconds between snapshots().
-    img = sensor.snapshot() # Take a picture and return the image.
-
-    # The only argument to the mode filter is the kernel size; it can be
-    # either 0, 1, or 2 for a 1x1, 3x3, or 5x5 kernel respectively.
-    img.mode(1, threshold=True, offset=5, invert=True)
-
-    print(clock.fps()) # Note: Your OpenMV Cam runs about half as fast while
-    # connected to your computer. The FPS should increase once disconnected.
diff --git a/scripts/examples/Arduino/Portenta-H7/04-Image-Filters/mode_filter.py b/scripts/examples/Arduino/Portenta-H7/04-Image-Filters/mode_filter.py
deleted file mode 100644
index 170937c58..000000000
--- a/scripts/examples/Arduino/Portenta-H7/04-Image-Filters/mode_filter.py
+++ /dev/null
@@ -1,25 +0,0 @@
-# Mode Filter Example
-#
-# This example shows off mode filtering. Mode filtering is a highly non-linear
-# operation which replaces each pixel with the mode of the NxN neighborhood
-# of pixels around it. Avoid using the mode filter on RGB565 images. It will
-# cause artifacts on image edges...
-
-import sensor, image, time
-
-sensor.reset() # Initialize the camera sensor.
-sensor.set_pixformat(sensor.GRAYSCALE) # or sensor.RGB565
-sensor.set_framesize(sensor.QQVGA) # or sensor.QVGA (or others)
-sensor.skip_frames(time = 2000) # Let new settings take effect.
-clock = time.clock() # Tracks FPS.
-
-while(True):
-    clock.tick() # Track elapsed milliseconds between snapshots().
-    img = sensor.snapshot() # Take a picture and return the image.
-
-    # The only argument to the mode filter is the kernel size; it can be
-    # either 0, 1, or 2 for a 1x1, 3x3, or 5x5 kernel respectively.
-    img.mode(1)
-
-    print(clock.fps()) # Note: Your OpenMV Cam runs about half as fast while
-    # connected to your computer. The FPS should increase once disconnected.
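Because the bias argument blends between the neighborhood minimum and maximum, midpoint() also doubles as a min or max filter, as the comments above note; a short sketch:

import sensor, time

sensor.reset()
sensor.set_pixformat(sensor.GRAYSCALE)
sensor.set_framesize(sensor.QQVGA)
sensor.skip_frames(time=2000)

while True:
    img = sensor.snapshot()
    img.midpoint(1, bias=0.0)    # min filter: erodes bright detail, darkens
    # img.midpoint(1, bias=1.0)  # max filter: dilates bright detail, lightens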
diff --git a/scripts/examples/Arduino/Portenta-H7/04-Image-Filters/negative.py b/scripts/examples/Arduino/Portenta-H7/04-Image-Filters/negative.py
deleted file mode 100644
index 669ecb4d6..000000000
--- a/scripts/examples/Arduino/Portenta-H7/04-Image-Filters/negative.py
+++ /dev/null
@@ -1,19 +0,0 @@
-# Negative Example
-#
-# This example shows off negating the image. This is not a particularly
-# useful method but it can come in handy once in a while.
-
-import sensor, image, time
-
-sensor.reset() # Initialize the camera sensor.
-sensor.set_pixformat(sensor.GRAYSCALE) # or sensor.RGB565
-sensor.set_framesize(sensor.QVGA) # or sensor.QQVGA (or others)
-sensor.skip_frames(time = 2000) # Let new settings take effect.
-clock = time.clock() # Tracks FPS.
-
-while(True):
-    clock.tick() # Track elapsed milliseconds between snapshots().
-    img = sensor.snapshot().negate()
-
-    print(clock.fps()) # Note: Your OpenMV Cam runs about half as fast while
-    # connected to your computer. The FPS should increase once disconnected.
diff --git a/scripts/examples/Arduino/Portenta-H7/04-Image-Filters/perspective_and_rotation_correction.py b/scripts/examples/Arduino/Portenta-H7/04-Image-Filters/perspective_and_rotation_correction.py
deleted file mode 100644
index 2dc3b6aad..000000000
--- a/scripts/examples/Arduino/Portenta-H7/04-Image-Filters/perspective_and_rotation_correction.py
+++ /dev/null
@@ -1,71 +0,0 @@
-# Perspective and Rotation Correction
-#
-# This example shows off how to use rotation_corr() to correct for perspective
-# distortion and then rotate the corrected image in 3D space afterwards
-# to handle movement.
-
-import sensor, image, time
-
-sensor.reset()
-sensor.set_pixformat(sensor.GRAYSCALE)
-sensor.set_framesize(sensor.QVGA)
-sensor.skip_frames(time = 2000)
-clock = time.clock()
-
-# The image will be warped such that the following points become the new:
-#
-#   (0,   0)
-#   (w-1, 0)
-#   (w-1, h-1)
-#   (0,   h-1)
-#
-# Try setting the points below to the corners of a quadrilateral
-# (in clockwise order) in the field-of-view. You can get points
-# on the image by clicking and dragging on the frame buffer and
-# recording the values shown in the histogram widget.
-
-w = sensor.width()
-h = sensor.height()
-
-TARGET_POINTS = [(0,   0),   # (x, y) CHANGE ME!
-                 (w-1, 0),   # (x, y) CHANGE ME!
-                 (w-1, h-1), # (x, y) CHANGE ME!
-                 (0,   h-1)] # (x, y) CHANGE ME!
-
-# Degrees to rotate by per frame...
-X_ROTATION_DEGREE_RATE = 5
-Y_ROTATION_DEGREE_RATE = 0.5
-Z_ROTATION_DEGREE_RATE = 0
-X_OFFSET = 0
-Y_OFFSET = 0
-
-ZOOM_AMOUNT = 1 # Lower zooms out - Higher zooms in.
-FOV_WINDOW = 25 # Between 0 and 180. Represents the field-of-view of the scene
-                # window when rotating the image in 3D space. Values closer to
-                # zero result in lines becoming straighter as the window
-                # moves away from the image being rotated in 3D space. A large
-                # value moves the window closer to the image in 3D space, which
-                # results in more perspective distortion and sometimes in the
-                # image in 3D space intersecting the scene window.
-
-x_rotation_counter = 0
-y_rotation_counter = 0
-z_rotation_counter = 0
-
-while(True):
-    clock.tick()
-
-    img = sensor.snapshot().rotation_corr(x_rotation = x_rotation_counter, \
-                                          y_rotation = y_rotation_counter, \
-                                          z_rotation = z_rotation_counter, \
-                                          x_translation = X_OFFSET, \
-                                          y_translation = Y_OFFSET, \
-                                          zoom = ZOOM_AMOUNT, \
-                                          fov = FOV_WINDOW, \
-                                          corners = TARGET_POINTS)
-
-    x_rotation_counter += X_ROTATION_DEGREE_RATE
-    y_rotation_counter += Y_ROTATION_DEGREE_RATE
-    z_rotation_counter += Z_ROTATION_DEGREE_RATE
-
-    print(clock.fps())
diff --git a/scripts/examples/Arduino/Portenta-H7/04-Image-Filters/perspective_correction.py b/scripts/examples/Arduino/Portenta-H7/04-Image-Filters/perspective_correction.py
deleted file mode 100644
index 83f149ce1..000000000
--- a/scripts/examples/Arduino/Portenta-H7/04-Image-Filters/perspective_correction.py
+++ /dev/null
@@ -1,39 +0,0 @@
-# Perspective Correction
-#
-# This example shows off how to use rotation_corr() to fix perspective
-# issues related to how your OpenMV Cam is mounted.
-
-import sensor, image, time
-
-sensor.reset()
-sensor.set_pixformat(sensor.GRAYSCALE)
-sensor.set_framesize(sensor.QVGA)
-sensor.skip_frames(time = 2000)
-clock = time.clock()
-
-# The image will be warped such that the following points become the new:
-#
-#   (0,   0)
-#   (w-1, 0)
-#   (w-1, h-1)
-#   (0,   h-1)
-#
-# Try setting the points below to the corners of a quadrilateral
-# (in clockwise order) in the field-of-view. You can get points
-# on the image by clicking and dragging on the frame buffer and
-# recording the values shown in the histogram widget.
-
-w = sensor.width()
-h = sensor.height()
-
-TARGET_POINTS = [(0,   0),   # (x, y) CHANGE ME!
-                 (w-1, 0),   # (x, y) CHANGE ME!
-                 (w-1, h-1), # (x, y) CHANGE ME!
-                 (0,   h-1)] # (x, y) CHANGE ME!
-
-while(True):
-    clock.tick()
-
-    img = sensor.snapshot().rotation_corr(corners = TARGET_POINTS)
-
-    print(clock.fps())
diff --git a/scripts/examples/Arduino/Portenta-H7/04-Image-Filters/rotation_correction.py b/scripts/examples/Arduino/Portenta-H7/04-Image-Filters/rotation_correction.py
deleted file mode 100644
index cc3fbdc7d..000000000
--- a/scripts/examples/Arduino/Portenta-H7/04-Image-Filters/rotation_correction.py
+++ /dev/null
@@ -1,49 +0,0 @@
-# Rotation Correction
-#
-# This example shows off how to use rotation_corr() to play with the scene
-# window your OpenMV Cam sees.
-
-import sensor, image, time
-
-# Degrees to rotate by per frame...
-X_ROTATION_DEGREE_RATE = 5
-Y_ROTATION_DEGREE_RATE = 0.5
-Z_ROTATION_DEGREE_RATE = 0
-X_OFFSET = 0
-Y_OFFSET = 0
-
-ZOOM_AMOUNT = 1 # Lower zooms out - Higher zooms in.
-FOV_WINDOW = 60 # Between 0 and 180. Represents the field-of-view of the scene
-                # window when rotating the image in 3D space. Values closer to
-                # zero result in lines becoming straighter as the window
-                # moves away from the image being rotated in 3D space. A large
-                # value moves the window closer to the image in 3D space, which
-                # results in more perspective distortion and sometimes in the
-                # image in 3D space intersecting the scene window.
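The scripts above animate the rotation to demonstrate the effect, but for the common case of a camera mounted at a fixed angle a single static rotation_corr() call per frame is enough; a minimal sketch (the angles are illustrative placeholders):

import sensor

sensor.reset()
sensor.set_pixformat(sensor.GRAYSCALE)
sensor.set_framesize(sensor.QVGA)
sensor.skip_frames(time=2000)

while True:
    # Undo a fixed 10-degree downward tilt and a 5-degree roll.
    img = sensor.snapshot().rotation_corr(x_rotation=10, z_rotation=5)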
-
-sensor.reset()
-sensor.set_pixformat(sensor.GRAYSCALE)
-sensor.set_framesize(sensor.QVGA)
-sensor.skip_frames(time = 2000)
-clock = time.clock()
-
-x_rotation_counter = 0
-y_rotation_counter = 0
-z_rotation_counter = 0
-
-while(True):
-    clock.tick()
-
-    img = sensor.snapshot().rotation_corr(x_rotation = x_rotation_counter, \
-                                          y_rotation = y_rotation_counter, \
-                                          z_rotation = z_rotation_counter, \
-                                          x_translation = X_OFFSET, \
-                                          y_translation = Y_OFFSET, \
-                                          zoom = ZOOM_AMOUNT, \
-                                          fov = FOV_WINDOW)
-
-    x_rotation_counter += X_ROTATION_DEGREE_RATE
-    y_rotation_counter += Y_ROTATION_DEGREE_RATE
-    z_rotation_counter += Z_ROTATION_DEGREE_RATE
-
-    print(clock.fps())
diff --git a/scripts/examples/Arduino/Portenta-H7/04-Image-Filters/sharpen_filter.py b/scripts/examples/Arduino/Portenta-H7/04-Image-Filters/sharpen_filter.py
deleted file mode 100644
index 0f541e203..000000000
--- a/scripts/examples/Arduino/Portenta-H7/04-Image-Filters/sharpen_filter.py
+++ /dev/null
@@ -1,21 +0,0 @@
-# Sharpen Filter Example
-#
-# This example shows off using the laplacian filter to sharpen images.
-
-import sensor, image, time
-
-sensor.reset() # Initialize the camera sensor.
-sensor.set_pixformat(sensor.GRAYSCALE) # or sensor.RGB565
-sensor.set_framesize(sensor.QQVGA) # or sensor.QVGA (or others)
-sensor.skip_frames(time = 2000) # Let new settings take effect.
-clock = time.clock() # Tracks FPS.
-
-while(True):
-    clock.tick() # Track elapsed milliseconds between snapshots().
-    img = sensor.snapshot() # Take a picture and return the image.
-
-    # Run the kernel on every pixel of the image.
-    img.laplacian(1, sharpen=True)
-
-    print(clock.fps()) # Note: Your OpenMV Cam runs about half as fast while
-    # connected to your computer. The FPS should increase once disconnected.
diff --git a/scripts/examples/Arduino/Portenta-H7/04-Image-Filters/ulab.py b/scripts/examples/Arduino/Portenta-H7/04-Image-Filters/ulab.py
deleted file mode 100644
index 8af9d1a38..000000000
--- a/scripts/examples/Arduino/Portenta-H7/04-Image-Filters/ulab.py
+++ /dev/null
@@ -1,18 +0,0 @@
-# Ulab is a numpy-like module for micropython, meant to simplify and speed up common
-# mathematical operations on arrays. This basic example shows mean/std on an image.
-#
-# NOTE: ndarrays cause the heap to be fragmented easily. If you run out of memory,
-# there's not much that can be done about it; lowering the resolution might help.
-
-import sensor, image, time, ulab as np
-from ulab import numerical
-
-sensor.reset() # Reset and initialize the sensor.
-sensor.set_pixformat(sensor.GRAYSCALE) # Set pixel format to GRAYSCALE (or RGB565)
-sensor.set_framesize(sensor.QQVGA) # Set frame size to QQVGA (160x120)
-clock = time.clock() # Create a clock object to track the FPS.
-
-while (True):
-    img = sensor.snapshot() # Take a picture and return the image.
-    a = np.array(img, dtype=np.uint8)
-    print("mean: %d std:%d"%(numerical.mean(a), numerical.std(a)))
diff --git a/scripts/examples/Arduino/Portenta-H7/04-Image-Filters/unsharp_filter.py b/scripts/examples/Arduino/Portenta-H7/04-Image-Filters/unsharp_filter.py
deleted file mode 100644
index eb8eb2270..000000000
--- a/scripts/examples/Arduino/Portenta-H7/04-Image-Filters/unsharp_filter.py
+++ /dev/null
@@ -1,21 +0,0 @@
-# Unsharp Filter Example
-#
-# This example shows off using the gaussian filter to sharpen images with an unsharp mask.
-
-import sensor, image, time
-
-sensor.reset() # Initialize the camera sensor.
-sensor.set_pixformat(sensor.GRAYSCALE) # or sensor.RGB565
-sensor.set_framesize(sensor.QQVGA) # or sensor.QVGA (or others)
-sensor.skip_frames(time = 2000) # Let new settings take effect.
-clock = time.clock() # Tracks FPS.
-
-while(True):
-    clock.tick() # Track elapsed milliseconds between snapshots().
-    img = sensor.snapshot() # Take a picture and return the image.
-
-    # Run the kernel on every pixel of the image.
-    img.gaussian(1, unsharp=True)
-
-    print(clock.fps()) # Note: Your OpenMV Cam runs about half as fast while
-    # connected to your computer. The FPS should increase once disconnected.
diff --git a/scripts/examples/Arduino/Portenta-H7/04-Image-Filters/vflip_hmirror_transpose.py b/scripts/examples/Arduino/Portenta-H7/04-Image-Filters/vflip_hmirror_transpose.py
deleted file mode 100644
index 2953eb254..000000000
--- a/scripts/examples/Arduino/Portenta-H7/04-Image-Filters/vflip_hmirror_transpose.py
+++ /dev/null
@@ -1,33 +0,0 @@
-# Vertical Flip - Horizontal Mirror - Transpose
-#
-# This example shows off how to vertically flip, horizontally mirror, or
-# transpose an image. Note that:
-#
-# vflip=False, hmirror=False, transpose=False -> 0 degree rotation
-# vflip=True,  hmirror=False, transpose=True  -> 90 degree rotation
-# vflip=True,  hmirror=True,  transpose=False -> 180 degree rotation
-# vflip=False, hmirror=True,  transpose=True  -> 270 degree rotation
-
-import sensor, image, time, pyb
-
-sensor.reset()
-sensor.set_pixformat(sensor.GRAYSCALE)
-sensor.set_framesize(sensor.QVGA)
-sensor.skip_frames(time = 2000)
-clock = time.clock()
-
-mills = pyb.millis()
-counter = 0
-
-while(True):
-    clock.tick()
-
-    img = sensor.snapshot().replace(vflip=(counter//2)%2,
-                                    hmirror=(counter//4)%2,
-                                    transpose=(counter//8)%2)
-
-    if (pyb.millis() > (mills + 1000)):
-        mills = pyb.millis()
-        counter += 1
-
-    print(clock.fps())
diff --git a/scripts/examples/Arduino/Portenta-H7/05-Snapshot/emboss_snapshot.py b/scripts/examples/Arduino/Portenta-H7/05-Snapshot/emboss_snapshot.py
deleted file mode 100644
index 46e5c3b10..000000000
--- a/scripts/examples/Arduino/Portenta-H7/05-Snapshot/emboss_snapshot.py
+++ /dev/null
@@ -1,33 +0,0 @@
-# Emboss Snapshot Example
-#
-# Note: You will need an SD card to run this example.
-#
-# You can use your OpenMV Cam to save modified image files.
-
-import sensor, image, pyb
-
-RED_LED_PIN = 1
-BLUE_LED_PIN = 3
-
-sensor.reset() # Initialize the camera sensor.
-sensor.set_pixformat(sensor.GRAYSCALE) # or sensor.RGB565
-sensor.set_framesize(sensor.QVGA) # or sensor.QQVGA (or others)
-sensor.skip_frames(time = 2000) # Let new settings take effect.
-
-pyb.LED(RED_LED_PIN).on()
-sensor.skip_frames(time = 2000) # Give the user time to get ready.
-
-pyb.LED(RED_LED_PIN).off()
-pyb.LED(BLUE_LED_PIN).on()
-
-print("You're on camera!")
-img = sensor.snapshot()
-
-img.morph(1, [+2, +1, +0,\
-              +1, +1, -1,\
-              +0, -1, -2]) # Emboss the image.
-
-img.save("example.jpg") # or "example.bmp" (or others)
-
-pyb.LED(BLUE_LED_PIN).off()
-print("Done! Reset the camera to see the saved image.")
diff --git a/scripts/examples/Arduino/Portenta-H7/05-Snapshot/snapshot.py b/scripts/examples/Arduino/Portenta-H7/05-Snapshot/snapshot.py
deleted file mode 100644
index ce5a5fcec..000000000
--- a/scripts/examples/Arduino/Portenta-H7/05-Snapshot/snapshot.py
+++ /dev/null
@@ -1,27 +0,0 @@
-# Snapshot Example
-#
-# Note: You will need an SD card to run this example.
-#
-# You can use your OpenMV Cam to save image files.
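If you need a series of images rather than one, the save() pattern used below extends naturally to numbered files; a minimal sketch (the file-naming scheme is illustrative, and an SD card is still required):

import sensor

sensor.reset()
sensor.set_pixformat(sensor.GRAYSCALE)
sensor.set_framesize(sensor.QVGA)
sensor.skip_frames(time=2000)

for i in range(5):
    sensor.skip_frames(time=1000)              # pause between shots
    sensor.snapshot().save("shot-%d.jpg" % i)  # writes shot-0.jpg ... shot-4.jpg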
diff --git a/scripts/examples/Arduino/Portenta-H7/05-Snapshot/emboss_snapshot.py b/scripts/examples/Arduino/Portenta-H7/05-Snapshot/emboss_snapshot.py
deleted file mode 100644
index 46e5c3b10..000000000
--- a/scripts/examples/Arduino/Portenta-H7/05-Snapshot/emboss_snapshot.py
+++ /dev/null
@@ -1,33 +0,0 @@
-# Emboss Snapshot Example
-#
-# Note: You will need an SD card to run this example.
-#
-# You can use your OpenMV Cam to save modified image files.
-
-import sensor, image, pyb
-
-RED_LED_PIN = 1
-BLUE_LED_PIN = 3
-
-sensor.reset() # Initialize the camera sensor.
-sensor.set_pixformat(sensor.GRAYSCALE) # or sensor.RGB565
-sensor.set_framesize(sensor.QVGA) # or sensor.QQVGA (or others)
-sensor.skip_frames(time = 2000) # Let new settings take effect.
-
-pyb.LED(RED_LED_PIN).on()
-sensor.skip_frames(time = 2000) # Give the user time to get ready.
-
-pyb.LED(RED_LED_PIN).off()
-pyb.LED(BLUE_LED_PIN).on()
-
-print("You're on camera!")
-img = sensor.snapshot()
-
-img.morph(1, [+2, +1, +0,\
-              +1, +1, -1,\
-              +0, -1, -2]) # Emboss the image.
-
-img.save("example.jpg") # or "example.bmp" (or others)
-
-pyb.LED(BLUE_LED_PIN).off()
-print("Done! Reset the camera to see the saved image.")
diff --git a/scripts/examples/Arduino/Portenta-H7/05-Snapshot/snapshot.py b/scripts/examples/Arduino/Portenta-H7/05-Snapshot/snapshot.py
deleted file mode 100644
index ce5a5fcec..000000000
--- a/scripts/examples/Arduino/Portenta-H7/05-Snapshot/snapshot.py
+++ /dev/null
@@ -1,27 +0,0 @@
-# Snapshot Example
-#
-# Note: You will need an SD card to run this example.
-#
-# You can use your OpenMV Cam to save image files.
-
-import sensor, image, pyb
-
-RED_LED_PIN = 1
-BLUE_LED_PIN = 3
-
-sensor.reset() # Initialize the camera sensor.
-sensor.set_pixformat(sensor.GRAYSCALE) # or sensor.RGB565
-sensor.set_framesize(sensor.QVGA) # or sensor.QQVGA (or others)
-sensor.skip_frames(time = 2000) # Let new settings take effect.
-
-pyb.LED(RED_LED_PIN).on()
-sensor.skip_frames(time = 2000) # Give the user time to get ready.
-
-pyb.LED(RED_LED_PIN).off()
-pyb.LED(BLUE_LED_PIN).on()
-
-print("You're on camera!")
-sensor.snapshot().save("example.jpg") # or "example.bmp" (or others)
-
-pyb.LED(BLUE_LED_PIN).off()
-print("Done! Reset the camera to see the saved image.")
diff --git a/scripts/examples/Arduino/Portenta-H7/05-Snapshot/snapshot_on_face_detection.py b/scripts/examples/Arduino/Portenta-H7/05-Snapshot/snapshot_on_face_detection.py
deleted file mode 100644
index a716df263..000000000
--- a/scripts/examples/Arduino/Portenta-H7/05-Snapshot/snapshot_on_face_detection.py
+++ /dev/null
@@ -1,51 +0,0 @@
-# Snapshot on Face Detection Example
-#
-# Note: You will need an SD card to run this example.
-#
-# This example demonstrates using face tracking on your OpenMV Cam to take a
-# picture.
-
-import sensor, image, pyb
-
-RED_LED_PIN = 1
-BLUE_LED_PIN = 3
-
-sensor.reset() # Initialize the camera sensor.
-sensor.set_pixformat(sensor.GRAYSCALE)
-sensor.set_framesize(sensor.HQVGA) # or sensor.QQVGA (or others)
-sensor.skip_frames(time = 2000) # Let new settings take effect.
-
-# Load up a face detection HaarCascade. This is an object that your OpenMV Cam
-# can use to detect faces using the find_features() method below. Your OpenMV
-# Cam has the frontalface HaarCascade built-in. By default, all the stages of the
-# HaarCascade are loaded. However, you can adjust the number of stages to speed
-# up processing at the expense of accuracy. The frontalface HaarCascade has 25
-# stages.
-face_cascade = image.HaarCascade("frontalface", stages=25)
-
-while(True):
-
-    pyb.LED(RED_LED_PIN).on()
-    print("About to start detecting faces...")
-    sensor.skip_frames(time = 2000) # Give the user time to get ready.
-
-    pyb.LED(RED_LED_PIN).off()
-    print("Now detecting faces!")
-    pyb.LED(BLUE_LED_PIN).on()
-
-    diff = 10 # We'll say we detected a face after 10 frames.
-    while(diff):
-        img = sensor.snapshot()
-        # Threshold can be between 0.0 and 1.0. A higher threshold results in a
-        # higher detection rate with more false positives. The scale value
-        # controls the matching scale, allowing you to detect smaller faces.
-        faces = img.find_features(face_cascade, threshold=0.5, scale_factor=1.5)
-
-        if faces:
-            diff -= 1
-            for r in faces:
-                img.draw_rectangle(r)
-
-    pyb.LED(BLUE_LED_PIN).off()
-    print("Face detected! Saving image...")
-    sensor.snapshot().save("snapshot-%d.jpg" % pyb.rng()) # Save Pic.
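Several of the removed scripts reuse the same find_features() call, so a short
hedged sketch of how its two knobs interact may help; the values below are
illustrative assumptions, not tuned settings:

    # Lower threshold -> weaker matches accepted (more detections, more false
    # positives). Smaller scale_factor -> more scales searched (smaller faces
    # found, but slower).
    import sensor, image
    sensor.reset()
    sensor.set_pixformat(sensor.GRAYSCALE)
    sensor.set_framesize(sensor.HQVGA)
    face_cascade = image.HaarCascade("frontalface", stages=25)
    img = sensor.snapshot()
    for t in (0.4, 0.5, 0.6):
        faces = img.find_features(face_cascade, threshold=t, scale_factor=1.5)
        print(t, len(faces))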
-sensor.set_pixformat(sensor.GRAYSCALE) # or sensor.RGB565
-sensor.set_framesize(sensor.QVGA) # or sensor.QQVGA (or others)
-sensor.skip_frames(time = 2000) # Let new settings take effect.
-
-if not "temp" in os.listdir(): os.mkdir("temp") # Make a temp directory
-
-while(True):
-
-    pyb.LED(RED_LED_PIN).on()
-    print("About to save background image...")
-    sensor.skip_frames(time = 2000) # Give the user time to get ready.
-
-    pyb.LED(RED_LED_PIN).off()
-    sensor.snapshot().save("temp/bg.bmp")
-    print("Saved background image - Now detecting motion!")
-    pyb.LED(BLUE_LED_PIN).on()
-
-    diff = 10 # We'll say we detected motion after 10 frames of motion.
-    while(diff):
-        img = sensor.snapshot()
-        img.difference("temp/bg.bmp")
-        stats = img.statistics()
-        # Stats 5 is the max of the lighting color channel. The code below
-        # triggers when the lighting max for the whole image goes above 20.
-        # The lighting difference maximum should normally be zero.
-        if (stats[5] > 20):
-            diff -= 1
-
-    pyb.LED(BLUE_LED_PIN).off()
-    print("Movement detected! Saving image...")
-    sensor.snapshot().save("temp/snapshot-%d.jpg" % pyb.rng()) # Save Pic.
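An arguably clearer, equivalent form of the stats[5] test above, assuming the
grayscale statistics object also exposes the same value via max() (index 5
follows the mean, median, mode, stdev, min, max ordering):

    # Drop-in replacement for "if (stats[5] > 20)" in the loop above.
    if img.statistics().max() > 20:
        diff -= 1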
diff --git a/scripts/examples/Arduino/Portenta-H7/05-Snapshot/time_lapse_photos.py b/scripts/examples/Arduino/Portenta-H7/05-Snapshot/time_lapse_photos.py
deleted file mode 100644
index be6c370dd..000000000
--- a/scripts/examples/Arduino/Portenta-H7/05-Snapshot/time_lapse_photos.py
+++ /dev/null
@@ -1,67 +0,0 @@
-# Time Lapse Photos (Credit nedhorning)
-#
-# This example shows off how to take time lapse photos using your OpenMV
-# Cam, using the RTC module along with a timer interrupt to achieve
-# very low power operation.
-#
-# Note that if USB is still plugged in when the camera is taking
-# pictures it will run the bootloader each time. Please power the camera
-# from something other than USB to keep the bootloader from running.
-
-import pyb, machine, sensor, image, os
-
-# Create and init an RTC object. This will allow us to set the current time for
-# the RTC and let us set an interrupt to wake up later on.
-rtc = pyb.RTC()
-newFile = False
-
-try:
-    os.stat('time.txt')
-except OSError: # If the log file doesn't exist then set the RTC and set newFile to True
-    # datetime format: year, month, day, weekday (Monday=1, Sunday=7),
-    # hours (24 hour clock), minutes, seconds, subseconds (counts down from 255 to 0)
-    rtc.datetime((2018, 3, 9, 5, 13, 0, 0, 0))
-    newFile = True
-
-# Extract the date and time from the RTC object.
-dateTime = rtc.datetime()
-year = str(dateTime[0])
-month = '%02d' % dateTime[1]
-day = '%02d' % dateTime[2]
-hour = '%02d' % dateTime[4]
-minute = '%02d' % dateTime[5]
-second = '%02d' % dateTime[6]
-subSecond = str(dateTime[7])
-
-newName='I'+year+month+day+hour+minute+second # Image file name based on RTC
-
-# Enable RTC interrupts every 10 seconds. The camera will RESET after wakeup from deepsleep mode.
-rtc.wakeup(10000)
-
-BLUE_LED_PIN = 3
-
-sensor.reset() # Initialize the camera sensor.
-sensor.set_pixformat(sensor.GRAYSCALE)
-sensor.set_framesize(sensor.VGA)
-sensor.skip_frames(time = 1000) # Let new settings take effect.
-
-# Let folks know we are about to take a picture.
-pyb.LED(BLUE_LED_PIN).on()
-
-if(newFile): # If the log file does not exist then create it.
-    with open('time.txt', 'a') as timeFile: # Write a text file to keep track of date, time and image number.
-        timeFile.write('Date and time format: year, month, day, hours, minutes, seconds, subseconds' + '\n')
-        timeFile.write(newName + ',' + year + ',' + month + ',' + day + ',' + hour + ',' + minute + ',' + second + ',' + subSecond + '\n')
-else:
-    with open('time.txt', 'a') as timeFile: # Append date, time and image number to the text file.
-        timeFile.write(newName + ',' + year + ',' + month + ',' + day + ',' + hour + ',' + minute + ',' + second + ',' + subSecond + '\n')
-
-if not "images" in os.listdir(): os.mkdir("images") # Make an images directory
-
-# Take a photo and save it to the SD card
-img = sensor.snapshot()
-img.save('images/' + newName, quality=90)
-pyb.LED(BLUE_LED_PIN).off()
-
-# Enter deepsleep mode (i.e. the OpenMV Cam effectively turns itself off except for the RTC).
-machine.deepsleep()
diff --git a/scripts/examples/Arduino/Portenta-H7/06-Video-Recording/gif.py b/scripts/examples/Arduino/Portenta-H7/06-Video-Recording/gif.py
deleted file mode 100644
index f96c719cb..000000000
--- a/scripts/examples/Arduino/Portenta-H7/06-Video-Recording/gif.py
+++ /dev/null
@@ -1,37 +0,0 @@
-# GIF Video Recording Example
-#
-# Note: You will need an SD card to run this example.
-#
-# You can use your OpenMV Cam to record gif files. You can either feed the
-# recorder object RGB565 frames or Grayscale frames. Use photo editing software
-# like GIMP to compress and optimize the Gif before uploading it to the web.
-
-import sensor, image, time, gif, pyb
-
-RED_LED_PIN = 1
-BLUE_LED_PIN = 3
-
-sensor.reset() # Initialize the camera sensor.
-sensor.set_pixformat(sensor.GRAYSCALE) # or sensor.RGB565
-sensor.set_framesize(sensor.QQVGA) # or sensor.QVGA (or others)
-sensor.skip_frames(time = 2000) # Let new settings take effect.
-clock = time.clock() # Tracks FPS.
-
-pyb.LED(RED_LED_PIN).on()
-sensor.skip_frames(time = 2000) # Give the user time to get ready.
-
-pyb.LED(RED_LED_PIN).off()
-pyb.LED(BLUE_LED_PIN).on()
-
-g = gif.Gif("example.gif", loop=True)
-
-print("You're on camera!")
-for i in range(100):
-    clock.tick()
-    # clock.avg() returns the milliseconds between frames - the gif delay is in
-    g.add_frame(sensor.snapshot(), delay=int(clock.avg()/10)) # centiseconds.
-    print(clock.fps())
-
-g.close()
-pyb.LED(BLUE_LED_PIN).off()
-print("Done! Reset the camera to see the saved recording.")
diff --git a/scripts/examples/Arduino/Portenta-H7/06-Video-Recording/gif_on_face_detection.py b/scripts/examples/Arduino/Portenta-H7/06-Video-Recording/gif_on_face_detection.py
deleted file mode 100644
index 0732ca1c8..000000000
--- a/scripts/examples/Arduino/Portenta-H7/06-Video-Recording/gif_on_face_detection.py
+++ /dev/null
@@ -1,65 +0,0 @@
-# GIF Video Recording on Face Detection Example
-#
-# Note: You will need an SD card to run this example.
-#
-# You can use your OpenMV Cam to record gif files. You can either feed the
-# recorder object RGB565 frames or Grayscale frames. Use photo editing software
-# like GIMP to compress and optimize the Gif before uploading it to the web.
-#
-# This example demonstrates using face tracking on your OpenMV Cam to take a
-# gif.
-
-import sensor, image, time, gif, pyb
-
-RED_LED_PIN = 1
-BLUE_LED_PIN = 3
-
-sensor.reset() # Initialize the camera sensor.
-sensor.set_pixformat(sensor.GRAYSCALE) # or sensor.RGB565
-sensor.set_framesize(sensor.QQVGA) # or sensor.HQVGA (or others)
-sensor.skip_frames(time = 2000) # Let new settings take effect.
-
-# Load up a face detection HaarCascade. This is an object that your OpenMV Cam
-# can use to detect faces using the find_features() method below. Your OpenMV
-# Cam has the frontalface HaarCascade built-in. By default, all the stages of the
-# HaarCascade are loaded. However, you can adjust the number of stages to speed
-# up processing at the expense of accuracy. The frontalface HaarCascade has 25
-# stages.
-face_cascade = image.HaarCascade("frontalface", stages=25)
-
-while(True):
-
-    pyb.LED(RED_LED_PIN).on()
-    print("About to start detecting faces...")
-    sensor.skip_frames(time = 2000) # Give the user time to get ready.
-
-    pyb.LED(RED_LED_PIN).off()
-    print("Now detecting faces!")
-    pyb.LED(BLUE_LED_PIN).on()
-
-    diff = 10 # We'll say we detected a face after 10 frames.
-    while(diff):
-        img = sensor.snapshot()
-        # Threshold can be between 0.0 and 1.0. A higher threshold results in a
-        # higher detection rate with more false positives. The scale value
-        # controls the matching scale, allowing you to detect smaller faces.
-        faces = img.find_features(face_cascade, threshold=0.5, scale_factor=1.5)
-
-        if faces:
-            diff -= 1
-            for r in faces:
-                img.draw_rectangle(r)
-
-    g = gif.Gif("example-%d.gif" % pyb.rng(), loop=True)
-
-    clock = time.clock() # Tracks FPS.
-    print("You're on camera!")
-    for i in range(100):
-        clock.tick()
-        # clock.avg() returns the milliseconds between frames - the gif delay is in
-        g.add_frame(sensor.snapshot(), delay=int(clock.avg()/10)) # centiseconds.
-        print(clock.fps())
-
-    g.close()
-    pyb.LED(BLUE_LED_PIN).off()
-    print("Restarting...")
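The delay conversion used by both GIF recorders above is worth spelling out; a
pure-Python check with a made-up frame time:

    # clock.avg() reports milliseconds per frame, while GIF frame delays are
    # stored in centiseconds - hence the divide-by-10.
    avg_ms = 125                 # hypothetical average frame time
    delay_cs = int(avg_ms / 10)  # 12 centiseconds, i.e. ~0.12 s per frame
    print(delay_cs)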
diff --git a/scripts/examples/Arduino/Portenta-H7/06-Video-Recording/gif_on_movement.py b/scripts/examples/Arduino/Portenta-H7/06-Video-Recording/gif_on_movement.py
deleted file mode 100644
index 4f52d6c32..000000000
--- a/scripts/examples/Arduino/Portenta-H7/06-Video-Recording/gif_on_movement.py
+++ /dev/null
@@ -1,58 +0,0 @@
-# GIF Video Recording on Movement Example
-#
-# Note: You will need an SD card to run this example.
-#
-# You can use your OpenMV Cam to record gif files. You can either feed the
-# recorder object RGB565 frames or Grayscale frames. Use photo editing software
-# like GIMP to compress and optimize the Gif before uploading it to the web.
-#
-# This example demonstrates using frame differencing with your OpenMV Cam to do
-# motion detection. After motion is detected your OpenMV Cam will take video.
-
-import sensor, image, time, gif, pyb, os
-
-RED_LED_PIN = 1
-BLUE_LED_PIN = 3
-
-sensor.reset() # Initialize the camera sensor.
-sensor.set_pixformat(sensor.GRAYSCALE) # or sensor.RGB565
-sensor.set_framesize(sensor.QQVGA) # or sensor.QVGA (or others)
-sensor.skip_frames(time = 2000) # Let new settings take effect.
-
-if not "temp" in os.listdir(): os.mkdir("temp") # Make a temp directory
-
-while(True):
-
-    pyb.LED(RED_LED_PIN).on()
-    print("About to save background image...")
-    sensor.skip_frames(time = 2000) # Give the user time to get ready.
-
-    pyb.LED(RED_LED_PIN).off()
-    sensor.snapshot().save("temp/bg.bmp")
-    print("Saved background image - Now detecting motion!")
-    pyb.LED(BLUE_LED_PIN).on()
-
-    diff = 10 # We'll say we detected motion after 10 frames of motion.
-    while(diff):
-        img = sensor.snapshot()
-        img.difference("temp/bg.bmp")
-        stats = img.statistics()
-        # Stats 5 is the max of the lighting color channel. The code below
-        # triggers when the lighting max for the whole image goes above 20.
-        # The lighting difference maximum should normally be zero.
-        if (stats[5] > 20):
-            diff -= 1
-
-    g = gif.Gif("example-%d.gif" % pyb.rng(), loop=True)
-
-    clock = time.clock() # Tracks FPS.
-    print("You're on camera!")
-    for i in range(100):
-        clock.tick()
-        # clock.avg() returns the milliseconds between frames - the gif delay is in
-        g.add_frame(sensor.snapshot(), delay=int(clock.avg()/10)) # centiseconds.
-        print(clock.fps())
-
-    g.close()
-    pyb.LED(BLUE_LED_PIN).off()
-    print("Restarting...")
diff --git a/scripts/examples/Arduino/Portenta-H7/06-Video-Recording/imageio_memory.py b/scripts/examples/Arduino/Portenta-H7/06-Video-Recording/imageio_memory.py
deleted file mode 100644
index c64349b7b..000000000
--- a/scripts/examples/Arduino/Portenta-H7/06-Video-Recording/imageio_memory.py
+++ /dev/null
@@ -1,33 +0,0 @@
-# Image Memory Stream I/O Example
-#
-# This example shows how to use the ImageIO stream to record frames in memory and play them back.
-# Note: While this should work on any board, the board should have SDRAM to be of any use.
-import sensor, image, time
-
-# Number of frames to pre-allocate and record
-N_FRAMES = 500
-
-sensor.reset()
-sensor.set_pixformat(sensor.GRAYSCALE)
-sensor.set_framesize(sensor.QVGA)
-
-# This frame size must match the image size passed to ImageIO
-sensor.set_windowing((120, 120))
-sensor.skip_frames(time = 2000)
-
-clock = time.clock()
-
-# Write to memory stream
-stream = image.ImageIO((120, 120, sensor.GRAYSCALE), N_FRAMES)
-
-for i in range(0, N_FRAMES):
-    clock.tick()
-    stream.write(sensor.snapshot())
-    print(clock.fps())
-
-while (True):
-    # Rewind stream and play back
-    stream.seek(0)
-    for i in range(0, N_FRAMES):
-        img = stream.read(copy_to_fb=True, pause=True)
-        # Do machine vision algorithms on the image here.
diff --git a/scripts/examples/Arduino/Portenta-H7/06-Video-Recording/imageio_read.py b/scripts/examples/Arduino/Portenta-H7/06-Video-Recording/imageio_read.py
deleted file mode 100644
index 639ee86fd..000000000
--- a/scripts/examples/Arduino/Portenta-H7/06-Video-Recording/imageio_read.py
+++ /dev/null
@@ -1,32 +0,0 @@
-# Image Reader Example
-#
-# USE THIS EXAMPLE WITH A uSD CARD!
-#
-# This example shows how to use the Image Reader object to replay snapshots of what your
-# OpenMV Cam saw, saved by the Image Writer object, for testing machine vision algorithms.
-
-# Altered to allow full-speed reading from the SD card for extraction of sequences to the network etc.
-# Set the new pause parameter to False.
-
-import sensor, image, time
-
-snapshot_source = False # Set to True once finished to pull data from the sensor.
-
-sensor.reset()
-sensor.set_pixformat(sensor.GRAYSCALE)
-sensor.set_framesize(sensor.QQVGA)
-sensor.skip_frames(time = 2000)
-clock = time.clock()
-
-stream = None
-if snapshot_source == False:
-    stream = image.ImageIO("/stream.bin", "r")
-
-while(True):
-    clock.tick()
-    if snapshot_source:
-        img = sensor.snapshot()
-    else:
-        img = stream.read(copy_to_fb=True, loop=True, pause=True)
-    # Do machine vision algorithms on the image here.
-    print(clock.fps())
diff --git a/scripts/examples/Arduino/Portenta-H7/06-Video-Recording/imageio_write.py b/scripts/examples/Arduino/Portenta-H7/06-Video-Recording/imageio_write.py
deleted file mode 100644
index 2922f345f..000000000
--- a/scripts/examples/Arduino/Portenta-H7/06-Video-Recording/imageio_write.py
+++ /dev/null
@@ -1,36 +0,0 @@
-# Image Writer Example
-#
-# USE THIS EXAMPLE WITH A uSD CARD! Reset the camera after recording to see the file.
-#
-# This example shows how to use the Image Writer object to record snapshots of what your
-# OpenMV Cam sees for later analysis using the Image Reader object. Images written to disk
-# by the Image Writer object are stored in a simple file format readable by your OpenMV Cam.
-
-import sensor, image, pyb, time
-
-record_time = 10000 # 10 seconds in milliseconds
-
-sensor.reset()
-sensor.set_pixformat(sensor.GRAYSCALE)
-sensor.set_framesize(sensor.QQVGA)
-sensor.skip_frames(time = 2000)
-clock = time.clock()
-
-stream = image.ImageIO("/stream.bin", "w")
-
-# Red LED on means we are capturing frames.
-pyb.LED(1).on()
-
-start = pyb.millis()
-while pyb.elapsed_millis(start) < record_time:
-    clock.tick()
-    img = sensor.snapshot()
-    # Modify the image if you feel like here...
-    stream.write(img)
-    print(clock.fps())
-
-stream.close()
-
-# Blue LED on means we are done.
-pyb.LED(1).off()
-pyb.LED(3).on()
diff --git a/scripts/examples/Arduino/Portenta-H7/06-Video-Recording/mjpeg.py b/scripts/examples/Arduino/Portenta-H7/06-Video-Recording/mjpeg.py
deleted file mode 100644
index f961beca6..000000000
--- a/scripts/examples/Arduino/Portenta-H7/06-Video-Recording/mjpeg.py
+++ /dev/null
@@ -1,37 +0,0 @@
-# MJPEG Video Recording Example
-#
-# Note: You will need an SD card to run this demo.
-#
-# You can use your OpenMV Cam to record mjpeg files. You can either feed the
-# recorder object JPEG frames or RGB565/Grayscale frames. Once you've finished
-# recording a Mjpeg file you can use VLC to play it. If you are on Ubuntu then
-# the built-in video player will work too.
-
-import sensor, image, time, mjpeg, pyb
-
-RED_LED_PIN = 1
-BLUE_LED_PIN = 3
-
-sensor.reset() # Initialize the camera sensor.
-sensor.set_pixformat(sensor.GRAYSCALE) # or sensor.RGB565
-sensor.set_framesize(sensor.QVGA) # or sensor.QQVGA (or others)
-sensor.skip_frames(time = 2000) # Let new settings take effect.
-clock = time.clock() # Tracks FPS.
-
-pyb.LED(RED_LED_PIN).on()
-sensor.skip_frames(time = 2000) # Give the user time to get ready.
-
-pyb.LED(RED_LED_PIN).off()
-pyb.LED(BLUE_LED_PIN).on()
-
-m = mjpeg.Mjpeg("example.mjpeg")
-
-print("You're on camera!")
-for i in range(200):
-    clock.tick()
-    m.add_frame(sensor.snapshot())
-    print(clock.fps())
-
-m.close(clock.fps())
-pyb.LED(BLUE_LED_PIN).off()
-print("Done! Reset the camera to see the saved recording.")
diff --git a/scripts/examples/Arduino/Portenta-H7/06-Video-Recording/mjpeg_on_face_detection.py b/scripts/examples/Arduino/Portenta-H7/06-Video-Recording/mjpeg_on_face_detection.py
deleted file mode 100644
index fd567a4f6..000000000
--- a/scripts/examples/Arduino/Portenta-H7/06-Video-Recording/mjpeg_on_face_detection.py
+++ /dev/null
@@ -1,65 +0,0 @@
-# MJPEG Video Recording on Face Detection Example
-#
-# Note: You will need an SD card to run this example.
-#
-# You can use your OpenMV Cam to record mjpeg files. You can either feed the
-# recorder object JPEG frames or RGB565/Grayscale frames. Once you've finished
-# recording a Mjpeg file you can use VLC to play it. If you are on Ubuntu then
-# the built-in video player will work too.
-#
-# This example demonstrates using face tracking on your OpenMV Cam to take an
-# mjpeg.
-
-import sensor, image, time, mjpeg, pyb
-
-RED_LED_PIN = 1
-BLUE_LED_PIN = 3
-
-sensor.reset() # Initialize the camera sensor.
-sensor.set_pixformat(sensor.GRAYSCALE) # or sensor.RGB565
-sensor.set_framesize(sensor.QQVGA) # or sensor.HQVGA (or others)
-sensor.skip_frames(time = 2000) # Let new settings take effect.
-
-# Load up a face detection HaarCascade. This is an object that your OpenMV Cam
-# can use to detect faces using the find_features() method below. Your OpenMV
-# Cam has the frontalface HaarCascade built-in. By default, all the stages of the
-# HaarCascade are loaded. However, you can adjust the number of stages to speed
-# up processing at the expense of accuracy. The frontalface HaarCascade has 25
-# stages.
-face_cascade = image.HaarCascade("frontalface", stages=25)
-
-while(True):
-
-    pyb.LED(RED_LED_PIN).on()
-    print("About to start detecting faces...")
-    sensor.skip_frames(time = 2000) # Give the user time to get ready.
-
-    pyb.LED(RED_LED_PIN).off()
-    print("Now detecting faces!")
-    pyb.LED(BLUE_LED_PIN).on()
-
-    diff = 10 # We'll say we detected a face after 10 frames.
-    while(diff):
-        img = sensor.snapshot()
-        # Threshold can be between 0.0 and 1.0. A higher threshold results in a
-        # higher detection rate with more false positives. The scale value
-        # controls the matching scale, allowing you to detect smaller faces.
-        faces = img.find_features(face_cascade, threshold=0.5, scale_factor=1.5)
-
-        if faces:
-            diff -= 1
-            for r in faces:
-                img.draw_rectangle(r)
-
-    m = mjpeg.Mjpeg("example-%d.mjpeg" % pyb.rng())
-
-    clock = time.clock() # Tracks FPS.
-    print("You're on camera!")
-    for i in range(200):
-        clock.tick()
-        m.add_frame(sensor.snapshot())
-        print(clock.fps())
-
-    m.close(clock.fps())
-    pyb.LED(BLUE_LED_PIN).off()
-    print("Restarting...")
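One number worth checking for the in-memory ImageIO example above
(imageio_memory.py): the pre-allocated stream must hold every frame at once,
which is why the comment asks for an SDRAM-equipped board. Back-of-envelope,
using the script's own settings:

    # 500 pre-allocated 120x120 GRAYSCALE frames at 1 byte per pixel:
    N_FRAMES = 500
    frame_bytes = 120 * 120
    print(N_FRAMES * frame_bytes / (1024 * 1024))  # ~6.9 MiB - far beyond on-chip RAM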
diff --git a/scripts/examples/Arduino/Portenta-H7/06-Video-Recording/mjpeg_on_movement.py b/scripts/examples/Arduino/Portenta-H7/06-Video-Recording/mjpeg_on_movement.py
deleted file mode 100644
index cb6003da1..000000000
--- a/scripts/examples/Arduino/Portenta-H7/06-Video-Recording/mjpeg_on_movement.py
+++ /dev/null
@@ -1,58 +0,0 @@
-# MJPEG Video Recording on Movement Example
-#
-# Note: You will need an SD card to run this example.
-#
-# You can use your OpenMV Cam to record mjpeg files. You can either feed the
-# recorder object JPEG frames or RGB565/Grayscale frames. Once you've finished
-# recording a Mjpeg file you can use VLC to play it. If you are on Ubuntu then
-# the built-in video player will work too.
-#
-# This example demonstrates using frame differencing with your OpenMV Cam to do
-# motion detection. After motion is detected your OpenMV Cam will take video.
-
-import sensor, image, time, mjpeg, pyb, os
-
-RED_LED_PIN = 1
-BLUE_LED_PIN = 3
-
-sensor.reset() # Initialize the camera sensor.
-sensor.set_pixformat(sensor.GRAYSCALE) # or sensor.RGB565
-sensor.set_framesize(sensor.QVGA) # or sensor.QQVGA (or others)
-sensor.skip_frames(time = 2000) # Let new settings take effect.
-
-if not "temp" in os.listdir(): os.mkdir("temp") # Make a temp directory
-
-while(True):
-
-    pyb.LED(RED_LED_PIN).on()
-    print("About to save background image...")
-    sensor.skip_frames(time = 2000) # Give the user time to get ready.
-
-    pyb.LED(RED_LED_PIN).off()
-    sensor.snapshot().save("temp/bg.bmp")
-    print("Saved background image - Now detecting motion!")
-    pyb.LED(BLUE_LED_PIN).on()
-
-    diff = 10 # We'll say we detected motion after 10 frames of motion.
-    while(diff):
-        img = sensor.snapshot()
-        img.difference("temp/bg.bmp")
-        stats = img.statistics()
-        # Stats 5 is the max of the lighting color channel. The code below
-        # triggers when the lighting max for the whole image goes above 20.
-        # The lighting difference maximum should normally be zero.
-        if (stats[5] > 20):
-            diff -= 1
-
-    m = mjpeg.Mjpeg("example-%d.mjpeg" % pyb.rng())
-
-    clock = time.clock() # Tracks FPS.
-    print("You're on camera!")
-    for i in range(200):
-        clock.tick()
-        m.add_frame(sensor.snapshot())
-        print(clock.fps())
-
-    m.close(clock.fps())
-    pyb.LED(BLUE_LED_PIN).off()
-    print("Restarting...")
diff --git a/scripts/examples/Arduino/Portenta-H7/07-Face-Detection/face_recognition.py b/scripts/examples/Arduino/Portenta-H7/07-Face-Detection/face_recognition.py
deleted file mode 100644
index 8a514664a..000000000
--- a/scripts/examples/Arduino/Portenta-H7/07-Face-Detection/face_recognition.py
+++ /dev/null
@@ -1,27 +0,0 @@
-# Face recognition with LBP descriptors.
-# See Timo Ahonen's "Face Recognition with Local Binary Patterns".
-#
-# Before running the example:
-# 1) Download the AT&T faces database http://www.cl.cam.ac.uk/Research/DTG/attarchive/pub/data/att_faces.zip
-# 2) Extract and copy the orl_faces directory to the SD card root.
-#
-# NOTE: This is just a PoC implementation of the paper mentioned above; it does not work well in real-life conditions.
-
-import sensor, time, image
-
-SUB = "s2"
-NUM_SUBJECTS = 5
-NUM_SUBJECTS_IMGS = 10
-
-img = image.Image("orl_faces/%s/1.pgm"%(SUB)).mask_ellipse()
-d0 = img.find_lbp((0, 0, img.width(), img.height()))
-img = None
-
-print("")
-for s in range(1, NUM_SUBJECTS+1):
-    dist = 0
-    for i in range(2, NUM_SUBJECTS_IMGS+1):
-        img = image.Image("orl_faces/s%d/%d.pgm"%(s, i)).mask_ellipse()
-        d1 = img.find_lbp((0, 0, img.width(), img.height()))
-        dist += image.match_descriptor(d0, d1)
-    print("Average dist for subject %d: %d"%(s, dist/NUM_SUBJECTS_IMGS))
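A condensed sketch of the comparison done by face_recognition.py above,
assuming the same AT&T database layout on the SD card; match_descriptor()
returns a distance, so smaller numbers mean more similar faces:

    import image
    # Compare subject s1's first image against subject s2's first image.
    img = image.Image("orl_faces/s1/1.pgm").mask_ellipse()
    d0 = img.find_lbp((0, 0, img.width(), img.height()))
    img = image.Image("orl_faces/s2/1.pgm").mask_ellipse()
    d1 = img.find_lbp((0, 0, img.width(), img.height()))
    print(image.match_descriptor(d0, d1))  # lower = closer match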
diff --git a/scripts/examples/Arduino/Portenta-H7/09-Feature-Detection/find_circles.py b/scripts/examples/Arduino/Portenta-H7/09-Feature-Detection/find_circles.py
deleted file mode 100644
index 50fca976c..000000000
--- a/scripts/examples/Arduino/Portenta-H7/09-Feature-Detection/find_circles.py
+++ /dev/null
@@ -1,39 +0,0 @@
-# Find Circles Example
-#
-# This example shows off how to find circles in the image using the Hough
-# Transform. https://en.wikipedia.org/wiki/Circle_Hough_Transform
-#
-# Note that the find_circles() method will only find circles which are completely
-# inside of the image. Circles which go outside of the image/roi are ignored...
-
-import sensor, image, time
-
-sensor.reset()
-sensor.set_pixformat(sensor.GRAYSCALE) # grayscale is faster
-sensor.set_framesize(sensor.QQVGA)
-sensor.skip_frames(time = 2000)
-clock = time.clock()
-
-while(True):
-    clock.tick()
-    img = sensor.snapshot().lens_corr(1.8)
-
-    # Circle objects have four values: x, y, r (radius), and magnitude. The
-    # magnitude is the strength of the detection of the circle. Higher is
-    # better...
-
-    # `threshold` controls how many circles are found. Increase its value
-    # to decrease the number of circles detected...
-
-    # `x_margin`, `y_margin`, and `r_margin` control the merging of similar
-    # circles in the x, y, and r (radius) directions.
-
-    # r_min, r_max, and r_step control what radii of circles are tested.
-    # Shrinking the number of tested circle radii yields a big performance boost.
-
-    for c in img.find_circles(threshold = 2000, x_margin = 10, y_margin = 10, r_margin = 10,
-                              r_min = 2, r_max = 100, r_step = 2):
-        img.draw_circle(c.x(), c.y(), c.r(), color = (255, 0, 0))
-        print(c)
-
-    print("FPS %f" % clock.fps())
diff --git a/scripts/examples/Arduino/Portenta-H7/09-Feature-Detection/find_line_segments.py b/scripts/examples/Arduino/Portenta-H7/09-Feature-Detection/find_line_segments.py
deleted file mode 100644
index 836ab5bd5..000000000
--- a/scripts/examples/Arduino/Portenta-H7/09-Feature-Detection/find_line_segments.py
+++ /dev/null
@@ -1,39 +0,0 @@
-# Find Line Segments Example
-#
-# This example shows off how to find line segments in the image. For each line segment
-# found in the image, a line object is returned which includes the line's rotation.
-
-# find_line_segments() finds finite-length lines (but is slow).
-# Use find_lines() instead if you want infinite-length lines (it is fast).
-
-enable_lens_corr = False # turn on for straighter lines...
-
-import sensor, image, time
-
-sensor.reset()
-sensor.set_pixformat(sensor.GRAYSCALE) # grayscale is faster
-sensor.set_framesize(sensor.QQVGA)
-sensor.skip_frames(time = 2000)
-clock = time.clock()
-
-# All lines also have `x1()`, `y1()`, `x2()`, and `y2()` methods to get their end-points
-# and a `line()` method to get all the above as one 4 value tuple for `draw_line()`.
-
-while(True):
-    clock.tick()
-    img = sensor.snapshot()
-    if enable_lens_corr: img.lens_corr(1.8) # for 2.8mm lens...
-
-    # `merge_distance` controls the merging of nearby lines. At 0 (the default), no
-    # merging is done. At 1, any line 1 pixel away from another is merged... and so
-    # on as you increase this value. You may wish to merge lines as line segment
-    # detection produces a lot of line segment results.
-
-    # `max_theta_diff` controls the maximum amount of rotation difference between
-    # any two lines about to be merged. The default setting allows for 15 degrees.
-
-    for l in img.find_line_segments(merge_distance = 0, max_theta_diff = 5):
-        img.draw_line(l.line(), color = (255, 0, 0))
-        # print(l)
-
-    print("FPS %f" % clock.fps())
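Since merge_distance and max_theta_diff are only described in comments above,
here is a hedged variant of the same loop with merging enabled (the numbers
are assumptions chosen to illustrate the knobs, not tuned values):

    import sensor, time
    sensor.reset()
    sensor.set_pixformat(sensor.GRAYSCALE)
    sensor.set_framesize(sensor.QQVGA)
    sensor.skip_frames(time = 2000)
    while(True):
        img = sensor.snapshot()
        # Segments within 5 pixels of each other and within 15 degrees of the
        # same rotation get merged into a single result.
        for l in img.find_line_segments(merge_distance = 5, max_theta_diff = 15):
            img.draw_line(l.line(), color = (255, 0, 0))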
diff --git a/scripts/examples/Arduino/Portenta-H7/09-Feature-Detection/find_lines.py b/scripts/examples/Arduino/Portenta-H7/09-Feature-Detection/find_lines.py
deleted file mode 100644
index 065e62a06..000000000
--- a/scripts/examples/Arduino/Portenta-H7/09-Feature-Detection/find_lines.py
+++ /dev/null
@@ -1,57 +0,0 @@
-# Find Lines Example
-#
-# This example shows off how to find lines in the image. For each line found
-# in the image, a line object is returned which includes the line's rotation.
-
-# Note: Line detection is done by using the Hough Transform:
-# http://en.wikipedia.org/wiki/Hough_transform
-# Please read about it above for more information on what `theta` and `rho` are.
-
-# find_lines() finds infinite length lines. Use find_line_segments() to find non-infinite lines.
-
-enable_lens_corr = False # turn on for straighter lines...
-
-import sensor, image, time
-
-sensor.reset()
-sensor.set_pixformat(sensor.GRAYSCALE) # grayscale is faster
-sensor.set_framesize(sensor.QQVGA)
-sensor.skip_frames(time = 2000)
-clock = time.clock()
-
-# All line objects have a `theta()` method to get their rotation angle in degrees.
-# You can filter lines based on their rotation angle.
-
-min_degree = 0
-max_degree = 179
-
-# All lines also have `x1()`, `y1()`, `x2()`, and `y2()` methods to get their end-points
-# and a `line()` method to get all the above as one 4 value tuple for `draw_line()`.
-
-while(True):
-    clock.tick()
-    img = sensor.snapshot()
-    if enable_lens_corr: img.lens_corr(1.8) # for 2.8mm lens...
-
-    # `threshold` controls how many lines in the image are found. Only lines with
-    # edge difference magnitude sums greater than `threshold` are detected...
-
-    # More about `threshold` - each pixel in the image contributes a magnitude value
-    # to a line. The sum of all contributions is the magnitude for that line. Then
-    # when lines are merged their magnitudes are added together. Note that `threshold`
-    # filters out lines with low magnitudes before merging. To see the magnitude of
-    # un-merged lines set `theta_margin` and `rho_margin` to 0...
-
-    # `theta_margin` and `rho_margin` control merging similar lines. If two lines'
-    # theta and rho value differences are less than the margins, then they are merged.
-
-    for l in img.find_lines(threshold = 1000, theta_margin = 25, rho_margin = 25):
-        if (min_degree <= l.theta()) and (l.theta() <= max_degree):
-            img.draw_line(l.line(), color = (255, 0, 0))
-            # print(l)
-
-    print("FPS %f" % clock.fps())
-
-# About negative rho values:
-#
-# A [theta+0:-rho] tuple is the same as [theta+180:+rho].
diff --git a/scripts/examples/Arduino/Portenta-H7/09-Feature-Detection/find_rects.py b/scripts/examples/Arduino/Portenta-H7/09-Feature-Detection/find_rects.py
deleted file mode 100644
index 9a9890809..000000000
--- a/scripts/examples/Arduino/Portenta-H7/09-Feature-Detection/find_rects.py
+++ /dev/null
@@ -1,31 +0,0 @@
-# Find Rects Example
-#
-# This example shows off how to find rectangles in the image using the quad threshold
-# detection code from our April Tags code. The quad threshold detection algorithm
-# detects rectangles in an extremely robust way and is much better than Hough
-# Transform based methods. For example, it can still detect rectangles even when lens
-# distortion causes those rectangles to look bent. Rounded rectangles are no problem!
-# (But, given this, the code will also detect small-radius circles too)...
-
-import sensor, image, time
-
-sensor.reset()
-sensor.set_pixformat(sensor.GRAYSCALE) # grayscale is faster (160x120 max on OpenMV-M7)
-sensor.set_framesize(sensor.QQVGA)
-sensor.skip_frames(time = 2000)
-clock = time.clock()
-
-while(True):
-    clock.tick()
-    img = sensor.snapshot()
-
-    # `threshold` below should be set to a high enough value to filter out noise
-    # rectangles detected in the image which have low edge magnitudes. Rectangles
-    # have larger edge magnitudes the larger and more contrasty they are...
-
-    for r in img.find_rects(threshold = 10000):
-        img.draw_rectangle(r.rect(), color = (255, 0, 0))
-        for p in r.corners(): img.draw_circle(p[0], p[1], 5, color = (0, 255, 0))
-        print(r)
-
-    print("FPS %f" % clock.fps())
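The find_lines example above and both regression examples below close with the
same note about negative rho. A pure-Python illustration of that equivalence:

    # (theta, -rho) names the same line as (theta + 180, +rho), so a
    # hypothetical normalizer can fold every line into rho >= 0 form.
    def normalize_polar(theta, rho):
        return (theta, rho) if rho >= 0 else ((theta + 180) % 360, -rho)

    print(normalize_polar(30, -10))  # (210, 10)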
diff --git a/scripts/examples/Arduino/Portenta-H7/09-Feature-Detection/linear_regression_fast.py b/scripts/examples/Arduino/Portenta-H7/09-Feature-Detection/linear_regression_fast.py
deleted file mode 100644
index f200e4ace..000000000
--- a/scripts/examples/Arduino/Portenta-H7/09-Feature-Detection/linear_regression_fast.py
+++ /dev/null
@@ -1,43 +0,0 @@
-# Fast Linear Regression Example
-#
-# This example shows off how to use the get_regression() method on your OpenMV Cam
-# to get the linear regression of a ROI. Using this method you can easily build
-# a robot which can track lines which all point in the same general direction
-# but are not actually connected. Use find_blobs() on lines that are nicely
-# connected for better filtering options and control.
-#
-# This is called the fast linear regression because we use the least-squares
-# method to fit the line. However, this method is NOT GOOD FOR ANY images that
-# have a lot (or really any) outlier points which corrupt the line fit...
-
-THRESHOLD = (0, 100) # Grayscale threshold for dark things...
-BINARY_VISIBLE = True # Does binary first so you can see what the linear regression
-                      # is being run on... might lower FPS though.
-
-import sensor, image, time
-
-sensor.reset()
-sensor.set_pixformat(sensor.GRAYSCALE)
-sensor.set_framesize(sensor.QQVGA)
-sensor.skip_frames(time = 2000)
-clock = time.clock()
-
-while(True):
-    clock.tick()
-    img = sensor.snapshot().binary([THRESHOLD]) if BINARY_VISIBLE else sensor.snapshot()
-
-    # Returns a line object similar to line objects returned by find_lines() and
-    # find_line_segments(). You have x1(), y1(), x2(), y2(), length(),
-    # theta() (rotation in degrees), rho(), and magnitude().
-    #
-    # magnitude() represents how well the linear regression worked. It goes from
-    # (0, INF] where 0 is returned for a circle. The more linear the
-    # scene is the higher the magnitude.
-    line = img.get_regression([(255,255) if BINARY_VISIBLE else THRESHOLD])
-
-    if (line): img.draw_line(line.line(), color = 127)
-    print("FPS %f, mag = %s" % (clock.fps(), str(line.magnitude()) if (line) else "N/A"))
-
-# About negative rho values:
-#
-# A [theta+0:-rho] tuple is the same as [theta+180:+rho].
diff --git a/scripts/examples/Arduino/Portenta-H7/09-Feature-Detection/linear_regression_robust.py b/scripts/examples/Arduino/Portenta-H7/09-Feature-Detection/linear_regression_robust.py
deleted file mode 100644
index 9f24c618d..000000000
--- a/scripts/examples/Arduino/Portenta-H7/09-Feature-Detection/linear_regression_robust.py
+++ /dev/null
@@ -1,45 +0,0 @@
-# Robust Linear Regression Example
-#
-# This example shows off how to use the get_regression() method on your OpenMV Cam
-# to get the linear regression of a ROI. Using this method you can easily build
-# a robot which can track lines which all point in the same general direction
-# but are not actually connected. Use find_blobs() on lines that are nicely
-# connected for better filtering options and control.
-#
-# We're using the robust=True argument for get_regression() in this script which
-# computes the linear regression using a much more robust algorithm... but potentially
-# much slower. The robust algorithm runs in O(N^2) time on the image. So, YOU NEED
-# TO LIMIT THE NUMBER OF PIXELS the robust algorithm works on or it can actually
-# take seconds for the algorithm to give you a result... THRESHOLD VERY CAREFULLY!
-
-THRESHOLD = (0, 100) # Grayscale threshold for dark things...
-BINARY_VISIBLE = True # Does binary first so you can see what the linear regression
-                      # is being run on... might lower FPS though.
-
-import sensor, image, time
-
-sensor.reset()
-sensor.set_pixformat(sensor.GRAYSCALE)
-sensor.set_framesize(sensor.QQQVGA) # 80x60 (4,800 pixels) - O(N^2) max = 23,040,000.
-sensor.skip_frames(time = 2000)     # WARNING: If you use QQVGA it may take seconds
-clock = time.clock()                # to process a frame sometimes.
-
-while(True):
-    clock.tick()
-    img = sensor.snapshot().binary([THRESHOLD]) if BINARY_VISIBLE else sensor.snapshot()
-
-    # Returns a line object similar to line objects returned by find_lines() and
-    # find_line_segments(). You have x1(), y1(), x2(), y2(), length(),
-    # theta() (rotation in degrees), rho(), and magnitude().
-    #
-    # magnitude() represents how well the linear regression worked. It means something
-    # different for the robust linear regression. In general, the larger the value the
-    # better...
-    line = img.get_regression([(255,255) if BINARY_VISIBLE else THRESHOLD], robust = True)
-
-    if (line): img.draw_line(line.line(), color = 127)
-    print("FPS %f, mag = %s" % (clock.fps(), str(line.magnitude()) if (line) else "N/A"))
-
-# About negative rho values:
-#
-# A [theta+0:-rho] tuple is the same as [theta+180:+rho].
diff --git a/scripts/examples/Arduino/Portenta-H7/09-Feature-Detection/selective_search.py b/scripts/examples/Arduino/Portenta-H7/09-Feature-Detection/selective_search.py
deleted file mode 100644
index 3e3cca78c..000000000
--- a/scripts/examples/Arduino/Portenta-H7/09-Feature-Detection/selective_search.py
+++ /dev/null
@@ -1,22 +0,0 @@
-# Selective Search Example
-
-import sensor, image, time
-from random import randint
-
-sensor.reset()                      # Reset and initialize the sensor.
-sensor.set_pixformat(sensor.RGB565) # Set pixel format to RGB565 (or GRAYSCALE)
-sensor.set_framesize(sensor.QVGA)   # Set frame size to QVGA (320x240)
-sensor.skip_frames(time = 2000)     # Wait for settings to take effect.
-sensor.set_auto_gain(False)
-sensor.set_auto_exposure(False, exposure_us=10000)
-clock = time.clock()                # Create a clock object to track the FPS.
-
-
-while(True):
-    clock.tick()                    # Update the FPS clock.
-    img = sensor.snapshot()         # Take a picture and return the image.
-    rois = img.selective_search(threshold = 200, size = 20, a1=0.5, a2=1.0, a3=1.0)
-    for r in rois:
-        img.draw_rectangle(r, color=(255, 0, 0))
-        #img.draw_rectangle(r, color=(randint(100, 255), randint(100, 255), randint(100, 255)))
-    print(clock.fps())
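The capitalized warning in linear_regression_robust.py above is easy to
quantify; a pure-Python check of the comment's own numbers:

    # The robust fit is O(N^2) in the number of (thresholded) pixels.
    pixels = 80 * 60    # QQQVGA resolution used by the script
    print(pixels)       # 4800 pixels
    print(pixels ** 2)  # 23,040,000 pairwise operations worst case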
diff --git a/scripts/examples/Arduino/Portenta-H7/10-Color-Tracking/automatic_grayscale_color_tracking.py b/scripts/examples/Arduino/Portenta-H7/10-Color-Tracking/automatic_grayscale_color_tracking.py
deleted file mode 100644
index d0251c212..000000000
--- a/scripts/examples/Arduino/Portenta-H7/10-Color-Tracking/automatic_grayscale_color_tracking.py
+++ /dev/null
@@ -1,48 +0,0 @@
-# Automatic Grayscale Color Tracking Example
-#
-# This example shows off single color automatic grayscale color tracking using the OpenMV Cam.
-
-import sensor, image, time
-print("Letting auto algorithms run. Don't put anything in front of the camera!")
-
-sensor.reset()
-sensor.set_pixformat(sensor.GRAYSCALE)
-sensor.set_framesize(sensor.QVGA)
-sensor.skip_frames(time = 2000)
-sensor.set_auto_gain(False) # must be turned off for color tracking
-clock = time.clock()
-
-# Capture the color thresholds for whatever was in the center of the image.
-r = [(320//2)-(50//2), (240//2)-(50//2), 50, 50] # 50x50 center of QVGA.
-
-print("Auto algorithms done. Hold the object you want to track in front of the camera in the box.")
-print("MAKE SURE THE COLOR OF THE OBJECT YOU WANT TO TRACK IS FULLY ENCLOSED BY THE BOX!")
-for i in range(60):
-    img = sensor.snapshot()
-    img.draw_rectangle(r)
-
-print("Learning thresholds...")
-threshold = [128, 128] # Middle grayscale values.
-for i in range(60):
-    img = sensor.snapshot()
-    hist = img.get_histogram(roi=r)
-    lo = hist.get_percentile(0.01) # Get the CDF of the histogram at the 1% range (ADJUST AS NECESSARY)!
-    hi = hist.get_percentile(0.99) # Get the CDF of the histogram at the 99% range (ADJUST AS NECESSARY)!
-    # Average in the percentile values.
-    threshold[0] = (threshold[0] + lo.value()) // 2
-    threshold[1] = (threshold[1] + hi.value()) // 2
-    for blob in img.find_blobs([threshold], pixels_threshold=100, area_threshold=100, merge=True, margin=10):
-        img.draw_rectangle(blob.rect())
-        img.draw_cross(blob.cx(), blob.cy())
-    img.draw_rectangle(r)
-
-print("Thresholds learned...")
-print("Tracking colors...")
-
-while(True):
-    clock.tick()
-    img = sensor.snapshot()
-    for blob in img.find_blobs([threshold], pixels_threshold=100, area_threshold=100, merge=True, margin=10):
-        img.draw_rectangle(blob.rect())
-        img.draw_cross(blob.cx(), blob.cy())
-    print(clock.fps())
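A pure-Python simulation (with made-up values) of the (old + new) // 2 update
used in the learning loop above: each frame halves the gap, so the threshold
converges toward the measured percentile within a handful of frames:

    threshold_lo = 128                     # initial middle grayscale value
    for measured in [40, 42, 41, 40, 39]:  # hypothetical 1% percentile values
        threshold_lo = (threshold_lo + measured) // 2
        print(threshold_lo)                # 84, 63, 52, 46, 42 -> settles near 40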
diff --git a/scripts/examples/Arduino/Portenta-H7/10-Color-Tracking/automatic_rgb565_color_tracking.py b/scripts/examples/Arduino/Portenta-H7/10-Color-Tracking/automatic_rgb565_color_tracking.py
deleted file mode 100644
index 2a03a38e2..000000000
--- a/scripts/examples/Arduino/Portenta-H7/10-Color-Tracking/automatic_rgb565_color_tracking.py
+++ /dev/null
@@ -1,52 +0,0 @@
-# Automatic RGB565 Color Tracking Example
-#
-# This example shows off single color automatic RGB565 color tracking using the OpenMV Cam.
-
-import sensor, image, time
-print("Letting auto algorithms run. Don't put anything in front of the camera!")
-
-sensor.reset()
-sensor.set_pixformat(sensor.RGB565)
-sensor.set_framesize(sensor.QVGA)
-sensor.skip_frames(time = 2000)
-sensor.set_auto_gain(False) # must be turned off for color tracking
-clock = time.clock()
-
-# Capture the color thresholds for whatever was in the center of the image.
-r = [(320//2)-(50//2), (240//2)-(50//2), 50, 50] # 50x50 center of QVGA.
-
-print("Auto algorithms done. Hold the object you want to track in front of the camera in the box.")
-print("MAKE SURE THE COLOR OF THE OBJECT YOU WANT TO TRACK IS FULLY ENCLOSED BY THE BOX!")
-for i in range(60):
-    img = sensor.snapshot()
-    img.draw_rectangle(r)
-
-print("Learning thresholds...")
-threshold = [50, 50, 0, 0, 0, 0] # Middle L, A, B values.
-for i in range(60):
-    img = sensor.snapshot()
-    hist = img.get_histogram(roi=r)
-    lo = hist.get_percentile(0.01) # Get the CDF of the histogram at the 1% range (ADJUST AS NECESSARY)!
-    hi = hist.get_percentile(0.99) # Get the CDF of the histogram at the 99% range (ADJUST AS NECESSARY)!
-    # Average in the percentile values.
-    threshold[0] = (threshold[0] + lo.l_value()) // 2
-    threshold[1] = (threshold[1] + hi.l_value()) // 2
-    threshold[2] = (threshold[2] + lo.a_value()) // 2
-    threshold[3] = (threshold[3] + hi.a_value()) // 2
-    threshold[4] = (threshold[4] + lo.b_value()) // 2
-    threshold[5] = (threshold[5] + hi.b_value()) // 2
-    for blob in img.find_blobs([threshold], pixels_threshold=100, area_threshold=100, merge=True, margin=10):
-        img.draw_rectangle(blob.rect())
-        img.draw_cross(blob.cx(), blob.cy())
-    img.draw_rectangle(r)
-
-print("Thresholds learned...")
-print("Tracking colors...")
-
-while(True):
-    clock.tick()
-    img = sensor.snapshot()
-    for blob in img.find_blobs([threshold], pixels_threshold=100, area_threshold=100, merge=True, margin=10):
-        img.draw_rectangle(blob.rect())
-        img.draw_cross(blob.cx(), blob.cy())
-    print(clock.fps())
diff --git a/scripts/examples/Arduino/Portenta-H7/10-Color-Tracking/black_grayscale_line_following.py b/scripts/examples/Arduino/Portenta-H7/10-Color-Tracking/black_grayscale_line_following.py
deleted file mode 100644
index c1e83fb2b..000000000
--- a/scripts/examples/Arduino/Portenta-H7/10-Color-Tracking/black_grayscale_line_following.py
+++ /dev/null
@@ -1,84 +0,0 @@
-# Black Grayscale Line Following Example
-#
-# Making a line following robot requires a lot of effort. This example script
-# shows how to do the machine vision part of the line following robot. You
-# can use the output from this script to drive a differential drive robot to
-# follow a line. This script just generates a single turn value that tells
-# your robot to go left or right.
-#
-# For this script to work properly you should point the camera at a line at a
-# 45 or so degree angle. Please make sure that only the line is within the
-# camera's field of view.
-
-import sensor, image, time, math
-
-# Tracks a black line. Use [(128, 255)] for tracking a white line.
-GRAYSCALE_THRESHOLD = [(0, 64)]
-
-# Each roi is (x, y, w, h). The line detection algorithm will try to find the
-# centroid of the largest blob in each roi. The x position of the centroids
-# will then be averaged with different weights where the most weight is assigned
-# to the roi near the bottom of the image and less to the next roi and so on.
-ROIS = [ # [ROI, weight]
-        (0, 100, 160, 20, 0.7), # You'll need to tweak the weights for your app
-        (0,  50, 160, 20, 0.3), # depending on how your robot is set up.
-        (0,   0, 160, 20, 0.1)
-       ]
-
-# Compute the weight divisor (we're computing this so you don't have to make weights add to 1).
-weight_sum = 0
-for r in ROIS: weight_sum += r[4] # r[4] is the roi weight.
-
-# Camera setup...
-sensor.reset() # Initialize the camera sensor.
-sensor.set_pixformat(sensor.GRAYSCALE) # use grayscale.
-sensor.set_framesize(sensor.QQVGA) # use QQVGA for speed.
-sensor.skip_frames(time = 2000) # Let new settings take effect.
-sensor.set_auto_gain(False) # must be turned off for color tracking
-clock = time.clock() # Tracks FPS.
-
-while(True):
-    clock.tick() # Track elapsed milliseconds between snapshots().
-    img = sensor.snapshot() # Take a picture and return the image.
-
-    centroid_sum = 0
-
-    for r in ROIS:
-        blobs = img.find_blobs(GRAYSCALE_THRESHOLD, roi=r[0:4], merge=True) # r[0:4] is the roi tuple.
-
-        if blobs:
-            # Find the blob with the most pixels.
-            largest_blob = max(blobs, key=lambda b: b.pixels())
-
-            # Draw a rect around the blob.
-            img.draw_rectangle(largest_blob.rect())
-            img.draw_cross(largest_blob.cx(),
-                           largest_blob.cy())
-
-            centroid_sum += largest_blob.cx() * r[4] # r[4] is the roi weight.
-
-    center_pos = (centroid_sum / weight_sum) # Determine the center of the line.
-
-    # Convert the center_pos to a deflection angle. We're using a non-linear
-    # operation so that the response gets stronger the farther off the line we
-    # are. Non-linear operations are good to use on the output of algorithms
-    # like this to cause a response "trigger".
-    deflection_angle = 0
-
-    # The 80 is from half the X res, the 60 is from half the Y res. The
-    # equation below is just computing the angle of a triangle where the
-    # opposite side of the triangle is the deviation of the center position
-    # from the center and the adjacent side is half the Y res. This limits
-    # the angle output to around -45 to 45. (It's not quite -45 and 45).
-    deflection_angle = -math.atan((center_pos-80)/60)
-
-    # Convert the angle in radians to degrees.
-    deflection_angle = math.degrees(deflection_angle)
-
-    # Now you have an angle telling you how much to turn the robot by, which
-    # incorporates the part of the line nearest to the robot and parts of
-    # the line farther away from the robot for a better prediction.
-    print("Turn Angle: %f" % deflection_angle)
-
-    print(clock.fps()) # Note: Your OpenMV Cam runs about half as fast while
-                       # connected to your computer. The FPS should increase once disconnected.
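The triangle geometry described in the comments of the line-following script
above can be verified in plain Python:

    import math
    # Centroid 40 px right of center on a 160x120 image: opposite side = 40,
    # adjacent side = half the Y resolution = 60 (sign convention as in the script).
    center_pos = 120
    deflection_angle = -math.atan((center_pos - 80) / 60)
    print(math.degrees(deflection_angle))  # ~ -33.7 degrees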
diff --git a/scripts/examples/Arduino/Portenta-H7/10-Color-Tracking/image_histogram_info.py b/scripts/examples/Arduino/Portenta-H7/10-Color-Tracking/image_histogram_info.py
deleted file mode 100644
index a97ac899b..000000000
--- a/scripts/examples/Arduino/Portenta-H7/10-Color-Tracking/image_histogram_info.py
+++ /dev/null
@@ -1,24 +0,0 @@
-# Image Histogram Info Example
-#
-# This script computes the histogram of the image and prints it out.
-
-import sensor, image, time
-
-sensor.reset()
-sensor.set_pixformat(sensor.GRAYSCALE) # or RGB565.
-sensor.set_framesize(sensor.QVGA)
-sensor.skip_frames(time = 2000)
-sensor.set_auto_gain(False) # must be turned off for color tracking
-clock = time.clock()
-
-while(True):
-    clock.tick()
-    img = sensor.snapshot()
-    # Gets the grayscale histogram for the image into 8 bins.
-    # bins defaults to 256 and may be between 2 and 256.
-    print(img.get_histogram(bins=8))
-    print(clock.fps())
-
-# You can also pass get_histogram() an "roi=" to get just the histogram of that area.
-# get_histogram() allows you to quickly determine the color channel information of
-# any area in the image.
diff --git a/scripts/examples/Arduino/Portenta-H7/10-Color-Tracking/image_statistics_info.py b/scripts/examples/Arduino/Portenta-H7/10-Color-Tracking/image_statistics_info.py
deleted file mode 100644
index c70042ee0..000000000
--- a/scripts/examples/Arduino/Portenta-H7/10-Color-Tracking/image_statistics_info.py
+++ /dev/null
@@ -1,22 +0,0 @@
-# Image Statistics Info Example
-#
-# This script computes the statistics of the image and prints it out.
-
-import sensor, image, time
-
-sensor.reset()
-sensor.set_pixformat(sensor.GRAYSCALE) # or RGB565.
-sensor.set_framesize(sensor.QVGA)
-sensor.skip_frames(time = 2000)
-sensor.set_auto_gain(False) # must be turned off for color tracking
-clock = time.clock()
-
-while(True):
-    clock.tick()
-    img = sensor.snapshot()
-    print(img.get_statistics())
-    print(clock.fps())
-
-# You can also pass get_statistics() an "roi=" to get just the statistics of that area.
-# get_statistics() allows you to quickly determine the color channel information of
-# any area in the image.
diff --git a/scripts/examples/Arduino/Portenta-H7/10-Color-Tracking/ir_beacon_grayscale_tracking.py b/scripts/examples/Arduino/Portenta-H7/10-Color-Tracking/ir_beacon_grayscale_tracking.py
deleted file mode 100644
index 0d2184a6a..000000000
--- a/scripts/examples/Arduino/Portenta-H7/10-Color-Tracking/ir_beacon_grayscale_tracking.py
+++ /dev/null
@@ -1,29 +0,0 @@
-# IR Beacon Grayscale Tracking Example
-#
-# This example shows off IR beacon grayscale tracking using the OpenMV Cam.
-
-import sensor, image, time
-
-thresholds = (255, 255) # thresholds for bright white light from IR.
-
-sensor.reset()
-sensor.set_pixformat(sensor.GRAYSCALE)
-sensor.set_framesize(sensor.VGA)
-sensor.set_windowing((240, 240)) # 240x240 center pixels of VGA
-sensor.skip_frames(time = 2000)
-sensor.set_auto_gain(False) # must be turned off for color tracking
-clock = time.clock()
-
-# Only blobs with more pixels than "pixels_threshold" and more area than "area_threshold" are
-# returned by "find_blobs" below. Change "pixels_threshold" and "area_threshold" if you change the
-# camera resolution. "merge=True" merges all overlapping blobs in the image.
-
-while(True):
-    clock.tick()
-    img = sensor.snapshot()
-    for blob in img.find_blobs([thresholds], pixels_threshold=200, area_threshold=200, merge=True):
-        ratio = blob.w() / blob.h()
-        if (ratio >= 0.5) and (ratio <= 1.5): # filter out non-squarish blobs
-            img.draw_rectangle(blob.rect())
-            img.draw_cross(blob.cx(), blob.cy())
-    print(clock.fps())
diff --git a/scripts/examples/Arduino/Portenta-H7/10-Color-Tracking/ir_beacon_rgb565_tracking.py b/scripts/examples/Arduino/Portenta-H7/10-Color-Tracking/ir_beacon_rgb565_tracking.py
deleted file mode 100644
index da4366ca1..000000000
--- a/scripts/examples/Arduino/Portenta-H7/10-Color-Tracking/ir_beacon_rgb565_tracking.py
+++ /dev/null
@@ -1,29 +0,0 @@
-# IR Beacon RGB565 Tracking Example
-#
-# This example shows off IR beacon RGB565 tracking using the OpenMV Cam.
-
-import sensor, image, time
-
-thresholds = (100, 100, 0, 0, 0, 0) # thresholds for bright white light from IR.
-
-sensor.reset()
-sensor.set_pixformat(sensor.RGB565)
-sensor.set_framesize(sensor.VGA)
-sensor.set_windowing((240, 240)) # 240x240 center pixels of VGA
-sensor.skip_frames(time = 2000)
-sensor.set_auto_gain(False) # must be turned off for color tracking
-clock = time.clock()
-
-# Only blobs with more pixels than "pixels_threshold" and more area than "area_threshold" are
-# returned by "find_blobs" below. Change "pixels_threshold" and "area_threshold" if you change the
-# camera resolution. "merge=True" merges all overlapping blobs in the image.
-
-while(True):
-    clock.tick()
-    img = sensor.snapshot()
-    for blob in img.find_blobs([thresholds], pixels_threshold=200, area_threshold=200, merge=True):
-        ratio = blob.w() / blob.h()
-        if (ratio >= 0.5) and (ratio <= 1.5): # filter out non-squarish blobs
-            img.draw_rectangle(blob.rect())
-            img.draw_cross(blob.cx(), blob.cy())
-    print(clock.fps())
diff --git a/scripts/examples/Arduino/Portenta-H7/10-Color-Tracking/multi_color_blob_tracking.py b/scripts/examples/Arduino/Portenta-H7/10-Color-Tracking/multi_color_blob_tracking.py
deleted file mode 100644
index c287a4a7a..000000000
--- a/scripts/examples/Arduino/Portenta-H7/10-Color-Tracking/multi_color_blob_tracking.py
+++ /dev/null
@@ -1,40 +0,0 @@
-# Multi Color Blob Tracking Example
-#
-# This example shows off multi color blob tracking using the OpenMV Cam.
-
-import sensor, image, time, math
-
-# Color Tracking Thresholds (L Min, L Max, A Min, A Max, B Min, B Max)
-# The below thresholds track in general red/green things. You may wish to tune them...
-thresholds = [(30, 100, 15, 127, 15, 127), # generic_red_thresholds
-              (30, 100, -64, -8, -32, 32), # generic_green_thresholds
-              (0, 15, 0, 40, -80, -20)] # generic_blue_thresholds
-# You may pass up to 16 thresholds above. However, it's not really possible to segment any
-# scene with 16 thresholds before color thresholds start to overlap heavily.
-
-sensor.reset()
-sensor.set_pixformat(sensor.RGB565)
-sensor.set_framesize(sensor.QVGA)
-sensor.skip_frames(time = 2000)
-sensor.set_auto_gain(False) # must be turned off for color tracking
-clock = time.clock()
-
-# Only blobs with more pixels than "pixels_threshold" and more area than "area_threshold" are
-# returned by "find_blobs" below. Change "pixels_threshold" and "area_threshold" if you change the
-# camera resolution. Don't set "merge=True" because that will merge blobs which we don't want here.
-
-while(True):
-    clock.tick()
-    img = sensor.snapshot()
-    for blob in img.find_blobs(thresholds, pixels_threshold=200, area_threshold=200):
-        # These values depend on the blob not being circular - otherwise they will be shaky.
-        if blob.elongation() > 0.5:
-            img.draw_edges(blob.min_corners(), color=(255,0,0))
-            img.draw_line(blob.major_axis_line(), color=(0,255,0))
-            img.draw_line(blob.minor_axis_line(), color=(0,0,255))
-        # These values are stable all the time.
-        img.draw_rectangle(blob.rect())
-        img.draw_cross(blob.cx(), blob.cy())
-        # Note - the blob rotation is unique to 0-180 only.
-        img.draw_keypoints([(blob.cx(), blob.cy(), int(math.degrees(blob.rotation())))], size=20)
-    print(clock.fps())
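Several of the tracking scripts here repeat the note that the blob rotation is
only unique over 0-180 degrees (an axis has no direction). A hypothetical
helper, pure Python, for comparing rotations with that folding made explicit:

    import math
    def fold_degrees(rotation_radians):
        # Fold any angle onto the 0-180 range the blob rotation lives in.
        return math.degrees(rotation_radians) % 180.0

    print(fold_degrees(math.pi * 1.25))  # 45.0 - same axis as 225 degrees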
diff --git a/scripts/examples/Arduino/Portenta-H7/10-Color-Tracking/multi_color_code_tracking.py b/scripts/examples/Arduino/Portenta-H7/10-Color-Tracking/multi_color_code_tracking.py
deleted file mode 100644
index 1fec3a140..000000000
--- a/scripts/examples/Arduino/Portenta-H7/10-Color-Tracking/multi_color_code_tracking.py
+++ /dev/null
@@ -1,48 +0,0 @@
-# Multi Color Code Tracking Example
-#
-# This example shows off multi color code tracking using the OpenMV Cam.
-#
-# A color code is a blob composed of two or more colors. The example below will
-# only track colored objects which have two or more of the colors below in them.
-
-import sensor, image, time
-
-# Color Tracking Thresholds (L Min, L Max, A Min, A Max, B Min, B Max)
-# The below thresholds track in general red/green things. You may wish to tune them...
-thresholds = [(30, 100, 15, 127, 15, 127), # generic_red_thresholds -> index is 0 so code == (1 << 0)
-              (30, 100, -64, -8, -32, 32), # generic_green_thresholds -> index is 1 so code == (1 << 1)
-              (0, 15, 0, 40, -80, -20)] # generic_blue_thresholds -> index is 2 so code == (1 << 2)
-# Codes are or'ed together when "merge=True" for "find_blobs".
-
-sensor.reset()
-sensor.set_pixformat(sensor.RGB565)
-sensor.set_framesize(sensor.QVGA)
-sensor.skip_frames(time = 2000)
-sensor.set_auto_gain(False) # must be turned off for color tracking
-clock = time.clock()
-
-# Only blobs with more pixels than "pixels_threshold" and more area than "area_threshold" are
-# returned by "find_blobs" below. Change "pixels_threshold" and "area_threshold" if you change the
-# camera resolution. "merge=True" must be set to merge overlapping color blobs for color codes.
-
-while(True):
-    clock.tick()
-    img = sensor.snapshot()
-    for blob in img.find_blobs(thresholds, pixels_threshold=100, area_threshold=100, merge=True):
-        if blob.code() == 3: # r/g code
-            img.draw_rectangle(blob.rect())
-            img.draw_cross(blob.cx(), blob.cy())
-            img.draw_string(blob.x() + 2, blob.y() + 2, "r/g")
-        if blob.code() == 5: # r/b code
-            img.draw_rectangle(blob.rect())
-            img.draw_cross(blob.cx(), blob.cy())
-            img.draw_string(blob.x() + 2, blob.y() + 2, "r/b")
-        if blob.code() == 6: # g/b code
-            img.draw_rectangle(blob.rect())
-            img.draw_cross(blob.cx(), blob.cy())
-            img.draw_string(blob.x() + 2, blob.y() + 2, "g/b")
-        if blob.code() == 7: # r/g/b code
-            img.draw_rectangle(blob.rect())
-            img.draw_cross(blob.cx(), blob.cy())
-            img.draw_string(blob.x() + 2, blob.y() + 2, "r/g/b")
-    print(clock.fps())
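A pure-Python check of the color-code bit math used above: each threshold
index i contributes bit (1 << i), and merged blobs OR their bits together,
which is exactly why codes 3, 5, 6 and 7 are tested:

    RED, GREEN, BLUE = 1 << 0, 1 << 1, 1 << 2
    assert RED | GREEN == 3          # r/g
    assert RED | BLUE == 5           # r/b
    assert GREEN | BLUE == 6         # g/b
    assert RED | GREEN | BLUE == 7   # r/g/b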
-
-while(True):
-    clock.tick()
-    img = sensor.snapshot()
-    for blob in img.find_blobs(thresholds, pixels_threshold=100, area_threshold=100, merge=True):
-        if blob.code() == 3: # r/g code == (1 << 1) | (1 << 0)
-            # These values depend on the blob not being circular - otherwise they will be shaky.
-            if blob.elongation() > 0.5:
-                img.draw_edges(blob.min_corners(), color=(255,0,0))
-                img.draw_line(blob.major_axis_line(), color=(0,255,0))
-                img.draw_line(blob.minor_axis_line(), color=(0,0,255))
-            # These values are stable all the time.
-            img.draw_rectangle(blob.rect())
-            img.draw_cross(blob.cx(), blob.cy())
-            # Note - the blob rotation is unique to 0-180 only.
-            img.draw_keypoints([(blob.cx(), blob.cy(), int(math.degrees(blob.rotation())))], size=20)
-    print(clock.fps())
diff --git a/scripts/examples/Arduino/Portenta-H7/10-Color-Tracking/single_color_grayscale_blob_tracking.py b/scripts/examples/Arduino/Portenta-H7/10-Color-Tracking/single_color_grayscale_blob_tracking.py
deleted file mode 100644
index 2f200275e..000000000
--- a/scripts/examples/Arduino/Portenta-H7/10-Color-Tracking/single_color_grayscale_blob_tracking.py
+++ /dev/null
@@ -1,36 +0,0 @@
-# Single Color Grayscale Blob Tracking Example
-#
-# This example shows off single color grayscale tracking using the OpenMV Cam.
-
-import sensor, image, time, math
-
-# Color Tracking Thresholds (Grayscale Min, Grayscale Max)
-# The below grayscale threshold is set to only find extremely bright white areas.
-thresholds = (245, 255)
-
-sensor.reset()
-sensor.set_pixformat(sensor.GRAYSCALE)
-sensor.set_framesize(sensor.VGA)
-sensor.skip_frames(time = 2000)
-sensor.set_auto_gain(False) # must be turned off for color tracking
-clock = time.clock()
-
-# Only blobs with more pixels than "pixels_threshold" and more area than "area_threshold" are
-# returned by "find_blobs" below. Change "pixels_threshold" and "area_threshold" if you change the
-# camera resolution. "merge=True" merges all overlapping blobs in the image.
-
-while(True):
-    clock.tick()
-    img = sensor.snapshot()
-    for blob in img.find_blobs([thresholds], pixels_threshold=100, area_threshold=100, merge=True):
-        # These values depend on the blob not being circular - otherwise they will be shaky.
-        if blob.elongation() > 0.5:
-            img.draw_edges(blob.min_corners(), color=0)
-            img.draw_line(blob.major_axis_line(), color=0)
-            img.draw_line(blob.minor_axis_line(), color=0)
-        # These values are stable all the time.
-        img.draw_rectangle(blob.rect(), color=127)
-        img.draw_cross(blob.cx(), blob.cy(), color=127)
-        # Note - the blob rotation is unique to 0-180 only.
-        img.draw_keypoints([(blob.cx(), blob.cy(), int(math.degrees(blob.rotation())))], size=40, color=127)
-    print(clock.fps())
diff --git a/scripts/examples/Arduino/Portenta-H7/10-Color-Tracking/single_color_rgb565_blob_tracking.py b/scripts/examples/Arduino/Portenta-H7/10-Color-Tracking/single_color_rgb565_blob_tracking.py
deleted file mode 100644
index 70b539c26..000000000
--- a/scripts/examples/Arduino/Portenta-H7/10-Color-Tracking/single_color_rgb565_blob_tracking.py
+++ /dev/null
@@ -1,40 +0,0 @@
-# Single Color RGB565 Blob Tracking Example
-#
-# This example shows off single color RGB565 tracking using the OpenMV Cam.
-
-import sensor, image, time, math
-
-threshold_index = 0 # 0 for red, 1 for green, 2 for blue
-
-# Color Tracking Thresholds (L Min, L Max, A Min, A Max, B Min, B Max)
-# The thresholds below track generic red/green/blue things. You may wish to tune them...
-thresholds = [(30, 100, 15, 127, 15, 127), # generic_red_thresholds
-              (30, 100, -64, -8, -32, 32), # generic_green_thresholds
-              (0, 30, 0, 64, -128, 0)] # generic_blue_thresholds
-
-sensor.reset()
-sensor.set_pixformat(sensor.RGB565) # this example tracks RGB565 colors, per the file name
-sensor.set_framesize(sensor.QVGA)
-sensor.skip_frames(time = 2000)
-sensor.set_auto_gain(False) # must be turned off for color tracking
-clock = time.clock()
-
-# Only blobs with more pixels than "pixels_threshold" and more area than "area_threshold" are
-# returned by "find_blobs" below. Change "pixels_threshold" and "area_threshold" if you change the
-# camera resolution. "merge=True" merges all overlapping blobs in the image.
-
-while(True):
-    clock.tick()
-    img = sensor.snapshot()
-    for blob in img.find_blobs([thresholds[threshold_index]], pixels_threshold=200, area_threshold=200, merge=True):
-        # These values depend on the blob not being circular - otherwise they will be shaky.
-        if blob.elongation() > 0.5:
-            img.draw_edges(blob.min_corners(), color=(255,0,0))
-            img.draw_line(blob.major_axis_line(), color=(0,255,0))
-            img.draw_line(blob.minor_axis_line(), color=(0,0,255))
-        # These values are stable all the time.
-        img.draw_rectangle(blob.rect())
-        img.draw_cross(blob.cx(), blob.cy())
-        # Note - the blob rotation is unique to 0-180 only.
-        img.draw_keypoints([(blob.cx(), blob.cy(), int(math.degrees(blob.rotation())))], size=20)
-    print(clock.fps())
diff --git a/scripts/examples/Arduino/Portenta-H7/16-Codes/find_barcodes.py b/scripts/examples/Arduino/Portenta-H7/16-Codes/find_barcodes.py
deleted file mode 100644
index 5c6488ad1..000000000
--- a/scripts/examples/Arduino/Portenta-H7/16-Codes/find_barcodes.py
+++ /dev/null
@@ -1,64 +0,0 @@
-# Barcode Example
-#
-# This example shows off how easy it is to detect barcodes using the
-# OpenMV Cam M7. Barcode detection does not work on the M4 Camera.
-
-import sensor, image, time, math
-
-sensor.reset()
-sensor.set_pixformat(sensor.GRAYSCALE)
-sensor.set_framesize(sensor.VGA) # High Res!
-sensor.set_windowing((640, 80)) # V Res of 80 == less work (40 for 2X the speed).
-sensor.skip_frames(time = 2000)
-sensor.set_auto_gain(False) # must turn this off to prevent image washout...
-clock = time.clock()
-
-# Barcode detection can run at the full 640x480 resolution of your OpenMV Cam's
-# OV7725 camera module. Barcode detection will also work in RGB565 mode but at
-# a lower resolution. That said, barcode detection requires a higher resolution
-# to work well so it should always be run at 640x480 in grayscale...
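The barcode_name() helper defined below maps each type constant to a string with a long if-chain. An equivalent table-driven sketch is shown here for comparison; BARCODE_NAMES and the "UNKNOWN" fallback are illustrative additions, while the image constants are the same ones the chain tests:

    import image  # same constants as used by barcode_name() below

    BARCODE_NAMES = {
        image.EAN2: "EAN2", image.EAN5: "EAN5", image.EAN8: "EAN8",
        image.UPCE: "UPCE", image.ISBN10: "ISBN10", image.UPCA: "UPCA",
        image.EAN13: "EAN13", image.ISBN13: "ISBN13", image.I25: "I25",
        image.DATABAR: "DATABAR", image.DATABAR_EXP: "DATABAR_EXP",
        image.CODABAR: "CODABAR", image.CODE39: "CODE39", image.PDF417: "PDF417",
        image.CODE93: "CODE93", image.CODE128: "CODE128",
    }

    def barcode_name(code):
        # Fall back to "UNKNOWN" instead of returning None for unlisted types.
        return BARCODE_NAMES.get(code.type(), "UNKNOWN")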
-
-def barcode_name(code):
-    if(code.type() == image.EAN2):
-        return "EAN2"
-    if(code.type() == image.EAN5):
-        return "EAN5"
-    if(code.type() == image.EAN8):
-        return "EAN8"
-    if(code.type() == image.UPCE):
-        return "UPCE"
-    if(code.type() == image.ISBN10):
-        return "ISBN10"
-    if(code.type() == image.UPCA):
-        return "UPCA"
-    if(code.type() == image.EAN13):
-        return "EAN13"
-    if(code.type() == image.ISBN13):
-        return "ISBN13"
-    if(code.type() == image.I25):
-        return "I25"
-    if(code.type() == image.DATABAR):
-        return "DATABAR"
-    if(code.type() == image.DATABAR_EXP):
-        return "DATABAR_EXP"
-    if(code.type() == image.CODABAR):
-        return "CODABAR"
-    if(code.type() == image.CODE39):
-        return "CODE39"
-    if(code.type() == image.PDF417):
-        return "PDF417"
-    if(code.type() == image.CODE93):
-        return "CODE93"
-    if(code.type() == image.CODE128):
-        return "CODE128"
-
-while(True):
-    clock.tick()
-    img = sensor.snapshot()
-    codes = img.find_barcodes()
-    for code in codes:
-        img.draw_rectangle(code.rect())
-        print_args = (barcode_name(code), code.payload(), (180 * code.rotation()) / math.pi, code.quality(), clock.fps())
-        print("Barcode %s, Payload \"%s\", rotation %f (degrees), quality %d, FPS %f" % print_args)
-    if not codes:
-        print("FPS %f" % clock.fps())
diff --git a/scripts/examples/Arduino/Portenta-H7/16-Codes/find_datamatrices.py b/scripts/examples/Arduino/Portenta-H7/16-Codes/find_datamatrices.py
deleted file mode 100644
index 25ac857fa..000000000
--- a/scripts/examples/Arduino/Portenta-H7/16-Codes/find_datamatrices.py
+++ /dev/null
@@ -1,26 +0,0 @@
-# Find Data Matrices Example
-#
-# This example shows off how easy it is to detect data matrices using the
-# OpenMV Cam M7. Data matrices detection does not work on the M4 Camera.
-
-import sensor, image, time, math
-
-sensor.reset()
-sensor.set_pixformat(sensor.GRAYSCALE)
-sensor.set_framesize(sensor.QVGA)
-sensor.skip_frames(time = 2000)
-sensor.set_auto_gain(False) # must turn this off to prevent image washout...
-clock = time.clock()
-
-while(True):
-    clock.tick()
-    img = sensor.snapshot()
-    img.lens_corr(1.8) # strength of 1.8 is good for the 2.8mm lens.
-
-    matrices = img.find_datamatrices()
-    for matrix in matrices:
-        img.draw_rectangle(matrix.rect(), color = (255, 0, 0))
-        print_args = (matrix.rows(), matrix.columns(), matrix.payload(), (180 * matrix.rotation()) / math.pi, clock.fps())
-        print("Matrix [%d:%d], Payload \"%s\", rotation %f (degrees), FPS %f" % print_args)
-    if not matrices:
-        print("FPS %f" % clock.fps())
diff --git a/scripts/examples/Arduino/Portenta-H7/16-Codes/find_datamatrices_w_lens_zoom.py b/scripts/examples/Arduino/Portenta-H7/16-Codes/find_datamatrices_w_lens_zoom.py
deleted file mode 100644
index c62ff0453..000000000
--- a/scripts/examples/Arduino/Portenta-H7/16-Codes/find_datamatrices_w_lens_zoom.py
+++ /dev/null
@@ -1,26 +0,0 @@
-# Find Data Matrices w/ Lens Zoom Example
-#
-# This example shows off how easy it is to detect data matrices using the
-# OpenMV Cam M7. Data matrices detection does not work on the M4 Camera.
-
-import sensor, image, time, math
-
-sensor.reset()
-sensor.set_pixformat(sensor.GRAYSCALE)
-sensor.set_framesize(sensor.VGA)
-sensor.set_windowing((320, 240)) # 2x Zoom
-sensor.skip_frames(time = 2000)
-sensor.set_auto_gain(False) # must turn this off to prevent image washout...
-clock = time.clock()
-
-while(True):
-    clock.tick()
-    img = sensor.snapshot()
-
-    matrices = img.find_datamatrices()
-    for matrix in matrices:
-        img.draw_rectangle(matrix.rect(), color = (255, 0, 0))
-        print_args = (matrix.rows(), matrix.columns(), matrix.payload(), (180 * matrix.rotation()) / math.pi, clock.fps())
-        print("Matrix [%d:%d], Payload \"%s\", rotation %f (degrees), FPS %f" % print_args)
-    if not matrices:
-        print("FPS %f" % clock.fps())
diff --git a/scripts/examples/Arduino/Portenta-H7/16-Codes/qrcodes_with_lens_corr.py b/scripts/examples/Arduino/Portenta-H7/16-Codes/qrcodes_with_lens_corr.py
deleted file mode 100644
index 9ca0691ad..000000000
--- a/scripts/examples/Arduino/Portenta-H7/16-Codes/qrcodes_with_lens_corr.py
+++ /dev/null
@@ -1,22 +0,0 @@
-# QRCode Example
-#
-# This example shows the power of the OpenMV Cam to detect QR Codes
-# using lens correction (see the qrcodes.py script for higher performance).
-
-import sensor, image, time
-
-sensor.reset()
-sensor.set_pixformat(sensor.GRAYSCALE)
-sensor.set_framesize(sensor.QVGA)
-sensor.skip_frames(time = 2000)
-sensor.set_auto_gain(False) # must turn this off to prevent image washout...
-clock = time.clock()
-
-while(True):
-    clock.tick()
-    img = sensor.snapshot()
-    img.lens_corr(1.8) # strength of 1.8 is good for the 2.8mm lens.
-    for code in img.find_qrcodes():
-        img.draw_rectangle(code.rect(), color = (255, 0, 0))
-        print(code)
-    print(clock.fps())
diff --git a/scripts/examples/Arduino/Portenta-H7/17-Pixy-Emulation/pixy_i2c_emulation.py b/scripts/examples/Arduino/Portenta-H7/17-Pixy-Emulation/pixy_i2c_emulation.py
deleted file mode 100644
index 21d31fc87..000000000
--- a/scripts/examples/Arduino/Portenta-H7/17-Pixy-Emulation/pixy_i2c_emulation.py
+++ /dev/null
@@ -1,328 +0,0 @@
-# Pixy I2C Emulation Script
-#
-# This script allows your OpenMV Cam to emulate the Pixy (CMUcam5) in I2C mode.
-# Note that you need to set up the lab color thresholds below for your application.
-#
-# P4 = SCL
-# P5 = SDA
-#
-# P7 = Servo 1
-# P8 = Servo 2
-
-# Pixy Parameters ############################################################
-
-color_code_mode = 1 # 0 == Disabled, 1 == Enabled, 2 == Color Codes Only, 3 == Mixed
-
-max_blocks = 1000
-max_blocks_per_signature = 1000
-min_block_area = 20
-
-i2c_address = 0x54
-
-# Pan Servo
-s0_lower_limit = 1000 # Servo pulse width lower limit in microseconds.
-s0_upper_limit = 2000 # Servo pulse width upper limit in microseconds.
-
-# Tilt Servo
-s1_lower_limit = 1000 # Servo pulse width lower limit in microseconds.
-s1_upper_limit = 2000 # Servo pulse width upper limit in microseconds.
-
-analog_out_enable = False # P6 -> Analog Out (0v - 3.3v).
-analog_out_mode = 0 # 0 == x position of largest blob
-                    # 1 == y position of largest blob
-
-# Parameter 0 - L Min.
-# Parameter 1 - L Max.
-# Parameter 2 - A Min.
-# Parameter 3 - A Max.
-# Parameter 4 - B Min.
-# Parameter 5 - B Max.
-# Parameter 6 - Is Color Code Threshold? (True/False).
-# Parameter 7 - Enable Threshold? (True/False).
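Each signature slot in the lab_color_thresholds table that follows is an 8-tuple laid out exactly as Parameters 0-7 above describe. A hedged walkthrough of the first enabled entry (generic_red is an illustrative name only, not used by the script):

    # (L min, L max, A min, A max, B min, B max, is_color_code, enabled)
    generic_red = (0, 100, 40, 127, -128, 127, True, True)
    l_min, l_max, a_min, a_max, b_min, b_max, is_color_code, enabled = generic_red
    # A broad, reddish LAB region that participates in color codes and is enabled.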
-lab_color_thresholds = [(0, 100, 40, 127, -128, 127, True, True), # Generic Red Threshold
-                        (0, 100, -128, -10, -128, 127, True, True), # Generic Green Threshold
-                        (0, 0, 0, 0, 0, 0, False, False),
-                        (0, 0, 0, 0, 0, 0, False, False),
-                        (0, 0, 0, 0, 0, 0, False, False),
-                        (0, 0, 0, 0, 0, 0, False, False),
-                        (0, 0, 0, 0, 0, 0, False, False)]
-
-fb_pixels_threshold = 500 # minimum number of pixels that must be in a blob
-fb_merge_margin = 5 # how close pixel wise blobs can be before merging
-
-##############################################################################
-
-e_lab_color_thresholds = [] # enabled thresholds
-e_lab_color_code = [] # enabled color code
-e_lab_color_signatures = [] # original enabled threshold indexes
-for i in range(len(lab_color_thresholds)):
-    if lab_color_thresholds[i][7]:
-        e_lab_color_thresholds.append(lab_color_thresholds[i][0:6])
-        e_lab_color_code.append(lab_color_thresholds[i][6])
-        e_lab_color_signatures.append(i + 1)
-
-import image, math, pyb, sensor, struct, time
-
-# Camera Setup
-
-sensor.reset()
-sensor.set_pixformat(sensor.RGB565) # color tracking needs a color image
-sensor.set_framesize(sensor.QVGA)
-sensor.skip_frames(time = 2000)
-sensor.set_auto_gain(False)
-
-# LED Setup
-
-red_led = pyb.LED(1)
-green_led = pyb.LED(2)
-blue_led = pyb.LED(3)
-
-red_led.off()
-green_led.off()
-blue_led.off()
-
-# DAC Setup
-
-dac = pyb.DAC("P6") if analog_out_enable else None
-
-if dac:
-    dac.write(0)
-
-# Servo Setup
-
-min_s0_limit = min(s0_lower_limit, s0_upper_limit)
-max_s0_limit = max(s0_lower_limit, s0_upper_limit)
-min_s1_limit = min(s1_lower_limit, s1_upper_limit)
-max_s1_limit = max(s1_lower_limit, s1_upper_limit)
-
-s0_pan = pyb.Servo(1) # P7
-s1_tilt = pyb.Servo(2) # P8
-
-s0_pan.pulse_width(int((max_s0_limit + min_s0_limit) // 2)) # center (midpoint of the limits)
-s1_tilt.pulse_width(int((max_s1_limit + min_s1_limit) // 2)) # center (midpoint of the limits)
-
-s0_pan_conversion_factor = (max_s0_limit - min_s0_limit) / 1000
-s1_tilt_conversion_factor = (max_s1_limit - min_s1_limit) / 1000
-
-def s0_pan_position(value):
-    s0_pan.pulse_width(round(s0_lower_limit + (max(min(value, 1000), 0) * s0_pan_conversion_factor)))
-
-def s1_tilt_position(value):
-    s1_tilt.pulse_width(round(s1_lower_limit + (max(min(value, 1000), 0) * s1_tilt_conversion_factor)))
-
-# Link Setup
-
-bus = pyb.I2C(2, pyb.I2C.SLAVE, addr = i2c_address)
-
-def write(data):
-    # Prepare the data to transmit first so we can do it quickly.
-    out_data = []
-    for i in range(0, len(data), 2):
-        out_data.append(data[i:i+2])
-    # Disable interrupts so we can send all packets without gaps.
-    state = pyb.disable_irq()
-    for i in range(len(out_data)):
-        max_exceptions = 10
-        loop = True
-        while(loop):
-            try:
-                bus.send(out_data[i], timeout = 1)
-                loop = False
-            except OSError as error:
-                if(max_exceptions <= 0):
-                    pyb.enable_irq(state)
-                    return
-                max_exceptions -= 1
-    pyb.enable_irq(state)
-
-def available():
-    return 0 # Not implemented as there is no way for us to be ready to receive the data.
-
-def read_byte():
-    return 0 # Not implemented as there is no way for us to be ready to receive the data.
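The Pixy checksum defined just below sums the packet as little-endian 16-bit words and truncates the sum to 16 bits. An equivalent formulation using struct, shown only to make the byte order explicit (checksum16 is a hypothetical name; even-length data is assumed):

    import struct

    def checksum16(data):
        # Sum little-endian unsigned 16-bit words, truncated to 16 bits.
        words = struct.unpack("<%dH" % (len(data) // 2), bytes(data))
        return sum(words) & 0xFFFF

    # checksum16([0x01, 0x00, 0x02, 0x00]) == 0x0003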
-
-# Helper Stuff
-
-def checksum(data):
-    checksum = 0
-    for i in range(0, len(data), 2):
-        checksum += ((data[i+1] & 0xFF) << 8) | ((data[i+0] & 0xFF) << 0)
-    return checksum & 0xFFFF
-
-def get_normal_signature(code):
-    for i in range(len(e_lab_color_signatures)):
-        if code & (1 << i):
-            return e_lab_color_signatures[i]
-    return 0
-
-def to_normal_object_block_format(blob):
-    temp = struct.pack(" 1) or (not color_code(blob.code()))
-    elif(pri_color_code_mode == 2): # only color codes with two or more colors
-        return (bits_set(blob.code()) > 1)
-    elif(pri_color_code_mode == 3):
-        return True
-
-clock = time.clock()
-while(True):
-    clock.tick()
-    img = sensor.snapshot()
-    blobs = list(filter(blob_filter, img.find_blobs(e_lab_color_thresholds, area_threshold = min_block_area, pixels_threshold = fb_pixels_threshold, merge = True, margin = fb_merge_margin, merge_cb = fb_merge_cb)))
-
-    # Transmit Blobs #
-
-    if blobs and (max_blocks > 0) and (max_blocks_per_signature > 0): # new frame
-        dat_buf = struct.pack(" Analog Out (0v - 3.3v).
-analog_out_mode = 0 # 0 == x position of largest blob
-                    # 1 == y position of largest blob
-
-# Parameter 0 - L Min.
-# Parameter 1 - L Max.
-# Parameter 2 - A Min.
-# Parameter 3 - A Max.
-# Parameter 4 - B Min.
-# Parameter 5 - B Max.
-# Parameter 6 - Is Color Code Threshold? (True/False).
-# Parameter 7 - Enable Threshold? (True/False).
-lab_color_thresholds = [(0, 100, 40, 127, -128, 127, True, True), # Generic Red Threshold
-                        (0, 100, -128, -10, -128, 127, True, True), # Generic Green Threshold
-                        (0, 0, 0, 0, 0, 0, False, False),
-                        (0, 0, 0, 0, 0, 0, False, False),
-                        (0, 0, 0, 0, 0, 0, False, False),
-                        (0, 0, 0, 0, 0, 0, False, False),
-                        (0, 0, 0, 0, 0, 0, False, False)]
-
-fb_pixels_threshold = 500 # minimum number of pixels that must be in a blob
-fb_merge_margin = 5 # how close pixel wise blobs can be before merging
-
-##############################################################################
-
-e_lab_color_thresholds = [] # enabled thresholds
-e_lab_color_code = [] # enabled color code
-e_lab_color_signatures = [] # original enabled threshold indexes
-for i in range(len(lab_color_thresholds)):
-    if lab_color_thresholds[i][7]:
-        e_lab_color_thresholds.append(lab_color_thresholds[i][0:6])
-        e_lab_color_code.append(lab_color_thresholds[i][6])
-        e_lab_color_signatures.append(i + 1)
-
-import image, math, pyb, sensor, struct, time
-
-# Camera Setup
-
-sensor.reset()
-sensor.set_pixformat(sensor.RGB565) # color tracking needs a color image
-sensor.set_framesize(sensor.QVGA)
-sensor.skip_frames(time = 2000)
-sensor.set_auto_gain(False)
-
-# LED Setup
-
-red_led = pyb.LED(1)
-green_led = pyb.LED(2)
-blue_led = pyb.LED(3)
-
-red_led.off()
-green_led.off()
-blue_led.off()
-
-# DAC Setup
-
-dac = pyb.DAC("P6") if analog_out_enable else None
-
-if dac:
-    dac.write(0)
-
-# Servo Setup
-
-min_s0_limit = min(s0_lower_limit, s0_upper_limit)
-max_s0_limit = max(s0_lower_limit, s0_upper_limit)
-min_s1_limit = min(s1_lower_limit, s1_upper_limit)
-max_s1_limit = max(s1_lower_limit, s1_upper_limit)
-
-s0_pan = pyb.Servo(1) # P7
-s1_tilt = pyb.Servo(2) # P8
-
-s0_pan.pulse_width(int((max_s0_limit + min_s0_limit) // 2)) # center (midpoint of the limits)
-s1_tilt.pulse_width(int((max_s1_limit + min_s1_limit) // 2)) # center (midpoint of the limits)
-
-s0_pan_conversion_factor = (max_s0_limit - min_s0_limit) / 1000
-s1_tilt_conversion_factor = (max_s1_limit - min_s1_limit) / 1000
-
-def s0_pan_position(value):
-    s0_pan.pulse_width(round(s0_lower_limit + (max(min(value, 1000), 0) * s0_pan_conversion_factor)))
-
-def s1_tilt_position(value):
-    s1_tilt.pulse_width(round(s1_lower_limit + (max(min(value, 1000), 0) * s1_tilt_conversion_factor)))
-
-# Link Setup
-
-bus = pyb.SPI(2, pyb.SPI.SLAVE, polarity = 0, phase = 0, bits = 16)
-while(True):
-    try:
-        sync_bytes = bus.recv(2, timeout = 10)
-        if((sync_bytes[0] == 0x00) and (sync_bytes[1] == 0x5A)):
-            break
-    except OSError as error:
-        pass
-
-    bus.deinit()
-    bus.init(pyb.SPI.SLAVE, polarity = 0, phase = 0, bits = 16)
-
-def write(data):
-    max_exceptions = 10
-    loop = True
-    while(loop):
-        try:
-            bus.send(data, timeout = 10)
-            loop = False
-        except OSError as error:
-            if(max_exceptions <= 0):
-                return
-            max_exceptions -= 1
-
-def available():
-    return 0 # Not implemented as there is no way for us to be ready to receive the data.
-
-def read_byte():
-    return 0 # Not implemented as there is no way for us to be ready to receive the data.
-
-# Helper Stuff
-
-def checksum(data):
-    checksum = 0
-    for i in range(0, len(data), 2):
-        checksum += ((data[i+1] & 0xFF) << 8) | ((data[i+0] & 0xFF) << 0)
-    return checksum & 0xFFFF
-
-def get_normal_signature(code):
-    for i in range(len(e_lab_color_signatures)):
-        if code & (1 << i):
-            return e_lab_color_signatures[i]
-    return 0
-
-def to_normal_object_block_format(blob):
-    temp = struct.pack(" 1) or (not color_code(blob.code()))
-    elif(pri_color_code_mode == 2): # only color codes with two or more colors
-        return (bits_set(blob.code()) > 1)
-    elif(pri_color_code_mode == 3):
-        return True
-
-clock = time.clock()
-while(True):
-    clock.tick()
-    img = sensor.snapshot()
-    blobs = list(filter(blob_filter, img.find_blobs(e_lab_color_thresholds, area_threshold = min_block_area, pixels_threshold = fb_pixels_threshold, merge = True, margin = fb_merge_margin, merge_cb = fb_merge_cb)))
-
-    # Transmit Blobs #
-
-    if blobs and (max_blocks > 0) and (max_blocks_per_signature > 0): # new frame
-        dat_buf = struct.pack(" Analog Out (0v - 3.3v).
-analog_out_mode = 0 # 0 == x position of largest blob
-                    # 1 == y position of largest blob
-
-# Parameter 0 - L Min.
-# Parameter 1 - L Max.
-# Parameter 2 - A Min.
-# Parameter 3 - A Max.
-# Parameter 4 - B Min.
-# Parameter 5 - B Max.
-# Parameter 6 - Is Color Code Threshold? (True/False).
-# Parameter 7 - Enable Threshold? (True/False).
-lab_color_thresholds = [(0, 100, 40, 127, -128, 127, True, True), # Generic Red Threshold
-                        (0, 100, -128, -10, -128, 127, True, True), # Generic Green Threshold
-                        (0, 0, 0, 0, 0, 0, False, False),
-                        (0, 0, 0, 0, 0, 0, False, False),
-                        (0, 0, 0, 0, 0, 0, False, False),
-                        (0, 0, 0, 0, 0, 0, False, False),
-                        (0, 0, 0, 0, 0, 0, False, False)]
-
-fb_pixels_threshold = 500 # minimum number of pixels that must be in a blob
-fb_merge_margin = 5 # how close pixel wise blobs can be before merging
-
-##############################################################################
-
-e_lab_color_thresholds = [] # enabled thresholds
-e_lab_color_code = [] # enabled color code
-e_lab_color_signatures = [] # original enabled threshold indexes
-for i in range(len(lab_color_thresholds)):
-    if lab_color_thresholds[i][7]:
-        e_lab_color_thresholds.append(lab_color_thresholds[i][0:6])
-        e_lab_color_code.append(lab_color_thresholds[i][6])
-        e_lab_color_signatures.append(i + 1)
-
-import image, math, pyb, sensor, struct, time
-
-# Camera Setup
-
-sensor.reset()
-sensor.set_pixformat(sensor.RGB565) # color tracking needs a color image
-sensor.set_framesize(sensor.QVGA)
-sensor.skip_frames(time = 2000)
-sensor.set_auto_gain(False)
-
-# LED Setup
-
-red_led = pyb.LED(1)
-green_led = pyb.LED(2)
-blue_led = pyb.LED(3)
-
-red_led.off()
-green_led.off()
-blue_led.off()
-
-# DAC Setup
-
-dac = pyb.DAC("P6") if analog_out_enable else None
-
-if dac:
-    dac.write(0)
-
-# Servo Setup
-
-min_s0_limit = min(s0_lower_limit, s0_upper_limit)
-max_s0_limit = max(s0_lower_limit, s0_upper_limit)
-min_s1_limit = min(s1_lower_limit, s1_upper_limit)
-max_s1_limit = max(s1_lower_limit, s1_upper_limit)
-
-s0_pan = pyb.Servo(1) # P7
-s1_tilt = pyb.Servo(2) # P8
-
-s0_pan.pulse_width(int((max_s0_limit + min_s0_limit) // 2)) # center (midpoint of the limits)
-s1_tilt.pulse_width(int((max_s1_limit + min_s1_limit) // 2)) # center (midpoint of the limits)
-
-s0_pan_conversion_factor = (max_s0_limit - min_s0_limit) / 1000
-s1_tilt_conversion_factor = (max_s1_limit - min_s1_limit) / 1000
-
-def s0_pan_position(value):
-    s0_pan.pulse_width(round(s0_lower_limit + (max(min(value, 1000), 0) * s0_pan_conversion_factor)))
-
-def s1_tilt_position(value):
-    s1_tilt.pulse_width(round(s1_lower_limit + (max(min(value, 1000), 0) * s1_tilt_conversion_factor)))
-
-# Link Setup
-
-uart = pyb.UART(3, uart_baudrate, timeout_char = 1000)
-
-def write(data):
-    uart.write(data)
-
-def available():
-    return uart.any()
-
-def read_byte():
-    return uart.readchar()
-
-# Helper Stuff
-
-def checksum(data):
-    checksum = 0
-    for i in range(0, len(data), 2):
-        checksum += ((data[i+1] & 0xFF) << 8) | ((data[i+0] & 0xFF) << 0)
-    return checksum & 0xFFFF
-
-def get_normal_signature(code):
-    for i in range(len(e_lab_color_signatures)):
-        if code & (1 << i):
-            return e_lab_color_signatures[i]
-    return 0
-
-def to_normal_object_block_format(blob):
-    temp = struct.pack(" 1) or (not color_code(blob.code()))
-    elif(pri_color_code_mode == 2): # only color codes with two or more colors
-        return (bits_set(blob.code()) > 1)
-    elif(pri_color_code_mode == 3):
-        return True
-
-clock = time.clock()
-while(True):
-    clock.tick()
-    img = sensor.snapshot()
-    blobs = list(filter(blob_filter, img.find_blobs(e_lab_color_thresholds, area_threshold = min_block_area, pixels_threshold = fb_pixels_threshold, merge = True, margin = fb_merge_margin, merge_cb = fb_merge_cb)))
-
-    # Transmit Blobs #
-
-    if blobs and (max_blocks > 0) and (max_blocks_per_signature > 0): # new frame
-        dat_buf = struct.pack("= 20:
-        led.off()
-        led_state = 0
-
-# Link Setup
-
-uart = pyb.UART(3, uart_baudrate, timeout_char = 1000)
-
-# Helper Stuff
-
-packet_sequence = 0
-
-def checksum(data, extra): # https://github.com/mavlink/c_library_v1/blob/master/checksum.h
-    output = 0xFFFF
-    for i in range(len(data)):
-        tmp = data[i] ^ (output & 0xFF)
-        tmp = (tmp ^ (tmp << 4)) & 0xFF
-        output = ((output >> 8) ^ (tmp << 8) ^ (tmp << 3) ^ (tmp >> 4)) & 0xFFFF
-    tmp = extra ^ (output & 0xFF)
-    tmp = (tmp ^ (tmp << 4)) & 0xFF
-    output = ((output >> 8) ^ (tmp << 8) ^ (tmp << 3) ^ (tmp >> 4)) & 0xFFFF
-    return output
-
-MAV_OPTICAL_FLOW_message_id = 100
-MAV_OPTICAL_FLOW_id = 0 # unused
-MAV_OPTICAL_FLOW_extra_crc = 175
-
-# http://mavlink.org/messages/common#OPTICAL_FLOW
-# https://github.com/mavlink/c_library_v1/blob/master/common/mavlink_msg_optical_flow.h
-def send_optical_flow_packet(x, y, c):
-    global packet_sequence
-    temp = struct.pack(" BG_UPDATE_FRAMES):
-        frame_count = 0
-        # Blend in the new frame. We're doing 256-alpha here because we want to
-        # blend the new frame into the background, not the background into the
-        # new frame, which would be just alpha. Blend replaces each pixel by
-        # ((NEW*(alpha))+(OLD*(256-alpha)))/256. So, a low alpha results in
-        # low blending of the new image while a high alpha results in high
-        # blending of the new image. We need to reverse that for this update.
-        img.blend(extra_fb, alpha=(256-BG_UPDATE_BLEND))
-        extra_fb.replace(img)
-
-    # Replace the image with the "abs(NEW-OLD)" frame difference.
-    img.difference(extra_fb)
-
-    hist = img.get_histogram()
-    # The code below works by comparing the 99th percentile value (e.g. the
-    # non-outlier max value) against the 90th percentile value (e.g. a non-max
-    # value). The difference between the two values will grow as more pixels
-    # in the difference image change.
-    diff = hist.get_percentile(0.99).l_value() - hist.get_percentile(0.90).l_value()
-    triggered = diff > TRIGGER_THRESHOLD
-
-    print(clock.fps(), triggered) # Note: Your OpenMV Cam runs about half as fast while
-                                  # connected to your computer. The FPS should increase once disconnected.
diff --git a/scripts/examples/Arduino/Portenta-H7/20-Frame-Differencing/in_memory_basic_frame_differencing.py b/scripts/examples/Arduino/Portenta-H7/20-Frame-Differencing/in_memory_basic_frame_differencing.py
deleted file mode 100644
index 1fb207bdf..000000000
--- a/scripts/examples/Arduino/Portenta-H7/20-Frame-Differencing/in_memory_basic_frame_differencing.py
+++ /dev/null
@@ -1,46 +0,0 @@
-# In Memory Basic Frame Differencing Example
-#
-# This example demonstrates using frame differencing with your OpenMV Cam. It's
-# called basic frame differencing because there's no background image update.
-# So, as time passes the background image may change, resulting in issues.
-
-import sensor, image, pyb, os, time
-
-TRIGGER_THRESHOLD = 5
-
-sensor.reset() # Initialize the camera sensor.
-sensor.set_pixformat(sensor.GRAYSCALE) # or sensor.RGB565
-sensor.set_framesize(sensor.QVGA) # or sensor.QQVGA (or others)
-sensor.skip_frames(time = 2000) # Let new settings take effect.
-clock = time.clock() # Tracks FPS.
-
-# Take from the main frame buffer's RAM to allocate a second frame buffer.
-# There's a lot more RAM in the frame buffer than in the MicroPython heap.
-# However, after doing this you have a lot less RAM for some algorithms...
-# So, be aware that it's a lot easier to run into RAM issues now. However,
-# frame differencing doesn't use a lot of the extra space in the frame buffer.
-# But, things like AprilTags do and won't work if you do this...
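If a script later needs that reserved RAM back (AprilTags being the case the note above calls out), the extra buffer can be released again. A minimal sketch; sensor.dealloc_extra_fb() is believed to be the matching OpenMV call and is shown here as an assumption:

    import sensor

    extra_fb = sensor.alloc_extra_fb(sensor.width(), sensor.height(), sensor.GRAYSCALE)
    # ... frame differencing work ...
    sensor.dealloc_extra_fb()  # assumed API: frees the most recently allocated extra buffer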
-
-extra_fb = sensor.alloc_extra_fb(sensor.width(), sensor.height(), sensor.GRAYSCALE)
-
-print("About to save background image...")
-sensor.skip_frames(time = 2000) # Give the user time to get ready.
-extra_fb.replace(sensor.snapshot())
-print("Saved background image - Now frame differencing!")
-
-while(True):
-    clock.tick() # Track elapsed milliseconds between snapshots().
-    img = sensor.snapshot() # Take a picture and return the image.
-
-    # Replace the image with the "abs(NEW-OLD)" frame difference.
-    img.difference(extra_fb)
-
-    hist = img.get_histogram()
-    # The code below works by comparing the 99th percentile value (e.g. the
-    # non-outlier max value) against the 90th percentile value (e.g. a non-max
-    # value). The difference between the two values will grow as more pixels
-    # in the difference image change.
-    diff = hist.get_percentile(0.99).l_value() - hist.get_percentile(0.90).l_value()
-    triggered = diff > TRIGGER_THRESHOLD
-
-    print(clock.fps(), triggered) # Note: Your OpenMV Cam runs about half as fast while
-                                  # connected to your computer. The FPS should increase once disconnected.
diff --git a/scripts/examples/Arduino/Portenta-H7/20-Frame-Differencing/in_memory_shadow_removal.py b/scripts/examples/Arduino/Portenta-H7/20-Frame-Differencing/in_memory_shadow_removal.py
deleted file mode 100644
index 69c507c61..000000000
--- a/scripts/examples/Arduino/Portenta-H7/20-Frame-Differencing/in_memory_shadow_removal.py
+++ /dev/null
@@ -1,52 +0,0 @@
-# In Memory Shadow Removal w/ Frame Differencing Example
-#
-# This example demonstrates using frame differencing with your OpenMV Cam using
-# shadow removal to help reduce the effects of cast shadows in your scene.
-
-import sensor, image, pyb, os, time
-
-TRIGGER_THRESHOLD = 5
-
-sensor.reset() # Initialize the camera sensor.
-sensor.set_pixformat(sensor.GRAYSCALE) # or sensor.RGB565
-sensor.set_framesize(sensor.QQVGA) # or sensor.QVGA (or others)
-if sensor.get_id() == sensor.OV7725: # Reduce sensor PLL from 6x to 4x.
-    sensor.__write_reg(0x0D, (sensor.__read_reg(0x0D) & 0x3F) | 0x40)
-sensor.skip_frames(time = 2000) # Let new settings take effect.
-sensor.set_auto_gain(False) # Turn this off too.
-clock = time.clock() # Tracks FPS.
-
-# Take from the main frame buffer's RAM to allocate a second frame buffer.
-# There's a lot more RAM in the frame buffer than in the MicroPython heap.
-# However, after doing this you have a lot less RAM for some algorithms...
-# So, be aware that it's a lot easier to run into RAM issues now. However,
-# frame differencing doesn't use a lot of the extra space in the frame buffer.
-# But, things like AprilTags do and won't work if you do this...
-extra_fb = sensor.alloc_extra_fb(sensor.width(), sensor.height(), sensor.GRAYSCALE)
-
-print("About to save background image...")
-sensor.skip_frames(time = 2000) # Give the user time to get ready.
-extra_fb.replace(sensor.snapshot())
-print("Saved background image - Now frame differencing!")
-
-while(True):
-    clock.tick() # Track elapsed milliseconds between snapshots().
-    img = sensor.snapshot() # Take a picture and return the image.
-
-    # Note that for shadow removal to work the background image must be
-    # shadow free and have the same lighting as the latest image. Unlike max()
-    # shadow removal won't remove all dark objects unless they were shadows...
-
-    # Replace the image with the "abs(NEW-OLD)" frame difference.
-    img.remove_shadows(extra_fb).difference(extra_fb)
-
-    hist = img.get_histogram()
-    # The code below works by comparing the 99th percentile value (e.g. the
-    # non-outlier max value) against the 90th percentile value (e.g. a non-max
-    # value). The difference between the two values will grow as more pixels
-    # in the difference image change.
-    diff = hist.get_percentile(0.99).l_value() - hist.get_percentile(0.90).l_value()
-    triggered = diff > TRIGGER_THRESHOLD
-
-    print(clock.fps(), triggered) # Note: Your OpenMV Cam runs about half as fast while
-                                  # connected to your computer. The FPS should increase once disconnected.
diff --git a/scripts/examples/Arduino/Portenta-H7/20-Frame-Differencing/in_memory_structural_similarity.py b/scripts/examples/Arduino/Portenta-H7/20-Frame-Differencing/in_memory_structural_similarity.py
deleted file mode 100644
index c2cce7c87..000000000
--- a/scripts/examples/Arduino/Portenta-H7/20-Frame-Differencing/in_memory_structural_similarity.py
+++ /dev/null
@@ -1,38 +0,0 @@
-# Structural Similarity (SSIM) Example
-#
-# This example shows off how to use the SSIM algorithm on your OpenMV Cam
-# to detect differences between two images. The SSIM algorithm compares
-# 8x8 blocks of pixels between the two images to determine a similarity
-# score.
-
-import sensor, image, pyb, os, time
-
-# The image has likely changed if the sim.min() is lower than this.
-MIN_TRIGGER_THRESHOLD = -0.4
-
-sensor.reset() # Initialize the camera sensor.
-sensor.set_pixformat(sensor.GRAYSCALE) # or sensor.RGB565
-sensor.set_framesize(sensor.QVGA) # or sensor.QQVGA (or others)
-sensor.skip_frames(time = 2000) # Let new settings take effect.
-clock = time.clock() # Tracks FPS.
-
-# Take from the main frame buffer's RAM to allocate a second frame buffer.
-# There's a lot more RAM in the frame buffer than in the MicroPython heap.
-# However, after doing this you have a lot less RAM for some algorithms...
-# So, be aware that it's a lot easier to run into RAM issues now. However,
-# frame differencing doesn't use a lot of the extra space in the frame buffer.
-# But, things like AprilTags do and won't work if you do this...
-extra_fb = sensor.alloc_extra_fb(sensor.width(), sensor.height(), sensor.GRAYSCALE)
-
-print("About to save background image...")
-sensor.skip_frames(time = 2000) # Give the user time to get ready.
-extra_fb.replace(sensor.snapshot())
-print("Saved background image!")
-
-while(True):
-    clock.tick() # Track elapsed milliseconds between snapshots().
-    img = sensor.snapshot() # Take a picture and return the image.
-    sim = img.get_similarity(extra_fb)
-    change = "- Change -" if sim.min() < MIN_TRIGGER_THRESHOLD else "- No Change -"
-
-    print(clock.fps(), change, sim)
diff --git a/scripts/examples/Arduino/Portenta-H7/20-Frame-Differencing/on_disk_advanced_frame_differencing.py b/scripts/examples/Arduino/Portenta-H7/20-Frame-Differencing/on_disk_advanced_frame_differencing.py
deleted file mode 100644
index fcbac28a0..000000000
--- a/scripts/examples/Arduino/Portenta-H7/20-Frame-Differencing/on_disk_advanced_frame_differencing.py
+++ /dev/null
@@ -1,60 +0,0 @@
-# Advanced Frame Differencing Example
-#
-# Note: You will need an SD card to run this example.
-#
-# This example demonstrates using frame differencing with your OpenMV Cam. This
-# example is advanced because it performs a background update to deal with the
-# background image changing over time.
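The background update used by the advanced examples follows the blend rule quoted in their comments: each output pixel becomes (NEW*alpha + OLD*(256-alpha))/256, and the scripts pass alpha = 256 - BG_UPDATE_BLEND. A worked one-pixel example with the default blend of 128 (the pixel values themselves are hypothetical):

    new, old = 200, 100            # hypothetical pixel values
    BG_UPDATE_BLEND = 128          # the scripts' default
    alpha = 256 - BG_UPDATE_BLEND  # what the scripts pass to blend()
    mixed = (new * alpha + old * (256 - alpha)) // 256
    print(mixed)                   # 150, i.e. a 50/50 mix at blend == 128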
-
-import sensor, image, pyb, os, time
-
-TRIGGER_THRESHOLD = 5
-
-BG_UPDATE_FRAMES = 50 # How many frames before blending.
-BG_UPDATE_BLEND = 128 # How much to blend by... ([0-256]==[0.0-1.0]).
-
-sensor.reset() # Initialize the camera sensor.
-sensor.set_pixformat(sensor.GRAYSCALE) # or sensor.RGB565
-sensor.set_framesize(sensor.QVGA) # or sensor.QQVGA (or others)
-sensor.skip_frames(time = 2000) # Let new settings take effect.
-clock = time.clock() # Tracks FPS.
-
-if "temp" not in os.listdir(): os.mkdir("temp") # Make a temp directory
-
-print("About to save background image...")
-sensor.skip_frames(time = 2000) # Give the user time to get ready.
-sensor.snapshot().save("temp/bg.bmp")
-print("Saved background image - Now frame differencing!")
-
-triggered = False
-
-frame_count = 0
-while(True):
-    clock.tick() # Track elapsed milliseconds between snapshots().
-    img = sensor.snapshot() # Take a picture and return the image.
-
-    frame_count += 1
-    if (frame_count > BG_UPDATE_FRAMES):
-        frame_count = 0
-        # Blend in the new frame. We're doing 256-alpha here because we want to
-        # blend the new frame into the background, not the background into the
-        # new frame, which would be just alpha. Blend replaces each pixel by
-        # ((NEW*(alpha))+(OLD*(256-alpha)))/256. So, a low alpha results in
-        # low blending of the new image while a high alpha results in high
-        # blending of the new image. We need to reverse that for this update.
-        img.blend("temp/bg.bmp", alpha=(256-BG_UPDATE_BLEND))
-        img.save("temp/bg.bmp")
-
-    # Replace the image with the "abs(NEW-OLD)" frame difference.
-    img.difference("temp/bg.bmp")
-
-    hist = img.get_histogram()
-    # The code below works by comparing the 99th percentile value (e.g. the
-    # non-outlier max value) against the 90th percentile value (e.g. a non-max
-    # value). The difference between the two values will grow as more pixels
-    # in the difference image change.
-    diff = hist.get_percentile(0.99).l_value() - hist.get_percentile(0.90).l_value()
-    triggered = diff > TRIGGER_THRESHOLD
-
-    print(clock.fps(), triggered) # Note: Your OpenMV Cam runs about half as fast while
-                                  # connected to your computer. The FPS should increase once disconnected.
diff --git a/scripts/examples/Arduino/Portenta-H7/20-Frame-Differencing/on_disk_basic_frame_differencing.py b/scripts/examples/Arduino/Portenta-H7/20-Frame-Differencing/on_disk_basic_frame_differencing.py
deleted file mode 100644
index d1a41fbae..000000000
--- a/scripts/examples/Arduino/Portenta-H7/20-Frame-Differencing/on_disk_basic_frame_differencing.py
+++ /dev/null
@@ -1,42 +0,0 @@
-# Basic Frame Differencing Example
-#
-# Note: You will need an SD card to run this example.
-#
-# This example demonstrates using frame differencing with your OpenMV Cam. It's
-# called basic frame differencing because there's no background image update.
-# So, as time passes the background image may change, resulting in issues.
-
-import sensor, image, pyb, os, time
-
-TRIGGER_THRESHOLD = 5
-
-sensor.reset() # Initialize the camera sensor.
-sensor.set_pixformat(sensor.GRAYSCALE) # or sensor.RGB565
-sensor.set_framesize(sensor.QVGA) # or sensor.QQVGA (or others)
-sensor.skip_frames(time = 2000) # Let new settings take effect.
-clock = time.clock() # Tracks FPS.
-
-if "temp" not in os.listdir(): os.mkdir("temp") # Make a temp directory
-
-print("About to save background image...")
-sensor.skip_frames(time = 2000) # Give the user time to get ready.
-sensor.snapshot().save("temp/bg.bmp")
-print("Saved background image - Now frame differencing!")
-
-while(True):
-    clock.tick() # Track elapsed milliseconds between snapshots().
-    img = sensor.snapshot() # Take a picture and return the image.
-
-    # Replace the image with the "abs(NEW-OLD)" frame difference.
-    img.difference("temp/bg.bmp")
-
-    hist = img.get_histogram()
-    # The code below works by comparing the 99th percentile value (e.g. the
-    # non-outlier max value) against the 90th percentile value (e.g. a non-max
-    # value). The difference between the two values will grow as more pixels
-    # in the difference image change.
-    diff = hist.get_percentile(0.99).l_value() - hist.get_percentile(0.90).l_value()
-    triggered = diff > TRIGGER_THRESHOLD
-
-    print(clock.fps(), triggered) # Note: Your OpenMV Cam runs about half as fast while
-                                  # connected to your computer. The FPS should increase once disconnected.
diff --git a/scripts/examples/Arduino/Portenta-H7/20-Frame-Differencing/on_disk_shadow_removal.py b/scripts/examples/Arduino/Portenta-H7/20-Frame-Differencing/on_disk_shadow_removal.py
deleted file mode 100644
index 30d483e8e..000000000
--- a/scripts/examples/Arduino/Portenta-H7/20-Frame-Differencing/on_disk_shadow_removal.py
+++ /dev/null
@@ -1,48 +0,0 @@
-# On Disk Shadow Removal w/ Frame Differencing Example
-#
-# Note: You will need an SD card to run this example.
-#
-# This example demonstrates using frame differencing with your OpenMV Cam using
-# shadow removal to help reduce the effects of cast shadows in your scene.
-
-import sensor, image, pyb, os, time
-
-TRIGGER_THRESHOLD = 5
-
-sensor.reset() # Initialize the camera sensor.
-sensor.set_pixformat(sensor.GRAYSCALE) # or sensor.RGB565
-sensor.set_framesize(sensor.QQVGA) # or sensor.QVGA (or others)
-if sensor.get_id() == sensor.OV7725: # Reduce sensor PLL from 6x to 4x.
-    sensor.__write_reg(0x0D, (sensor.__read_reg(0x0D) & 0x3F) | 0x40)
-sensor.skip_frames(time = 2000) # Let new settings take effect.
-sensor.set_auto_gain(False) # Turn this off too.
-clock = time.clock() # Tracks FPS.
-
-if "temp" not in os.listdir(): os.mkdir("temp") # Make a temp directory
-
-print("About to save background image...")
-sensor.skip_frames(time = 2000) # Give the user time to get ready.
-sensor.snapshot().save("temp/bg.bmp")
-print("Saved background image - Now frame differencing!")
-
-while(True):
-    clock.tick() # Track elapsed milliseconds between snapshots().
-    img = sensor.snapshot() # Take a picture and return the image.
-
-    # Note that for shadow removal to work the background image must be
-    # shadow free and have the same lighting as the latest image. Unlike max()
-    # shadow removal won't remove all dark objects unless they were shadows...
-
-    # Replace the image with the "abs(NEW-OLD)" frame difference.
-    img.remove_shadows("temp/bg.bmp").difference("temp/bg.bmp")
-
-    hist = img.get_histogram()
-    # The code below works by comparing the 99th percentile value (e.g. the
-    # non-outlier max value) against the 90th percentile value (e.g. a non-max
-    # value). The difference between the two values will grow as more pixels
-    # in the difference image change.
-    diff = hist.get_percentile(0.99).l_value() - hist.get_percentile(0.90).l_value()
-    triggered = diff > TRIGGER_THRESHOLD
-
-    print(clock.fps(), triggered) # Note: Your OpenMV Cam runs about half as fast while
-                                  # connected to your computer. The FPS should increase once disconnected.
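Every frame-differencing variant above shares the same trigger: compare a near-max percentile of the difference image against a merely-high one, and fire when the spread exceeds TRIGGER_THRESHOLD. A hedged standalone form of that loop body (motion_triggered is a hypothetical name, not used by the scripts):

    def motion_triggered(diff_img, threshold=5):
        # diff_img is the frame difference; threshold mirrors TRIGGER_THRESHOLD.
        hist = diff_img.get_histogram()
        p99 = hist.get_percentile(0.99).l_value()  # non-outlier max value
        p90 = hist.get_percentile(0.90).l_value()  # typical high value
        return (p99 - p90) > threshold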
diff --git a/scripts/examples/Arduino/Portenta-H7/20-Frame-Differencing/on_disk_structural_similarity.py b/scripts/examples/Arduino/Portenta-H7/20-Frame-Differencing/on_disk_structural_similarity.py
deleted file mode 100644
index f7a2977fc..000000000
--- a/scripts/examples/Arduino/Portenta-H7/20-Frame-Differencing/on_disk_structural_similarity.py
+++ /dev/null
@@ -1,34 +0,0 @@
-# Structural Similarity (SSIM) Example
-#
-# Note: You will need an SD card to run this example.
-#
-# This example shows off how to use the SSIM algorithm on your OpenMV Cam
-# to detect differences between two images. The SSIM algorithm compares
-# 8x8 blocks of pixels between the two images to determine a similarity
-# score.
-
-import sensor, image, pyb, os, time
-
-# The image has likely changed if the sim.min() is lower than this.
-MIN_TRIGGER_THRESHOLD = -0.4
-
-sensor.reset() # Initialize the camera sensor.
-sensor.set_pixformat(sensor.GRAYSCALE) # or sensor.RGB565
-sensor.set_framesize(sensor.QVGA) # or sensor.QQVGA (or others)
-sensor.skip_frames(time = 2000) # Let new settings take effect.
-clock = time.clock() # Tracks FPS.
-
-if "temp" not in os.listdir(): os.mkdir("temp") # Make a temp directory
-
-print("About to save background image...")
-sensor.skip_frames(time = 2000) # Give the user time to get ready.
-sensor.snapshot().save("temp/bg.bmp")
-print("Saved background image!")
-
-while(True):
-    clock.tick() # Track elapsed milliseconds between snapshots().
-    img = sensor.snapshot() # Take a picture and return the image.
-    sim = img.get_similarity("temp/bg.bmp")
-    change = "- Change -" if sim.min() < MIN_TRIGGER_THRESHOLD else "- No Change -"
-
-    print(clock.fps(), change, sim)
diff --git a/scripts/examples/Arduino/Portenta-H7/21-Sensor-Control/himax_motion_detection.py b/scripts/examples/Arduino/Portenta-H7/21-Sensor-Control/himax_motion_detection.py
deleted file mode 100644
index 23b35613d..000000000
--- a/scripts/examples/Arduino/Portenta-H7/21-Sensor-Control/himax_motion_detection.py
+++ /dev/null
@@ -1,40 +0,0 @@
-# Himax motion detection example.
-
-import sensor, image, time, pyb
-from pyb import Pin, ExtInt
-
-sensor.reset()
-sensor.set_pixformat(sensor.GRAYSCALE)
-sensor.set_framesize(sensor.QVGA)
-sensor.skip_frames(time=2000)
-
-# The sensor is less noisy with lower FPS.
-sensor.set_framerate(15)
-
-# Configure and enable motion detection.
-sensor.ioctl(sensor.IOCTL_HIMAX_MD_THRESHOLD, 0x01)
-sensor.ioctl(sensor.IOCTL_HIMAX_MD_WINDOW, (0, 0, 320, 240))
-sensor.ioctl(sensor.IOCTL_HIMAX_MD_CLEAR)
-sensor.ioctl(sensor.IOCTL_HIMAX_MD_ENABLE, True)
-
-motion_detected = False
-def on_motion(line):
-    global motion_detected
-    motion_detected = True
-
-led = pyb.LED(3)
-# Configure the external interrupt pin. When motion is detected, this pin is pulled high.
-ext = ExtInt(Pin("PC15"), ExtInt.IRQ_RISING, Pin.PULL_DOWN, on_motion)
-
-clock = time.clock()
-while(True):
-    clock.tick()
-    img = sensor.snapshot()
-    if (motion_detected):
-        led.on()
-        time.sleep_ms(500)
-        # Clear the motion detection flag.
-        sensor.ioctl(sensor.IOCTL_HIMAX_MD_CLEAR)
-        motion_detected = False
-        led.off()
-    print(clock.fps())
diff --git a/scripts/examples/Arduino/Portenta-H7/21-Sensor-Control/sensor_auto_gain_control.py b/scripts/examples/Arduino/Portenta-H7/21-Sensor-Control/sensor_auto_gain_control.py
deleted file mode 100644
index cf5d2d50e..000000000
--- a/scripts/examples/Arduino/Portenta-H7/21-Sensor-Control/sensor_auto_gain_control.py
+++ /dev/null
@@ -1,45 +0,0 @@
-# Sensor Auto Gain Control
-#
-# This example shows off how to control the sensor's gain
-# using the automatic gain control algorithm.
-
-# What's the difference between gain and exposure control?
-#
-# Well, by increasing the exposure time for the image you're getting more
-# light on the camera. This gives you the best signal to noise ratio. In
-# general, you always want to increase the exposure time... except, when you
-# increase the exposure time you decrease the maximum possible frame rate
-# and if anything moves in the image it will start to blur more with a
-# higher exposure time. Gain control allows you to increase the output per
-# pixel using analog and digital multipliers... however, it also amplifies
-# noise. So, it's best to let the exposure increase as much as possible
-# and then use gain control to make up any remaining ground.
-
-# We can achieve the above by setting a gain ceiling on the automatic
-# gain control algorithm. Once this is set the algorithm will have to
-# increase the exposure time to meet any gain needs versus using gain
-# to do so. However, this comes at the price of the exposure time varying
-# more when the lighting changes versus the exposure being constant and
-# the gain changing.
-
-import sensor, image, time
-
-sensor.reset() # Reset and initialize the sensor.
-sensor.set_pixformat(sensor.GRAYSCALE) # Set pixel format to GRAYSCALE (or RGB565)
-sensor.set_framesize(sensor.QVGA) # Set frame size to QVGA (320x240)
-
-# The gain db ceiling maxes out at about 24 db for the OV7725 sensor.
-sensor.set_auto_gain(True, gain_db_ceiling = 16.0) # Default gain.
-
-# Note! If you set the gain ceiling too low without adjusting the exposure control
-# target value then you'll just get a lot of oscillation from the exposure
-# control if it's on.
-
-sensor.skip_frames(time = 2000) # Wait for settings to take effect.
-clock = time.clock() # Create a clock object to track the FPS.
-
-while(True):
-    clock.tick() # Update the FPS clock.
-    img = sensor.snapshot() # Take a picture and return the image.
-    print("FPS %f, Gain %f dB, Exposure %d us" % \
-        (clock.fps(), sensor.get_gain_db(), sensor.get_exposure_us()))
diff --git a/scripts/examples/Arduino/Portenta-H7/21-Sensor-Control/sensor_exposure_control.py b/scripts/examples/Arduino/Portenta-H7/21-Sensor-Control/sensor_exposure_control.py
deleted file mode 100644
index a68a0de61..000000000
--- a/scripts/examples/Arduino/Portenta-H7/21-Sensor-Control/sensor_exposure_control.py
+++ /dev/null
@@ -1,66 +0,0 @@
-# Sensor Exposure Control
-#
-# This example shows off how to control the camera sensor's
-# exposure manually versus letting auto exposure control run.
-
-# What's the difference between gain and exposure control?
-#
-# Well, by increasing the exposure time for the image you're getting more
-# light on the camera. This gives you the best signal to noise ratio. In
-# general, you always want to increase the exposure time... except, when you
-# increase the exposure time you decrease the maximum possible frame rate
-# and if anything moves in the image it will start to blur more with a
-# higher exposure time. Gain control allows you to increase the output per
-# pixel using analog and digital multipliers... however, it also amplifies
-# noise. So, it's best to let the exposure increase as much as possible
-# and then use gain control to make up any remaining ground.
-
-import sensor, image, time
-
-# Change this value to adjust the exposure. Try 10.0/0.1/etc.
-EXPOSURE_TIME_SCALE = 1.0
-
-sensor.reset() # Reset and initialize the sensor.
-sensor.set_pixformat(sensor.GRAYSCALE) # Set pixel format to GRAYSCALE (or RGB565)
-sensor.set_framesize(sensor.QVGA) # Set frame size to QVGA (320x240)
-
-# Print out the initial exposure time for comparison.
-print("Initial exposure == %d" % sensor.get_exposure_us())
-
-sensor.skip_frames(time = 2000) # Wait for settings to take effect.
-clock = time.clock() # Create a clock object to track the FPS.
-
-# You have to turn automatic gain control and automatic white balance off;
-# otherwise they will change the image gains to undo any exposure settings
-# that you put in place...
-sensor.set_auto_gain(False)
-# Need to let the above settings get in...
-sensor.skip_frames(time = 500)
-
-current_exposure_time_in_microseconds = sensor.get_exposure_us()
-print("Current Exposure == %d" % current_exposure_time_in_microseconds)
-
-# Auto exposure control (AEC) is enabled by default. Calling the below function
-# disables sensor auto exposure control. The additional "exposure_us"
-# argument then overrides the auto exposure value after AEC is disabled.
-sensor.set_auto_exposure(False, \
-    exposure_us = int(current_exposure_time_in_microseconds * EXPOSURE_TIME_SCALE))
-
-print("New exposure == %d" % sensor.get_exposure_us())
-# sensor.get_exposure_us() returns the exact camera sensor exposure time
-# in microseconds. However, this may be a different number than what was
-# commanded because the sensor code converts the exposure time in microseconds
-# to a row/pixel/clock time which doesn't perfectly match with microseconds...
-
-# If you want to turn auto exposure back on do: sensor.set_auto_exposure(True)
-# Note that the camera sensor will then change the exposure time as it likes.
-
-# Doing: sensor.set_auto_exposure(False)
-# just disables the exposure value update but does not change the exposure
-# value the camera sensor determined was good.
-
-while(True):
-    clock.tick() # Update the FPS clock.
-    img = sensor.snapshot() # Take a picture and return the image.
-    print(clock.fps()) # Note: OpenMV Cam runs about half as fast when connected
-                       # to the IDE. The FPS should increase once disconnected.
diff --git a/scripts/examples/Arduino/Portenta-H7/21-Sensor-Control/sensor_horizontal_mirror.py b/scripts/examples/Arduino/Portenta-H7/21-Sensor-Control/sensor_horizontal_mirror.py
deleted file mode 100644
index 2099a1474..000000000
--- a/scripts/examples/Arduino/Portenta-H7/21-Sensor-Control/sensor_horizontal_mirror.py
+++ /dev/null
@@ -1,21 +0,0 @@
-# Sensor Horizontal Mirror Example
-#
-# This example shows off horizontally mirroring the image in hardware
-# from the camera sensor.
-
-import sensor, image, time
-
-sensor.reset() # Reset and initialize the sensor.
-sensor.set_pixformat(sensor.GRAYSCALE) # Set pixel format to GRAYSCALE (or RGB565)
-sensor.set_framesize(sensor.QVGA) # Set frame size to QVGA (320x240)
-sensor.skip_frames(time = 2000) # Wait for settings to take effect.
-clock = time.clock() # Create a clock object to track the FPS.
-
-# Change this to False to undo the mirror.
-sensor.set_hmirror(True)
-
-while(True):
-    clock.tick() # Update the FPS clock.
-    img = sensor.snapshot() # Take a picture and return the image.
-    print(clock.fps()) # Note: OpenMV Cam runs about half as fast when connected
-                       # to the IDE. The FPS should increase once disconnected.
diff --git a/scripts/examples/Arduino/Portenta-H7/21-Sensor-Control/sensor_manual_whitebal_control.py b/scripts/examples/Arduino/Portenta-H7/21-Sensor-Control/sensor_manual_whitebal_control.py
deleted file mode 100644
index a15fc6176..000000000
--- a/scripts/examples/Arduino/Portenta-H7/21-Sensor-Control/sensor_manual_whitebal_control.py
+++ /dev/null
@@ -1,38 +0,0 @@
-# Sensor Manual Whitebal Control
-#
-# This example shows off how to control the camera sensor's
-# white balance gain manually versus letting the AWB control run.
-
-# White balance is achieved by adjusting R/G/B gain values
-# such that the average color of the image is gray. The
-# automatic white balance (AWB) algorithm does this for
-# you but usually ends up with a different result each
-# time you turn the camera on, making it hard to get
-# color tracking settings right. By manually recording
-# the gain values you like and then forcing them to
-# the sensor on startup you can control the colors
-# the camera sees.
-
-import sensor, image, time
-
-sensor.reset() # Reset and initialize the sensor.
-sensor.set_pixformat(sensor.RGB565) # Set pixel format to RGB565 (or GRAYSCALE)
-sensor.set_framesize(sensor.QVGA) # Set frame size to QVGA (320x240)
-sensor.skip_frames(time = 2000) # Wait for settings to take effect.
-clock = time.clock() # Create a clock object to track the FPS.
-
-# You can control the white balance gains here. The first value is the
-# R gain in db, then the G gain in db, followed by the B gain in db.
-#
-# Uncomment the line below with gain values you like (get them from the print out).
-#
-# sensor.set_auto_whitebal(False, rgb_gain_db = (0.0, 0.0, 0.0))
-
-# Note: Putting (0.0, 0.0, 0.0) for the gain results in something close to zero
-# coming out. Do not expect the exact value going in to be equal to the value
-# coming out.
-
-while(True):
-    clock.tick() # Update the FPS clock.
-    img = sensor.snapshot() # Take a picture and return the image.
-    print(clock.fps(), \
-        sensor.get_rgb_gain_db()) # Prints the AWB current RGB gains.
diff --git a/scripts/examples/Arduino/Portenta-H7/21-Sensor-Control/sensor_vertical_flip.py b/scripts/examples/Arduino/Portenta-H7/21-Sensor-Control/sensor_vertical_flip.py
deleted file mode 100644
index 3a9e18377..000000000
--- a/scripts/examples/Arduino/Portenta-H7/21-Sensor-Control/sensor_vertical_flip.py
+++ /dev/null
@@ -1,21 +0,0 @@
-# Sensor Vertical Flip Example
-#
-# This example shows off vertically flipping the image in hardware
-# from the camera sensor.
-
-import sensor, image, time
-
-sensor.reset() # Reset and initialize the sensor.
-sensor.set_pixformat(sensor.GRAYSCALE) # Set pixel format to GRAYSCALE (or RGB565)
-sensor.set_framesize(sensor.QVGA) # Set frame size to QVGA (320x240)
-sensor.skip_frames(time = 2000) # Wait for settings to take effect.
-clock = time.clock() # Create a clock object to track the FPS.
-
-# Change this to False to undo the flip.
-sensor.set_vflip(True)
-
-while(True):
-    clock.tick() # Update the FPS clock.
-    img = sensor.snapshot() # Take a picture and return the image.
-    print(clock.fps()) # Note: OpenMV Cam runs about half as fast when connected
-                       # to the IDE. The FPS should increase once disconnected.
diff --git a/scripts/examples/Arduino/Portenta-H7/21-Sensor-Control/sesnor_manual_gain_control.py b/scripts/examples/Arduino/Portenta-H7/21-Sensor-Control/sesnor_manual_gain_control.py
deleted file mode 100644
index 4991b3569..000000000
--- a/scripts/examples/Arduino/Portenta-H7/21-Sensor-Control/sesnor_manual_gain_control.py
+++ /dev/null
@@ -1,66 +0,0 @@
-# Sensor Manual Gain Control
-#
-# This example shows off how to control the camera sensor's
-# gain manually versus letting auto gain control run.
-
-# What's the difference between gain and exposure control?
-#
-# Well, by increasing the exposure time for the image you're getting more
-# light on the camera. This gives you the best signal to noise ratio. In
-# general, you always want to increase the exposure time... except, when you
-# increase the exposure time you decrease the maximum possible frame rate
-# and if anything moves in the image it will start to blur more with a
-# higher exposure time. Gain control allows you to increase the output per
-# pixel using analog and digital multipliers... however, it also amplifies
-# noise. So, it's best to let the exposure increase as much as possible
-# and then use gain control to make up any remaining ground.
-
-import sensor, image, time
-
-# Change this value to adjust the gain. Try 10.0/0.1/etc.
-GAIN_SCALE = 1.0
-
-sensor.reset() # Reset and initialize the sensor.
-sensor.set_pixformat(sensor.GRAYSCALE) # Set pixel format to GRAYSCALE (or RGB565)
-sensor.set_framesize(sensor.QVGA) # Set frame size to QVGA (320x240)
-
-# Print out the initial gain for comparison.
-print("Initial gain == %f db" % sensor.get_gain_db())
-
-sensor.skip_frames(time = 2000) # Wait for settings to take effect.
-clock = time.clock() # Create a clock object to track the FPS.
-
-# You have to turn automatic exposure control and automatic white balance off;
-# otherwise they will change the image exposure to undo any gain settings
-# that you put in place...
-sensor.set_auto_exposure(False)
-# Need to let the above settings get in...
-sensor.skip_frames(time = 500)
-
-current_gain_in_decibels = sensor.get_gain_db()
-print("Current Gain == %f db" % current_gain_in_decibels)
-
-# Auto gain control (AGC) is enabled by default. Calling the below function
-# disables sensor auto gain control. The additional "gain_db"
-# argument then overrides the auto gain value after AGC is disabled.
-sensor.set_auto_gain(False, \
-    gain_db = current_gain_in_decibels * GAIN_SCALE)
-
-print("New gain == %f db" % sensor.get_gain_db())
-# sensor.get_gain_db() returns the exact camera sensor gain decibels.
-# However, this may be a different number than what was commanded because
-# the sensor code converts the gain to a small and large gain value which
-# aren't able to accept all possible values...
-
-# If you want to turn auto gain back on do: sensor.set_auto_gain(True)
-# Note that the camera sensor will then change the gain as it likes.
-
-# Doing: sensor.set_auto_gain(False)
-# just disables the gain value update but does not change the gain
-# value the camera sensor determined was good.
-
-while(True):
-    clock.tick() # Update the FPS clock.
-    img = sensor.snapshot() # Take a picture and return the image.
-    print(clock.fps()) # Note: OpenMV Cam runs about half as fast when connected
-                       # to the IDE. The FPS should increase once disconnected.
diff --git a/scripts/examples/Arduino/Portenta-H7/22-Optical-Flow/absolute-rotation-scale.py b/scripts/examples/Arduino/Portenta-H7/22-Optical-Flow/absolute-rotation-scale.py
deleted file mode 100644
index 3163abd89..000000000
--- a/scripts/examples/Arduino/Portenta-H7/22-Optical-Flow/absolute-rotation-scale.py
+++ /dev/null
@@ -1,67 +0,0 @@
-# Absolute Optical Flow Rotation/Scale
-#
-# This example shows off using your OpenMV Cam to measure
-# rotation/scale by comparing the current and a previous
-# image against each other. Note that only rotation/scale is
-# handled - not X and Y translation in this mode.
-
-# To run this demo effectively, please mount your OpenMV Cam on a steady
-# base and SLOWLY rotate the camera around the lens and move the camera
-# forward/backwards to see the numbers change.
-# I.e. Z direction changes only.
-
-import sensor, image, time, math
-
-# NOTE!!! You have to use a small power of 2 resolution when using
-# find_displacement(). This is because the algorithm is powered by
-# something called phase correlation which does the image comparison
-# using FFTs. A non-power of 2 resolution requires padding to a power
-# of 2 which reduces the usefulness of the algorithm results. Please
-# use a resolution like B64X64 or B64X32 (2x faster).
-
-# Your OpenMV Cam supports power of 2 resolutions of 64x32, 64x64,
-# 128x64, and 128x128. If you want a resolution of 32x32 you can create
-# it by doing "img.pool(2, 2)" on a 64x64 image.
-
-sensor.reset() # Reset and initialize the sensor.
-sensor.set_pixformat(sensor.GRAYSCALE) # Set pixel format to GRAYSCALE (or RGB565)
-sensor.set_framesize(sensor.B64X64) # Set frame size to 64x64... (or 64x32)...
-sensor.skip_frames(time = 2000) # Wait for settings to take effect.
-clock = time.clock() # Create a clock object to track the FPS.
-
-# Take from the main frame buffer's RAM to allocate a second frame buffer.
-# There's a lot more RAM in the frame buffer than in the MicroPython heap.
-# However, after doing this you have a lot less RAM for some algorithms...
-# So, be aware that it's a lot easier to run out of RAM now.
-extra_fb = sensor.alloc_extra_fb(sensor.width(), sensor.height(), sensor.GRAYSCALE)
-extra_fb.replace(sensor.snapshot())
-
-while(True):
-    clock.tick() # Track elapsed milliseconds between snapshots().
-    img = sensor.snapshot() # Take a picture and return the image.
-
-    # This algorithm is hard to test without a perfect jig... So, here's a cheat to see that it works.
-    # Put in a z_rotation value below and you should see the r output be equal to that.
-    if(0):
-        expected_rotation = 20.0
-        img.rotation_corr(z_rotation=expected_rotation)
-
-    # This algorithm is hard to test without a perfect jig... So, here's a cheat to see that it works.
-    # Put in a zoom value below and you should see the z output be equal to that.
-    if(0):
-        expected_zoom = 0.8
-        img.rotation_corr(zoom=expected_zoom)
-
-    # For this example we never update the old image to measure absolute change.
-    displacement = extra_fb.find_displacement(img, logpolar=True)
-
-    # Offset results are noisy without filtering so we drop some accuracy.
-    rotation_change = int(math.degrees(displacement.rotation()) * 5) / 5.0
-    zoom_amount = displacement.scale()
-
-    if(displacement.response() > 0.1): # Below 0.1 or so (YMMV) and the results are just noise.
-        print("{0:+f}r {1:+f}z {2} {3} FPS".format(rotation_change, zoom_amount, \
-                                                    displacement.response(),
-                                                    clock.fps()))
-    else:
-        print(clock.fps())
diff --git a/scripts/examples/Arduino/Portenta-H7/22-Optical-Flow/absolute-translation.py b/scripts/examples/Arduino/Portenta-H7/22-Optical-Flow/absolute-translation.py
deleted file mode 100644
index 685a7eb18..000000000
--- a/scripts/examples/Arduino/Portenta-H7/22-Optical-Flow/absolute-translation.py
+++ /dev/null
@@ -1,55 +0,0 @@
-# Absolute Optical Flow Translation
-#
-# This example shows off using your OpenMV Cam to measure translation
-# in the X and Y direction by comparing the current and a previous
-# image against each other. Note that only X and Y translation is
-# handled - not rotation/scale in this mode.
-
-# To run this demo effectively, please mount your OpenMV Cam on a steady
-# base and SLOWLY translate it to the left, right, up, and down and
-# watch the numbers change. Note that you can see displacement numbers
-# up to +/- half of the horizontal and vertical resolution.
-
-import sensor, image, time
-
-# NOTE!!! You have to use a small power of 2 resolution when using
-# find_displacement(). This is because the algorithm is powered by
-# something called phase correlation which does the image comparison
-# using FFTs. A non-power of 2 resolution requires padding to a power
-# of 2 which reduces the usefulness of the algorithm results. Please
-# use a resolution like B64X64 or B64X32 (2x faster).
-
-# Your OpenMV Cam supports power of 2 resolutions of 64x32, 64x64,
-# 128x64, and 128x128. If you want a resolution of 32x32 you can create
-# it by doing "img.pool(2, 2)" on a 64x64 image.
-
-sensor.reset() # Reset and initialize the sensor.
-sensor.set_pixformat(sensor.GRAYSCALE) # Set pixel format to GRAYSCALE (or RGB565)
-sensor.set_framesize(sensor.B64X64) # Set frame size to 64x64... (or 64x32)...
-sensor.skip_frames(time = 2000) # Wait for settings to take effect.
-clock = time.clock() # Create a clock object to track the FPS.
-
-# Take from the main frame buffer's RAM to allocate a second frame buffer.
-# There's a lot more RAM in the frame buffer than in the MicroPython heap.
-# However, after doing this you have a lot less RAM for some algorithms...
-# So, be aware that it's a lot easier to run out of RAM now.
-extra_fb = sensor.alloc_extra_fb(sensor.width(), sensor.height(), sensor.GRAYSCALE)
-extra_fb.replace(sensor.snapshot())
-
-while(True):
-    clock.tick() # Track elapsed milliseconds between snapshots().
-    img = sensor.snapshot() # Take a picture and return the image.
-
-    # For this example we never update the old image to measure absolute change.
-    displacement = extra_fb.find_displacement(img)
-
-    # Offset results are noisy without filtering so we drop some accuracy.
-    sub_pixel_x = int(displacement.x_translation() * 5) / 5.0
-    sub_pixel_y = int(displacement.y_translation() * 5) / 5.0
-
-    if(displacement.response() > 0.1): # Below 0.1 or so (YMMV) and the results are just noise.
-        print("{0:+f}x {1:+f}y {2} {3} FPS".format(sub_pixel_x, sub_pixel_y,
-                                                    displacement.response(),
-                                                    clock.fps()))
-    else:
-        print(clock.fps())
diff --git a/scripts/examples/Arduino/Portenta-H7/22-Optical-Flow/differential-rotation-scale.py b/scripts/examples/Arduino/Portenta-H7/22-Optical-Flow/differential-rotation-scale.py
deleted file mode 100644
index f8d65f731..000000000
--- a/scripts/examples/Arduino/Portenta-H7/22-Optical-Flow/differential-rotation-scale.py
+++ /dev/null
@@ -1,67 +0,0 @@
-# Differential Optical Flow Rotation/Scale
-#
-# This example shows off using your OpenMV Cam to measure
-# rotation/scale by comparing the current and the previous
-# image against each other. Note that only rotation/scale is
-# handled - not X and Y translation in this mode.
-
-# To run this demo effectively, please mount your OpenMV Cam on a steady
-# base and SLOWLY rotate the camera around the lens and move the camera
-# forward/backwards to see the numbers change.
-# I.e. Z direction changes only.
-
-import sensor, image, time, math
-
-# NOTE!!! You have to use a small power of 2 resolution when using
-# find_displacement(). This is because the algorithm is powered by
-# something called phase correlation which does the image comparison
-# using FFTs. A non-power of 2 resolution requires padding to a power
-# of 2 which reduces the usefulness of the algorithm results. Please
-# use a resolution like B64X64 or B64X32 (2x faster).
-
-# Your OpenMV Cam supports power of 2 resolutions of 64x32, 64x64,
-# 128x64, and 128x128. If you want a resolution of 32x32 you can create
-# it by doing "img.pool(2, 2)" on a 64x64 image.
-
-sensor.reset() # Reset and initialize the sensor.
-sensor.set_pixformat(sensor.GRAYSCALE) # Set pixel format to GRAYSCALE (or RGB565)
-sensor.set_framesize(sensor.B64X64) # Set frame size to 64x64... (or 64x32)...
-sensor.skip_frames(time = 2000) # Wait for settings to take effect.
-clock = time.clock() # Create a clock object to track the FPS.
-
-# Take from the main frame buffer's RAM to allocate a second frame buffer.
-# There's a lot more RAM in the frame buffer than in the MicroPython heap.
-# However, after doing this you have a lot less RAM for some algorithms...
-# So, be aware that it's a lot easier to run out of RAM now.
-extra_fb = sensor.alloc_extra_fb(sensor.width(), sensor.height(), sensor.GRAYSCALE)
-extra_fb.replace(sensor.snapshot())
-
-while(True):
-    clock.tick() # Track elapsed milliseconds between snapshots().
-    img = sensor.snapshot() # Take a picture and return the image.
-
-    # This algorithm is hard to test without a perfect jig... So, here's a cheat to see that it works.
-    # Put in a z_rotation value below and you should see the r output be equal to that.
-    if(0):
-        expected_rotation = 20.0
-        extra_fb.rotation_corr(z_rotation=(-expected_rotation))
-
-    # This algorithm is hard to test without a perfect jig... So, here's a cheat to see that it works.
-    # Put in a zoom value below and you should see the z output be equal to that.
-    if(0):
-        expected_zoom = 0.8
-        extra_fb.rotation_corr(zoom=(2.00-expected_zoom))
-
-    displacement = extra_fb.find_displacement(img, logpolar=True)
-    extra_fb.replace(img)
-
-    # Offset results are noisy without filtering so we drop some accuracy.
-    rotation_change = int(math.degrees(displacement.rotation()) * 5) / 5.0
-    zoom_amount = displacement.scale()
-
-    if(displacement.response() > 0.1): # Below 0.1 or so (YMMV) and the results are just noise.
-        print("{0:+f}r {1:+f}z {2} {3} FPS".format(rotation_change, zoom_amount, \
-                                                    displacement.response(),
-                                                    clock.fps()))
    else:
-        print(clock.fps())
diff --git a/scripts/examples/Arduino/Portenta-H7/22-Optical-Flow/differential-translation.py b/scripts/examples/Arduino/Portenta-H7/22-Optical-Flow/differential-translation.py
deleted file mode 100644
index fe01c242d..000000000
--- a/scripts/examples/Arduino/Portenta-H7/22-Optical-Flow/differential-translation.py
+++ /dev/null
@@ -1,55 +0,0 @@
-# Differential Optical Flow Translation
-#
-# This example shows off using your OpenMV Cam to measure translation
-# in the X and Y direction by comparing the current and the previous
-# image against each other. Note that only X and Y translation is
-# handled - not rotation/scale in this mode.
-
-# To run this demo effectively, please mount your OpenMV Cam on a steady
-# base and QUICKLY translate it to the left, right, up, and down and
-# watch the numbers change. Note that you can see displacement numbers
-# up to +/- half of the horizontal and vertical resolution.
-
-import sensor, image, time
-
-# NOTE!!! You have to use a small power of 2 resolution when using
-# find_displacement(). This is because the algorithm is powered by
-# something called phase correlation which does the image comparison
-# using FFTs. A non-power of 2 resolution requires padding to a power
-# of 2 which reduces the usefulness of the algorithm results. Please
-# use a resolution like B64X64 or B64X32 (2x faster).
-
-# Your OpenMV Cam supports power of 2 resolutions of 64x32, 64x64,
-# 128x64, and 128x128. If you want a resolution of 32x32 you can create
-# it by doing "img.pool(2, 2)" on a 64x64 image.
-
-sensor.reset() # Reset and initialize the sensor.
-sensor.set_pixformat(sensor.GRAYSCALE) # Set pixel format to GRAYSCALE (or RGB565)
-sensor.set_framesize(sensor.B64X64) # Set frame size to 64x64... (or 64x32)...
-sensor.skip_frames(time = 2000) # Wait for settings to take effect.
-clock = time.clock() # Create a clock object to track the FPS.
-
-# Take from the main frame buffer's RAM to allocate a second frame buffer.
-# There's a lot more RAM in the frame buffer than in the MicroPython heap.
-# However, after doing this you have a lot less RAM for some algorithms...
-# So, be aware that it's a lot easier to run out of RAM now.
-extra_fb = sensor.alloc_extra_fb(sensor.width(), sensor.height(), sensor.GRAYSCALE)
-extra_fb.replace(sensor.snapshot())
-
-while(True):
-    clock.tick() # Track elapsed milliseconds between snapshots().
-    img = sensor.snapshot() # Take a picture and return the image.
-
-    displacement = extra_fb.find_displacement(img)
-    extra_fb.replace(img)
-
-    # Offset results are noisy without filtering so we drop some accuracy.
-    sub_pixel_x = int(displacement.x_translation() * 5) / 5.0
-    sub_pixel_y = int(displacement.y_translation() * 5) / 5.0
-
-    if(displacement.response() > 0.1): # Below 0.1 or so (YMMV) and the results are just noise.
-        print("{0:+f}x {1:+f}y {2} {3} FPS".format(sub_pixel_x, sub_pixel_y,
-                                                    displacement.response(),
-                                                    clock.fps()))
-    else:
-        print(clock.fps())
diff --git a/scripts/examples/Arduino/Portenta-H7/22-Optical-Flow/image-patches-absolute-rotation-scale.py b/scripts/examples/Arduino/Portenta-H7/22-Optical-Flow/image-patches-absolute-rotation-scale.py
deleted file mode 100644
index 414a105e6..000000000
--- a/scripts/examples/Arduino/Portenta-H7/22-Optical-Flow/image-patches-absolute-rotation-scale.py
+++ /dev/null
@@ -1,73 +0,0 @@
-# Image Patches Absolute Optical Flow Rotation/Scale
-#
-# This example shows off using your OpenMV Cam to measure
-# rotation/scale by comparing the current and a previous
-# image against each other. Note that only rotation/scale is
-# handled - not X and Y translation in this mode.
-#
-# However, this example goes beyond doing optical flow on the whole
-# image at once. Instead, it breaks up the process by working on groups
-# of pixels in the image. This gives you a "new" image of results.
-#
-# NOTE that surfaces need to have some type of "edge" on them for the
-# algorithm to work. A featureless surface produces crazy results.
-
-# NOTE: Unless you have a very nice test rig, the usefulness of this example is hard to see...
-
-BLOCK_W = 16 # pow2
-BLOCK_H = 16 # pow2
-
-# To run this demo effectively, please mount your OpenMV Cam on a steady
-# base and SLOWLY rotate the camera around the lens and move the camera
-# forward/backwards to see the numbers change.
-# I.e. Z direction changes only.
-
-import sensor, image, time, math
-
-# NOTE!!! You have to use a small power of 2 resolution when using
-# find_displacement(). This is because the algorithm is powered by
-# something called phase correlation which does the image comparison
-# using FFTs. A non-power of 2 resolution requires padding to a power
-# of 2 which reduces the usefulness of the algorithm results. Please
-# use a resolution like B128X128 or B128X64 (2x faster).
-
-# Your OpenMV Cam supports power of 2 resolutions of 64x32, 64x64,
-# 128x64, and 128x128. If you want a resolution of 32x32 you can create
-# it by doing "img.pool(2, 2)" on a 64x64 image.
-
-sensor.reset() # Reset and initialize the sensor.
-sensor.set_pixformat(sensor.GRAYSCALE) # Set pixel format to GRAYSCALE (or RGB565)
-sensor.set_framesize(sensor.B128X128) # Set frame size to 128x128... (or 128x64)...
-sensor.skip_frames(time = 2000) # Wait for settings to take effect.
-clock = time.clock() # Create a clock object to track the FPS.
-
-# Take from the main frame buffer's RAM to allocate a second frame buffer.
-# There's a lot more RAM in the frame buffer than in the MicroPython heap.
-# However, after doing this you have a lot less RAM for some algorithms...
-# So, be aware that it's a lot easier to run out of RAM now.
-extra_fb = sensor.alloc_extra_fb(sensor.width(), sensor.height(), sensor.GRAYSCALE)
-extra_fb.replace(sensor.snapshot())
-
-while(True):
-    clock.tick() # Track elapsed milliseconds between snapshots().
-    img = sensor.snapshot() # Take a picture and return the image.
-
-    for y in range(0, sensor.height(), BLOCK_H):
-        for x in range(0, sensor.width(), BLOCK_W):
-            # For this example we never update the old image to measure absolute change.
-            displacement = extra_fb.find_displacement(img, logpolar=True, \
-                                                      roi = (x, y, BLOCK_W, BLOCK_H), template_roi = (x, y, BLOCK_W, BLOCK_H))
-
-            # Below 0.1 or so (YMMV) and the results are just noise.
-            if(displacement.response() > 0.1):
-                rotation_change = displacement.rotation()
-                zoom_amount = displacement.scale()
-                pixel_x = x + (BLOCK_W//2) + int(math.sin(rotation_change) * zoom_amount * (BLOCK_W//4))
-                pixel_y = y + (BLOCK_H//2) + int(math.cos(rotation_change) * zoom_amount * (BLOCK_H//4))
-                img.draw_line((x + BLOCK_W//2, y + BLOCK_H//2, pixel_x, pixel_y), \
-                              color = 255)
-            else:
-                img.draw_line((x + BLOCK_W//2, y + BLOCK_H//2, x + BLOCK_W//2, y + BLOCK_H//2), \
-                              color = 0)
-
-    print(clock.fps())
diff --git a/scripts/examples/Arduino/Portenta-H7/22-Optical-Flow/image-patches-absolute-translation.py b/scripts/examples/Arduino/Portenta-H7/22-Optical-Flow/image-patches-absolute-translation.py
deleted file mode 100644
index 0bfae8ca6..000000000
--- a/scripts/examples/Arduino/Portenta-H7/22-Optical-Flow/image-patches-absolute-translation.py
+++ /dev/null
@@ -1,69 +0,0 @@
-# Image Patches Absolute Optical Flow Translation
-#
-# This example shows off using your OpenMV Cam to measure translation
-# in the X and Y direction by comparing the current and a previous
-# image against each other. Note that only X and Y translation is
-# handled - not rotation/scale in this mode.
-#
-# However, this example goes beyond doing optical flow on the whole
-# image at once. Instead, it breaks up the process by working on groups
-# of pixels in the image. This gives you a "new" image of results.
-#
-# NOTE that surfaces need to have some type of "edge" on them for the
-# algorithm to work. A featureless surface produces crazy results.
-
-BLOCK_W = 16 # pow2
-BLOCK_H = 16 # pow2
-
-# To run this demo effectively, please mount your OpenMV Cam on a steady
-# base and SLOWLY translate it to the left, right, up, and down and
-# watch the numbers change. Note that you can see displacement numbers
-# up to +/- half of the horizontal and vertical resolution.
-
-import sensor, image, time
-
-# NOTE!!! You have to use a small power of 2 resolution when using
-# find_displacement(). This is because the algorithm is powered by
-# something called phase correlation which does the image comparison
-# using FFTs. A non-power of 2 resolution requires padding to a power
-# of 2 which reduces the usefulness of the algorithm results. Please
-# use a resolution like B128X128 or B128X64 (2x faster).
-
-# Your OpenMV Cam supports power of 2 resolutions of 64x32, 64x64,
-# 128x64, and 128x128. If you want a resolution of 32x32 you can create
-# it by doing "img.pool(2, 2)" on a 64x64 image.
-
-sensor.reset() # Reset and initialize the sensor.
-sensor.set_pixformat(sensor.GRAYSCALE) # Set pixel format to GRAYSCALE (or RGB565)
-sensor.set_framesize(sensor.B128X128) # Set frame size to 128x128... (or 128x64)...
-sensor.skip_frames(time = 2000) # Wait for settings to take effect.
-clock = time.clock() # Create a clock object to track the FPS.
-
-# Take from the main frame buffer's RAM to allocate a second frame buffer.
-# There's a lot more RAM in the frame buffer than in the MicroPython heap.
-# However, after doing this you have a lot less RAM for some algorithms...
-# So, be aware that it's a lot easier to run out of RAM now.
-extra_fb = sensor.alloc_extra_fb(sensor.width(), sensor.height(), sensor.GRAYSCALE)
-extra_fb.replace(sensor.snapshot())
-
-while(True):
-    clock.tick() # Track elapsed milliseconds between snapshots().
-    img = sensor.snapshot() # Take a picture and return the image.
-
-    for y in range(0, sensor.height(), BLOCK_H):
-        for x in range(0, sensor.width(), BLOCK_W):
-            # For this example we never update the old image to measure absolute change.
-            displacement = extra_fb.find_displacement(img, \
-                                                      roi = (x, y, BLOCK_W, BLOCK_H), template_roi = (x, y, BLOCK_W, BLOCK_H))
-
-            # Below 0.1 or so (YMMV) and the results are just noise.
-            if(displacement.response() > 0.1):
-                pixel_x = x + (BLOCK_W//2) + int(displacement.x_translation())
-                pixel_y = y + (BLOCK_H//2) + int(displacement.y_translation())
-                img.draw_line((x + BLOCK_W//2, y + BLOCK_H//2, pixel_x, pixel_y), \
-                              color = 255)
-            else:
-                img.draw_line((x + BLOCK_W//2, y + BLOCK_H//2, x + BLOCK_W//2, y + BLOCK_H//2), \
-                              color = 0)
-
-    print(clock.fps())
diff --git a/scripts/examples/Arduino/Portenta-H7/22-Optical-Flow/image-patches-differential-rotation-scale.py b/scripts/examples/Arduino/Portenta-H7/22-Optical-Flow/image-patches-differential-rotation-scale.py
deleted file mode 100644
index bb1bc2eea..000000000
--- a/scripts/examples/Arduino/Portenta-H7/22-Optical-Flow/image-patches-differential-rotation-scale.py
+++ /dev/null
@@ -1,73 +0,0 @@
-# Image Patches Differential Optical Flow Rotation/Scale
-#
-# This example shows off using your OpenMV Cam to measure
-# rotation/scale by comparing the current and the previous
-# image against each other. Note that only rotation/scale is
-# handled - not X and Y translation in this mode.
-#
-# However, this example goes beyond doing optical flow on the whole
-# image at once. Instead, it breaks up the process by working on groups
-# of pixels in the image. This gives you a "new" image of results.
-#
-# NOTE that surfaces need to have some type of "edge" on them for the
-# algorithm to work. A featureless surface produces crazy results.
-
-# NOTE: Unless you have a very nice test rig, the usefulness of this example is hard to see...
-
-BLOCK_W = 16 # pow2
-BLOCK_H = 16 # pow2
-
-# To run this demo effectively, please mount your OpenMV Cam on a steady
-# base and SLOWLY rotate the camera around the lens and move the camera
-# forward/backwards to see the numbers change.
-# I.e. Z direction changes only.
-
-import sensor, image, time, math
-
-# NOTE!!! You have to use a small power of 2 resolution when using
-# find_displacement(). This is because the algorithm is powered by
-# something called phase correlation which does the image comparison
-# using FFTs. A non-power of 2 resolution requires padding to a power
-# of 2 which reduces the usefulness of the algorithm results. Please
-# use a resolution like B128X128 or B128X64 (2x faster).
-
-# Your OpenMV Cam supports power of 2 resolutions of 64x32, 64x64,
-# 128x64, and 128x128. If you want a resolution of 32x32 you can create
-# it by doing "img.pool(2, 2)" on a 64x64 image.
-
-sensor.reset() # Reset and initialize the sensor.
-sensor.set_pixformat(sensor.GRAYSCALE) # Set pixel format to GRAYSCALE (or RGB565)
-sensor.set_framesize(sensor.B128X128) # Set frame size to 128x128... (or 128x64)...
-sensor.skip_frames(time = 2000) # Wait for settings to take effect.
-clock = time.clock() # Create a clock object to track the FPS.
-
-# Take from the main frame buffer's RAM to allocate a second frame buffer.
-# There's a lot more RAM in the frame buffer than in the MicroPython heap.
-# However, after doing this you have a lot less RAM for some algorithms...
-# So, be aware that it's a lot easier to run out of RAM now.
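The frame-buffer trade-off described in the comment above is the skeleton every one of these optical-flow examples shares: allocate a second frame buffer out of the main frame buffer's RAM, seed it with one snapshot, then phase-correlate each new frame against it. A minimal sketch of that shared pattern, using only the sensor/image calls already present in these scripts (the 0.1 response cutoff is the same rough, tune-it-yourself value the examples use):

    import sensor, time

    sensor.reset()
    sensor.set_pixformat(sensor.GRAYSCALE)
    sensor.set_framesize(sensor.B64X64)  # Must be a power-of-2 resolution (see the NOTE above).
    sensor.skip_frames(time=2000)

    extra_fb = sensor.alloc_extra_fb(sensor.width(), sensor.height(), sensor.GRAYSCALE)
    extra_fb.replace(sensor.snapshot())  # Seed the reference frame.

    clock = time.clock()
    while True:
        clock.tick()
        img = sensor.snapshot()
        d = extra_fb.find_displacement(img)  # Phase correlation against the reference.
        extra_fb.replace(img)  # Differential mode; omit this line for the absolute variants.
        if d.response() > 0.1:  # Low responses are mostly noise.
            print(d.x_translation(), d.y_translation(), clock.fps())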
-extra_fb = sensor.alloc_extra_fb(sensor.width(), sensor.height(), sensor.GRAYSCALE)
-extra_fb.replace(sensor.snapshot())
-
-while(True):
-    clock.tick() # Track elapsed milliseconds between snapshots().
-    img = sensor.snapshot() # Take a picture and return the image.
-
-    for y in range(0, sensor.height(), BLOCK_H):
-        for x in range(0, sensor.width(), BLOCK_W):
-            displacement = extra_fb.find_displacement(img, logpolar=True, \
-                                                      roi = (x, y, BLOCK_W, BLOCK_H), template_roi = (x, y, BLOCK_W, BLOCK_H))
-
-            # Below 0.1 or so (YMMV) and the results are just noise.
-            if(displacement.response() > 0.1):
-                rotation_change = displacement.rotation()
-                zoom_amount = 1.0 + displacement.scale()
-                pixel_x = x + (BLOCK_W//2) + int(math.sin(rotation_change) * zoom_amount * (BLOCK_W//4))
-                pixel_y = y + (BLOCK_H//2) + int(math.cos(rotation_change) * zoom_amount * (BLOCK_H//4))
-                img.draw_line((x + BLOCK_W//2, y + BLOCK_H//2, pixel_x, pixel_y), \
-                              color = 255)
-            else:
-                img.draw_line((x + BLOCK_W//2, y + BLOCK_H//2, x + BLOCK_W//2, y + BLOCK_H//2), \
-                              color = 0)
-    extra_fb.replace(img)
-
-    print(clock.fps())
diff --git a/scripts/examples/Arduino/Portenta-H7/22-Optical-Flow/image-patches-differential-translation.py b/scripts/examples/Arduino/Portenta-H7/22-Optical-Flow/image-patches-differential-translation.py
deleted file mode 100644
index 0a87c42f3..000000000
--- a/scripts/examples/Arduino/Portenta-H7/22-Optical-Flow/image-patches-differential-translation.py
+++ /dev/null
@@ -1,69 +0,0 @@
-# Image Patches Differential Optical Flow Translation
-#
-# This example shows off using your OpenMV Cam to measure translation
-# in the X and Y direction by comparing the current and the previous
-# image against each other. Note that only X and Y translation is
-# handled - not rotation/scale in this mode.
-#
-# However, this example goes beyond doing optical flow on the whole
-# image at once. Instead, it breaks up the process by working on groups
-# of pixels in the image. This gives you a "new" image of results.
-#
-# NOTE that surfaces need to have some type of "edge" on them for the
-# algorithm to work. A featureless surface produces crazy results.
-
-BLOCK_W = 16 # pow2
-BLOCK_H = 16 # pow2
-
-# To run this demo effectively, please mount your OpenMV Cam on a steady
-# base and SLOWLY translate it to the left, right, up, and down and
-# watch the numbers change. Note that you can see displacement numbers
-# up to +/- half of the horizontal and vertical resolution.
-
-import sensor, image, time
-
-# NOTE!!! You have to use a small power of 2 resolution when using
-# find_displacement(). This is because the algorithm is powered by
-# something called phase correlation which does the image comparison
-# using FFTs. A non-power of 2 resolution requires padding to a power
-# of 2 which reduces the usefulness of the algorithm results. Please
-# use a resolution like B128X128 or B128X64 (2x faster).
-
-# Your OpenMV Cam supports power of 2 resolutions of 64x32, 64x64,
-# 128x64, and 128x128. If you want a resolution of 32x32 you can create
-# it by doing "img.pool(2, 2)" on a 64x64 image.
-
-sensor.reset() # Reset and initialize the sensor.
-sensor.set_pixformat(sensor.GRAYSCALE) # Set pixel format to GRAYSCALE (or RGB565)
-sensor.set_framesize(sensor.B128X128) # Set frame size to 128x128... (or 128x64)...
-sensor.skip_frames(time = 2000) # Wait for settings to take effect.
-clock = time.clock() # Create a clock object to track the FPS.
-
-# Take from the main frame buffer's RAM to allocate a second frame buffer.
-# There's a lot more RAM in the frame buffer than in the MicroPython heap.
-# However, after doing this you have a lot less RAM for some algorithms...
-# So, be aware that it's a lot easier to run out of RAM now.
-extra_fb = sensor.alloc_extra_fb(sensor.width(), sensor.height(), sensor.GRAYSCALE)
-extra_fb.replace(sensor.snapshot())
-
-while(True):
-    clock.tick() # Track elapsed milliseconds between snapshots().
-    img = sensor.snapshot() # Take a picture and return the image.
-
-    for y in range(0, sensor.height(), BLOCK_H):
-        for x in range(0, sensor.width(), BLOCK_W):
-            displacement = extra_fb.find_displacement(img, \
-                                                      roi = (x, y, BLOCK_W, BLOCK_H), template_roi = (x, y, BLOCK_W, BLOCK_H))
-
-            # Below 0.1 or so (YMMV) and the results are just noise.
-            if(displacement.response() > 0.1):
-                pixel_x = x + (BLOCK_W//2) + int(displacement.x_translation())
-                pixel_y = y + (BLOCK_H//2) + int(displacement.y_translation())
-                img.draw_line((x + BLOCK_W//2, y + BLOCK_H//2, pixel_x, pixel_y), \
-                              color = 255)
-            else:
-                img.draw_line((x + BLOCK_W//2, y + BLOCK_H//2, x + BLOCK_W//2, y + BLOCK_H//2), \
-                              color = 0)
-    extra_fb.replace(img)
-
-    print(clock.fps())
diff --git a/scripts/examples/Arduino/Portenta-H7/24-External-Sensors/I2C_Lidar_Lite_V3_example_code.py b/scripts/examples/Arduino/Portenta-H7/24-External-Sensors/I2C_Lidar_Lite_V3_example_code.py
deleted file mode 100644
index ac9cee166..000000000
--- a/scripts/examples/Arduino/Portenta-H7/24-External-Sensors/I2C_Lidar_Lite_V3_example_code.py
+++ /dev/null
@@ -1,63 +0,0 @@
-# OpenMV M7 I2C interface with Garmin Lidar Lite V3 - By: Grant Phillips - Sun Apr 8 2018
-
-
-# Returns a basic distance reading from the lidar in cm for the target point and prints to console.
-# Uses default lidar settings. For more advanced settings, see the I2C commands in the manual:
-# https://static.garmin.com/pumac/LIDAR_Lite_v3_Operation_Manual_and_Technical_Specifications.pdf
-
-# I2C Control of LIDAR Lite V3
-# 1. Write 0x04 to register 0x00
-# 2. Read register 0x01. Repeat until bit 0 (LSB) goes low.
-# 3. Read two bytes from 0x8f (high byte 0x0f, then low byte 0x10) to obtain the 16-bit measurement in cm
-
-# HARDWARE CONNECTIONS:
-# Connect the lidar SCL line (green) to I2C 2 SCL on the OpenMV Cam (pin 4)
-# Connect the lidar SDA line (blue) to I2C 2 SDA on the OpenMV Cam (pin 5)
-# 680uF filter capacitor in parallel with the lidar
-# 10k pullup resistors on the SCL and SDA lines to +5Vdc
-
-
-import pyb
-from pyb import I2C
-
-
-lidarReady = bytearray([0xff]) # holds the returned data for the ready check
-lidarReadyCheck = bytes([1]) # to compare bit 0 of lidarReady
-
-startBuf = bytearray([0x00,0x04]) # step 1 address and data
-readyBuf = bytearray([0x01]) # step 2 address for readiness check
-distBuf = bytearray([0x8f]) # step 3 address for distance reading
-distance = -1 # variable for distance reading
-
-# I2C setup
-Lidar = I2C(2, I2C.MASTER) # initialise I2C 2 bus in master mode
-
-
-while(True):
-    distance = -1 # reset to -1 so we know when we get a real reading
-
-    try: # handles errors thrown up if we have an I2C error
-        # Step 1: Write 0x04 to register 0x00
-        Lidar.send(startBuf, 0x62) # this starts a reading (laser visible)
-
-        # Step 2: Read register 0x01 and wait for bit 0 to go low
-        while (lidarReady[0] & readyBuf[0]):
-            Lidar.send(readyBuf, 0x62)
-            lidarReady = Lidar.recv(1, 0x62)
-            pyb.delay(50) # This seems to help reduce errors on the I2C bus
-        lidarReady = bytearray([0xff]) # reset the ready check data for the next reading
-
-        # Step 3: Read the distance measurement from 0x8f (0x0f and 0x10)
-        Lidar.send(distBuf, 0x62)
-        dist = Lidar.recv(2, 0x62)
-        distance = dist[0]
-        distance <<= 8 # move 2 bytes into a 16-bit int
-        distance |= dist[1]
-        pyb.delay(100) # allow time between readings; can go faster but with more errors
-
-    except OSError: # reinitialise the I2C bus on error
-        Lidar.init(I2C.MASTER)
-        print("error, reinitialising")
-
-    if distance > -1:
-        print("Distance:", distance, "cm")
diff --git a/scripts/examples/Arduino/Portenta-H7/25-Machine-Learning/tf_face_collection.py b/scripts/examples/Arduino/Portenta-H7/25-Machine-Learning/tf_face_collection.py
deleted file mode 100644
index 7ae684ded..000000000
--- a/scripts/examples/Arduino/Portenta-H7/25-Machine-Learning/tf_face_collection.py
+++ /dev/null
@@ -1,31 +0,0 @@
-# Face Collection
-#
-# Use this script to gather face images for building a TensorFlow dataset. This script automatically
-# zooms in on the largest face in the field of view, which you can then save using the dataset editor.
-
-import sensor, image, time
-
-sensor.reset()
-sensor.set_pixformat(sensor.GRAYSCALE)
-sensor.set_framesize(sensor.QVGA)
-sensor.skip_frames(time = 2000)
-
-clock = time.clock()
-
-largest_face = None
-largest_face_timeout = 0
-
-while(True):
-    clock.tick()
-
-    faces = sensor.snapshot().gamma_corr(contrast=1.5).find_features(image.HaarCascade("frontalface"))
-
-    if faces:
-        largest_face = max(faces, key = lambda f: f[2] * f[3])
-        largest_face_timeout = 20
-
-    if largest_face_timeout > 0:
-        sensor.get_fb().crop(roi=largest_face)
-        largest_face_timeout -= 1
-
-    print(clock.fps())
diff --git a/scripts/examples/Arduino/Portenta-H7/25-Machine-Learning/tf_face_recognition.py b/scripts/examples/Arduino/Portenta-H7/25-Machine-Learning/tf_face_recognition.py
deleted file mode 100644
index b31bc16b9..000000000
--- a/scripts/examples/Arduino/Portenta-H7/25-Machine-Learning/tf_face_recognition.py
+++ /dev/null
@@ -1,41 +0,0 @@
-# Face Recognition
-#
-# Use this script to run a TensorFlow Lite image classifier on faces detected within an image.
-# The classifier is free to do facial recognition, expression detection, or whatever.
-
-import sensor, image, time, tf
-
-sensor.reset()
-sensor.set_pixformat(sensor.GRAYSCALE)
-sensor.set_framesize(sensor.QVGA)
-sensor.skip_frames(time = 2000)
-
-clock = time.clock()
-
-net = tf.load("trained.tflite", load_to_fb=True)
-labels = [l.rstrip('\n') for l in open("labels.txt")]
-
-while(True):
-    clock.tick()
-
-    # Take a picture and brighten things up for the frontal face detector.
-    img = sensor.snapshot().gamma_corr(contrast=1.5)
-
-    # Returns a list of rects (x, y, w, h) where faces are.
-    faces = img.find_features(image.HaarCascade("frontalface"))
-
-    for f in faces:
-
-        # Classify a face and get the class scores list
-        scores = net.classify(img, roi=f)[0].output()
-
-        # Find the highest class score and look up the label for that
-        label = labels[scores.index(max(scores))]
-
-        # Draw a box around the face
-        img.draw_rectangle(f)
-
-        # Draw the label above the face
-        img.draw_string(f[0]+3, f[1]-1, label, mono_space=False)
-
-    print(clock.fps())
diff --git a/scripts/examples/Arduino/Portenta-H7/25-Machine-Learning/tf_mobilenet_search_just_center.py b/scripts/examples/Arduino/Portenta-H7/25-Machine-Learning/tf_mobilenet_search_just_center.py
deleted file mode 100644
index e8f12f924..000000000
--- a/scripts/examples/Arduino/Portenta-H7/25-Machine-Learning/tf_mobilenet_search_just_center.py
+++ /dev/null
@@ -1,66 +0,0 @@
-# TensorFlow Lite Mobilenet V1 Example
-#
-# Google's Mobilenet V1 detects 1000 classes of objects
-#
-# WARNING: Mobilenet is trained on ImageNet and isn't meant to classify anything
-# in the real world. It's just designed to score well on the ImageNet dataset.
-# This example just shows off running mobilenet on the OpenMV Cam. However, the
-# default model is not really usable for anything. You have to use transfer
-# learning to apply the model to a target problem by re-training the model.
-#
-# NOTE: This example only works on the OpenMV Cam H7 Pro (which has SDRAM) and better!
-# To get the models please see the CNN Network library in OpenMV IDE under
-# Tools -> Machine Vision. The labels are there too.
-# You should insert a microSD card into your camera and copy-paste the mobilenet_labels.txt
-# file and your chosen model into the root folder for this script to work.
-#
-# In this example we slide the detector window over the image and get a list
-# of activations. Note that using a CNN with a sliding window is extremely compute
-# expensive, so for an exhaustive search do not expect the CNN to be real-time.

import sensor, image, time, os, tf
-
-sensor.reset() # Reset and initialize the sensor.
-sensor.set_pixformat(sensor.GRAYSCALE) # Set pixel format to GRAYSCALE (or RGB565)
-sensor.set_framesize(sensor.QVGA) # Set frame size to QVGA (320x240)
-sensor.set_windowing((240, 240)) # Set 240x240 window.
-sensor.skip_frames(time=2000) # Let the camera adjust.
-
-mobilenet_version = "1" # 1
-mobilenet_width = "0.5" # 1.0, 0.75, 0.50, 0.25
-mobilenet_resolution = "128" # 224, 192, 160, 128
-
-mobilenet = "mobilenet_v%s_%s_%s_quant.tflite" % (mobilenet_version, mobilenet_width, mobilenet_resolution)
-labels = [line.rstrip('\n') for line in open("mobilenet_labels.txt")]
-
-clock = time.clock()
-while(True):
-    clock.tick()
-
-    img = sensor.snapshot()
-
-    # tf.classify() will run the network on an ROI in the image (or on the whole image if the roi is not
-    # specified). A classification score output vector will be generated for each location. At each scale the
-    # detection window is moved around in the ROI using x_overlap (0-1) and y_overlap (0-1) as a guide.
-    # If you set the overlap to 0.5 then each detection window will overlap the previous one by 50%. Note
-    # the computational workload goes WAY up with more overlap. Finally, for multi-scale matching, after
-    # sliding the network around in the x/y dimensions the detection window will shrink by scale_mul (0-1)
-    # down to min_scale (0-1). For example, if scale_mul is 0.5 the detection window will shrink by 50%.
-    # Note that at a lower scale there's even more area to search if x_overlap and y_overlap are small...

-    # Setting x_overlap=-1 forces the window to stay centered in the ROI in the x direction always. If
-    # y_overlap is not -1 the method will search in all vertical positions.

-    # Setting y_overlap=-1 forces the window to stay centered in the ROI in the y direction always. If
-    # x_overlap is not -1 the method will search in all horizontal positions.

-    # default settings just do one detection... change them to search the image...
-    for obj in tf.classify(mobilenet, img, min_scale=1.0, scale_mul=0.5, x_overlap=-1, y_overlap=-1):
-        print("**********\nTop 5 Detections at [x=%d,y=%d,w=%d,h=%d]" % obj.rect())
-        img.draw_rectangle(obj.rect())
-        # This combines the labels and confidence values into a list of tuples
-        # and then sorts that list by the confidence values.
-        sorted_list = sorted(zip(labels, obj.output()), key = lambda x: x[1], reverse = True)
-        for i in range(5):
-            print("%s = %f" % (sorted_list[i][0], sorted_list[i][1]))
-    print(clock.fps(), "fps")
diff --git a/scripts/examples/Arduino/Portenta-H7/25-Machine-Learning/tf_mobilenet_search_whole_window.py b/scripts/examples/Arduino/Portenta-H7/25-Machine-Learning/tf_mobilenet_search_whole_window.py
deleted file mode 100644
index da7869c31..000000000
--- a/scripts/examples/Arduino/Portenta-H7/25-Machine-Learning/tf_mobilenet_search_whole_window.py
+++ /dev/null
@@ -1,60 +0,0 @@
-# TensorFlow Lite Mobilenet V1 Example
-#
-# Google's Mobilenet V1 detects 1000 classes of objects
-#
-# WARNING: Mobilenet is trained on ImageNet and isn't meant to classify anything
-# in the real world. It's just designed to score well on the ImageNet dataset.
-# This example just shows off running mobilenet on the OpenMV Cam. However, the
-# default model is not really usable for anything. You have to use transfer
-# learning to apply the model to a target problem by re-training the model.
-#
-# NOTE: This example only works on the OpenMV Cam H7 Pro (which has SDRAM) and better!
-# To get the models please see the CNN Network library in OpenMV IDE under
-# Tools -> Machine Vision. The labels are there too.
-# You should insert a microSD card into your camera and copy-paste the mobilenet_labels.txt
-# file and your chosen model into the root folder for this script to work.
-#
-# In this example we slide the detector window over the image and get a list
-# of activations. Note that using a CNN with a sliding window is extremely compute
-# expensive, so for an exhaustive search do not expect the CNN to be real-time.

import sensor, image, time, os, tf
-
-sensor.reset() # Reset and initialize the sensor.
-sensor.set_pixformat(sensor.GRAYSCALE) # Set pixel format to GRAYSCALE (or RGB565)
-sensor.set_framesize(sensor.QVGA) # Set frame size to QVGA (320x240)
-sensor.set_windowing((240, 240)) # Set 240x240 window.
-sensor.skip_frames(time=2000) # Let the camera adjust.
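The warning above about the workload growing with overlap can be made concrete with a few lines of plain Python. This helper is purely illustrative (it is not part of the tf API) and assumes the model these comments describe: the window slides by window_size * (1 - overlap) per step, then shrinks by scale_mul until it reaches min_scale:

    # Rough estimate of how many windows a sliding-window classify would
    # evaluate over a square ROI, under the assumptions stated above.
    def estimate_windows(roi, min_scale, scale_mul, x_overlap, y_overlap):
        total, scale = 0, 1.0
        while scale >= min_scale:
            win = int(roi * scale)
            step_x = max(1, int(win * (1.0 - x_overlap)))
            step_y = max(1, int(win * (1.0 - y_overlap)))
            total += ((roi - win) // step_x + 1) * ((roi - win) // step_y + 1)
            scale *= scale_mul
        return total

    print(estimate_windows(240, 0.5, 0.5, 0.0, 0.0))  # 5 windows with no overlap.
    print(estimate_windows(240, 0.5, 0.5, 0.5, 0.5))  # 10 windows at 50% overlap; it grows fast.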
-
-mobilenet_version = "1" # 1
-mobilenet_width = "0.5" # 1.0, 0.75, 0.50, 0.25
-mobilenet_resolution = "128" # 224, 192, 160, 128
-
-mobilenet = "mobilenet_v%s_%s_%s_quant.tflite" % (mobilenet_version, mobilenet_width, mobilenet_resolution)
-labels = [line.rstrip('\n') for line in open("mobilenet_labels.txt")]
-
-clock = time.clock()
-while(True):
-    clock.tick()
-
-    img = sensor.snapshot()
-
-    # tf.classify() will run the network on an ROI in the image (or on the whole image if the roi is not
-    # specified). A classification score output vector will be generated for each location. At each scale the
-    # detection window is moved around in the ROI using x_overlap (0-1) and y_overlap (0-1) as a guide.
-    # If you set the overlap to 0.5 then each detection window will overlap the previous one by 50%. Note
-    # the computational workload goes WAY up with more overlap. Finally, for multi-scale matching, after
-    # sliding the network around in the x/y dimensions the detection window will shrink by scale_mul (0-1)
-    # down to min_scale (0-1). For example, if scale_mul is 0.5 the detection window will shrink by 50%.
-    # Note that at a lower scale there's even more area to search if x_overlap and y_overlap are small...

-    # default settings just do one detection... change them to search the image...
-    for obj in tf.classify(mobilenet, img, min_scale=1.0, scale_mul=0.5, x_overlap=0.0, y_overlap=0.0):
-        print("**********\nTop 5 Detections at [x=%d,y=%d,w=%d,h=%d]" % obj.rect())
-        img.draw_rectangle(obj.rect())
-        # This combines the labels and confidence values into a list of tuples
-        # and then sorts that list by the confidence values.
-        sorted_list = sorted(zip(labels, obj.output()), key = lambda x: x[1], reverse = True)
-        for i in range(5):
-            print("%s = %f" % (sorted_list[i][0], sorted_list[i][1]))
-    print(clock.fps(), "fps")
diff --git a/scripts/examples/Arduino/Portenta-H7/25-Machine-Learning/tf_object_detection.py b/scripts/examples/Arduino/Portenta-H7/25-Machine-Learning/tf_object_detection.py
deleted file mode 100644
index 48a2254ba..000000000
--- a/scripts/examples/Arduino/Portenta-H7/25-Machine-Learning/tf_object_detection.py
+++ /dev/null
@@ -1,51 +0,0 @@
-# TensorFlow Lite Object Detection Example
-#
-# This example shows off object detection. Object detection is much more powerful than
-# object classification. It can locate multiple objects in the image.
-
-import sensor, image, time, os, tf
-
-sensor.reset() # Reset and initialize the sensor.
-sensor.set_pixformat(sensor.RGB565) # Set pixel format to RGB565 (or GRAYSCALE)
-sensor.set_framesize(sensor.QVGA) # Set frame size to QVGA (320x240)
-sensor.set_windowing((240, 240)) # Set 240x240 window.
-sensor.skip_frames(time=2000) # Let the camera adjust.
-
-net = tf.load('', load_to_fb=True)
-labels = []
-
-try: # Load labels if they exist
-    labels = [line.rstrip('\n') for line in open("labels.txt")]
-except:
-    pass
-
-colors = [ # Add more colors if you are detecting more than 7 types of classes at once.
-    (255,   0,   0),
-    (  0, 255,   0),
-    (255, 255,   0),
-    (  0,   0, 255),
-    (255,   0, 255),
-    (  0, 255, 255),
-    (255, 255, 255),
-]
-
-clock = time.clock()
-while(True):
-    clock.tick()
-
-    img = sensor.snapshot()
-
-    # detect() segments an object using the provided segmentation model. This produces multiple
-    # grayscale images per object class that we are trying to detect. detect() then runs
-    # find_blobs() internally on the segmented images to find all blob locations and then returns
-    # the bounding boxes of all blobs found per object class. So, detect() returns a list of lists of
So, detect() returns a list of lists of - # classification objects and the respective confidence level. - - for i, detection_list in enumerate(net.detect(img, thresholds=[(128, 255)])): - if (i < len(labels)): - print("********** %s **********" % labels[i]) - for d in detection_list: - print(d) - img.draw_rectangle(d.rect(), color=colors[i]) - - print(clock.fps(), "fps", end="\n\n") diff --git a/scripts/examples/Arduino/Portenta-H7/25-Machine-Learning/tf_person_detection_search_just_center.py b/scripts/examples/Arduino/Portenta-H7/25-Machine-Learning/tf_person_detection_search_just_center.py deleted file mode 100644 index b1531f58b..000000000 --- a/scripts/examples/Arduino/Portenta-H7/25-Machine-Learning/tf_person_detection_search_just_center.py +++ /dev/null @@ -1,48 +0,0 @@ -# TensorFlow Lite Person Dection Example -# -# Google's Person Detection Model detects if a person is in view. -# -# In this example we slide the detector window over the image and get a list -# of activations. Note that use a CNN with a sliding window is extremely compute -# expensive so for an exhaustive search do not expect the CNN to be real-time. - -import sensor, image, time, os, tf - -sensor.reset() # Reset and initialize the sensor. -sensor.set_pixformat(sensor.GRAYSCALE) # Set pixel format to RGB565 (or GRAYSCALE) -sensor.set_framesize(sensor.QVGA) # Set frame size to QVGA (320x240) -sensor.set_windowing((240, 240)) # Set 240x240 window. -sensor.skip_frames(time=2000) # Let the camera adjust. - -# Load the built-in person detection network (the network is in your OpenMV Cam's firmware). -labels, net = tf.load_builtin_model('person_detection') - -clock = time.clock() -while(True): - clock.tick() - - img = sensor.snapshot() - - # net.classify() will run the network on an roi in the image (or on the whole image if the roi is not - # specified). A classification score output vector will be generated for each location. At each scale the - # detection window is moved around in the ROI using x_overlap (0-1) and y_overlap (0-1) as a guide. - # If you set the overlap to 0.5 then each detection window will overlap the previous one by 50%. Note - # the computational work load goes WAY up the more overlap. Finally, for multi-scale matching after - # sliding the network around in the x/y dimensions the detection window will shrink by scale_mul (0-1) - # down to min_scale (0-1). For example, if scale_mul is 0.5 the detection window will shrink by 50%. - # Note that at a lower scale there's even more area to search if x_overlap and y_overlap are small... - - # Setting x_overlap=-1 forces the window to stay centered in the ROI in the x direction always. If - # y_overlap is not -1 the method will search in all vertical positions. - - # Setting y_overlap=-1 forces the window to stay centered in the ROI in the y direction always. If - # x_overlap is not -1 the method will serach in all horizontal positions. - - # default settings just do one detection... change them to search the image... 
-    for obj in net.classify(img, min_scale=0.5, scale_mul=0.5, x_overlap=-1, y_overlap=-1):
-        print("**********\nDetections at [x=%d,y=%d,w=%d,h=%d]" % obj.rect())
-        for i in range(len(obj.output())):
-            print("%s = %f" % (labels[i], obj.output()[i]))
-        img.draw_rectangle(obj.rect())
-        img.draw_string(obj.x()+3, obj.y()-1, labels[obj.output().index(max(obj.output()))], mono_space = False)
-    print(clock.fps(), "fps")
diff --git a/scripts/examples/Arduino/Portenta-H7/25-Machine-Learning/tf_person_detection_search_whole_window.py b/scripts/examples/Arduino/Portenta-H7/25-Machine-Learning/tf_person_detection_search_whole_window.py
deleted file mode 100644
index ff05e03d6..000000000
--- a/scripts/examples/Arduino/Portenta-H7/25-Machine-Learning/tf_person_detection_search_whole_window.py
+++ /dev/null
@@ -1,42 +0,0 @@
-# TensorFlow Lite Person Detection Example
-#
-# Google's Person Detection Model detects if a person is in view.
-#
-# In this example we slide the detector window over the image and get a list
-# of activations. Note that using a CNN with a sliding window is extremely compute
-# expensive, so for an exhaustive search do not expect the CNN to be real-time.
-
-import sensor, image, time, os, tf
-
-sensor.reset() # Reset and initialize the sensor.
-sensor.set_pixformat(sensor.GRAYSCALE) # Set pixel format to GRAYSCALE (or RGB565)
-sensor.set_framesize(sensor.QVGA) # Set frame size to QVGA (320x240)
-sensor.set_windowing((240, 240)) # Set 240x240 window.
-sensor.skip_frames(time=2000) # Let the camera adjust.
-
-# Load the built-in person detection network (the network is in your OpenMV Cam's firmware).
-labels, net = tf.load_builtin_model('person_detection')
-
-clock = time.clock()
-while(True):
-    clock.tick()
-
-    img = sensor.snapshot()
-
-    # net.classify() will run the network on an ROI in the image (or on the whole image if the roi is not
-    # specified). A classification score output vector will be generated for each location. At each scale the
-    # detection window is moved around in the ROI using x_overlap (0-1) and y_overlap (0-1) as a guide.
-    # If you set the overlap to 0.5 then each detection window will overlap the previous one by 50%. Note
-    # the computational workload goes WAY up with more overlap. Finally, for multi-scale matching, after
-    # sliding the network around in the x/y dimensions the detection window will shrink by scale_mul (0-1)
-    # down to min_scale (0-1). For example, if scale_mul is 0.5 the detection window will shrink by 50%.
-    # Note that at a lower scale there's even more area to search if x_overlap and y_overlap are small...

-    # default settings just do one detection... change them to search the image...
-    for obj in net.classify(img, min_scale=1.0, scale_mul=0.5, x_overlap=0.0, y_overlap=0.0):
-        print("**********\nDetections at [x=%d,y=%d,w=%d,h=%d]" % obj.rect())
-        for i in range(len(obj.output())):
-            print("%s = %f" % (labels[i], obj.output()[i]))
-        img.draw_rectangle(obj.rect())
-        img.draw_string(obj.x()+3, obj.y()-1, labels[obj.output().index(max(obj.output()))], mono_space = False)
-    print(clock.fps(), "fps")
diff --git a/scripts/examples/Arduino/Portenta-H7/26-April-Tags/find_apriltags.py b/scripts/examples/Arduino/Portenta-H7/26-April-Tags/find_apriltags.py
deleted file mode 100644
index 33773cd8e..000000000
--- a/scripts/examples/Arduino/Portenta-H7/26-April-Tags/find_apriltags.py
+++ /dev/null
@@ -1,56 +0,0 @@
-# AprilTags Example
-#
-# This example shows the power of the OpenMV Cam to detect April Tags
-# on the OpenMV Cam M7. The M4 versions cannot detect April Tags.
-
-import sensor, image, time, math
-
-sensor.reset()
-sensor.set_pixformat(sensor.GRAYSCALE)
-sensor.set_framesize(sensor.QQVGA) # we run out of memory if the resolution is much bigger...
-sensor.skip_frames(time = 2000)
-sensor.set_auto_gain(False) # must turn this off to prevent image washout...
-clock = time.clock()
-
-# Note! Unlike find_qrcodes, the find_apriltags method does not need lens correction on the image to work.
-
-# The apriltag code supports up to 6 tag families which can be processed at the same time.
-# Returned tag objects will have their tag family and id within the tag family.
-
-tag_families = 0
-tag_families |= image.TAG16H5 # comment out to disable this family
-tag_families |= image.TAG25H7 # comment out to disable this family
-tag_families |= image.TAG25H9 # comment out to disable this family
-tag_families |= image.TAG36H10 # comment out to disable this family
-tag_families |= image.TAG36H11 # comment out to disable this family (default family)
-tag_families |= image.ARTOOLKIT # comment out to disable this family
-
-# What's the difference between tag families? Well, for example, the TAG16H5 family is effectively
-# a 4x4 square tag. So, this means it can be seen at a longer distance than a TAG36H11 tag which
-# is a 6x6 square tag. However, the lower H value (H5 versus H11) means that the false positive
-# rate for the 4x4 tag is much, much, much, higher than the 6x6 tag. So, unless you have a
-# reason to use the other tag families, just use TAG36H11 which is the default family.
-
-def family_name(tag):
-    if(tag.family() == image.TAG16H5):
-        return "TAG16H5"
-    if(tag.family() == image.TAG25H7):
-        return "TAG25H7"
-    if(tag.family() == image.TAG25H9):
-        return "TAG25H9"
-    if(tag.family() == image.TAG36H10):
-        return "TAG36H10"
-    if(tag.family() == image.TAG36H11):
-        return "TAG36H11"
-    if(tag.family() == image.ARTOOLKIT):
-        return "ARTOOLKIT"
-
-while(True):
-    clock.tick()
-    img = sensor.snapshot()
-    for tag in img.find_apriltags(families=tag_families): # defaults to TAG36H11 without "families".
-        img.draw_rectangle(tag.rect(), color = (255, 0, 0))
-        img.draw_cross(tag.cx(), tag.cy(), color = (0, 255, 0))
-        print_args = (family_name(tag), tag.id(), (180 * tag.rotation()) / math.pi)
-        print("Tag Family %s, Tag ID %d, rotation %f (degrees)" % print_args)
-    print(clock.fps())
diff --git a/scripts/examples/Arduino/Portenta-H7/26-April-Tags/find_apriltags_3d_pose.py b/scripts/examples/Arduino/Portenta-H7/26-April-Tags/find_apriltags_3d_pose.py
deleted file mode 100644
index 8b9c45e81..000000000
--- a/scripts/examples/Arduino/Portenta-H7/26-April-Tags/find_apriltags_3d_pose.py
+++ /dev/null
@@ -1,56 +0,0 @@
-# AprilTags Example
-#
-# This example shows the power of the OpenMV Cam to detect April Tags
-# on the OpenMV Cam M7. The M4 versions cannot detect April Tags.
-
-import sensor, image, time, math
-
-sensor.reset()
-sensor.set_pixformat(sensor.GRAYSCALE)
-sensor.set_framesize(sensor.QQVGA) # we run out of memory if the resolution is much bigger...
-sensor.skip_frames(time = 2000)
-sensor.set_auto_gain(False) # must turn this off to prevent image washout...
-clock = time.clock()
-
-# Note! Unlike find_qrcodes, the find_apriltags method does not need lens correction on the image to work.
-
-# What's the difference between tag families? Well, for example, the TAG16H5 family is effectively
-# a 4x4 square tag. So, this means it can be seen at a longer distance than a TAG36H11 tag which
-# is a 6x6 square tag. However, the lower H value (H5 versus H11) means that the false positive
-# rate for the 4x4 tag is much, much, much, higher than the 6x6 tag. So, unless you have a
-# reason to use the other tag families, just use TAG36H11 which is the default family.
-
-# The AprilTags library outputs the pose information for tags. This is the x/y/z translation and
-# x/y/z rotation. The x/y/z rotation is in radians and can be converted to degrees. As for
-# translation, the units are dimensionless and you must apply a conversion function.
-
-# f_x is the x focal length of the camera. It should be equal to the lens focal length in mm
-# divided by the x sensor size in mm times the number of pixels in the image.
-# The below values are for the OV7725 camera with a 2.8 mm lens.
-
-# f_y is the y focal length of the camera. It should be equal to the lens focal length in mm
-# divided by the y sensor size in mm times the number of pixels in the image.
-# The below values are for the OV7725 camera with a 2.8 mm lens.
-
-# c_x is the image x center position in pixels.
-# c_y is the image y center position in pixels.
-
-f_x = (2.8 / 3.984) * 160 # find_apriltags defaults to this if not set
-f_y = (2.8 / 2.952) * 120 # find_apriltags defaults to this if not set
-c_x = 160 * 0.5 # find_apriltags defaults to this if not set (the image.w * 0.5)
-c_y = 120 * 0.5 # find_apriltags defaults to this if not set (the image.h * 0.5)
-
-def degrees(radians):
-    return (180 * radians) / math.pi
-
-while(True):
-    clock.tick()
-    img = sensor.snapshot()
-    for tag in img.find_apriltags(fx=f_x, fy=f_y, cx=c_x, cy=c_y): # defaults to TAG36H11
-        img.draw_rectangle(tag.rect(), color = (255, 0, 0))
-        img.draw_cross(tag.cx(), tag.cy(), color = (0, 255, 0))
-        print_args = (tag.x_translation(), tag.y_translation(), tag.z_translation(), \
-                      degrees(tag.x_rotation()), degrees(tag.y_rotation()), degrees(tag.z_rotation()))
-        # Translation units are unknown. Rotation units are in degrees.
-        print("Tx: %f, Ty %f, Tz %f, Rx %f, Ry %f, Rz %f" % print_args)
-    print(clock.fps())
diff --git a/scripts/examples/Arduino/Portenta-H7/26-April-Tags/find_apriltags_max_res.py b/scripts/examples/Arduino/Portenta-H7/26-April-Tags/find_apriltags_max_res.py
deleted file mode 100644
index ad4b83fab..000000000
--- a/scripts/examples/Arduino/Portenta-H7/26-April-Tags/find_apriltags_max_res.py
+++ /dev/null
@@ -1,60 +0,0 @@
-# AprilTags Max Res Example
-#
-# This example shows the power of the OpenMV Cam to detect April Tags
-# on the OpenMV Cam M7. The M4 versions cannot detect April Tags.
-
-import sensor, image, time, math, omv
-
-sensor.reset()
-sensor.set_pixformat(sensor.GRAYSCALE)
-sensor.set_framesize(sensor.VGA) # we run out of memory if the resolution is much bigger...
-# AprilTags works on images of < 64K pixels.
-if omv.board_type() == "H7": sensor.set_windowing((240, 240))
-elif omv.board_type() == "M7": sensor.set_windowing((200, 200))
-else: raise Exception("You need a more powerful OpenMV Cam to run this script")
-sensor.skip_frames(time = 2000)
-sensor.set_auto_gain(False) # must turn this off to prevent image washout...
-clock = time.clock()
-
-# Note! Unlike find_qrcodes, the find_apriltags method does not need lens correction on the image to work.
-
-# The apriltag code supports up to 6 tag families which can be processed at the same time.
-# Returned tag objects will have their tag family and id within the tag family.
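Because each family constant is a bit flag, the block below is just an OR-ed bitmask; enabling a subset means OR-ing fewer constants. A sketch of the leaner setup these comments recommend (same image module constants, inside the usual snapshot loop):

    # Default family only: lowest false-positive rate, as advised above.
    tag_families = image.TAG36H11
    # tag_families = image.TAG36H11 | image.TAG16H5  # Add TAG16H5 for longer range at the cost of false positives.

    for tag in img.find_apriltags(families=tag_families):
        print(tag.family() == image.TAG36H11, tag.id())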
-
-tag_families = 0
-tag_families |= image.TAG16H5 # comment out to disable this family
-tag_families |= image.TAG25H7 # comment out to disable this family
-tag_families |= image.TAG25H9 # comment out to disable this family
-tag_families |= image.TAG36H10 # comment out to disable this family
-tag_families |= image.TAG36H11 # comment out to disable this family (default family)
-tag_families |= image.ARTOOLKIT # comment out to disable this family
-
-# What's the difference between tag families? Well, for example, the TAG16H5 family is effectively
-# a 4x4 square tag. So, this means it can be seen at a longer distance than a TAG36H11 tag which
-# is a 6x6 square tag. However, the lower H value (H5 versus H11) means that the false positive
-# rate for the 4x4 tag is much, much, much, higher than the 6x6 tag. So, unless you have a
-# reason to use the other tag families, just use TAG36H11 which is the default family.
-
-def family_name(tag):
-    if(tag.family() == image.TAG16H5):
-        return "TAG16H5"
-    if(tag.family() == image.TAG25H7):
-        return "TAG25H7"
-    if(tag.family() == image.TAG25H9):
-        return "TAG25H9"
-    if(tag.family() == image.TAG36H10):
-        return "TAG36H10"
-    if(tag.family() == image.TAG36H11):
-        return "TAG36H11"
-    if(tag.family() == image.ARTOOLKIT):
-        return "ARTOOLKIT"
-
-while(True):
-    clock.tick()
-    img = sensor.snapshot()
-    for tag in img.find_apriltags(families=tag_families): # defaults to TAG36H11 without "families".
-        img.draw_rectangle(tag.rect(), color = 127)
-        img.draw_cross(tag.cx(), tag.cy(), color = 127)
-        print_args = (family_name(tag), tag.id(), (180 * tag.rotation()) / math.pi)
-        print("Tag Family %s, Tag ID %d, rotation %f (degrees)" % print_args)
-    print(clock.fps())
diff --git a/scripts/examples/Arduino/Portenta-H7/26-April-Tags/find_apriltags_w_lens_zoom.py b/scripts/examples/Arduino/Portenta-H7/26-April-Tags/find_apriltags_w_lens_zoom.py
deleted file mode 100644
index 18b3248ae..000000000
--- a/scripts/examples/Arduino/Portenta-H7/26-April-Tags/find_apriltags_w_lens_zoom.py
+++ /dev/null
@@ -1,32 +0,0 @@
-# AprilTags Example
-#
-# This example shows the power of the OpenMV Cam to detect April Tags
-# on the OpenMV Cam M7. The M4 versions cannot detect April Tags.
-
-import sensor, image, time, math
-
-sensor.reset()
-sensor.set_pixformat(sensor.GRAYSCALE)
-sensor.set_framesize(sensor.VGA) # we run out of memory if the resolution is much bigger...
-sensor.set_windowing((160, 120)) # Look at the center 160x120 pixels of the VGA resolution.
-sensor.skip_frames(time = 2000)
-sensor.set_auto_gain(False) # must turn this off to prevent image washout...
-clock = time.clock()
-
-# Note! Unlike find_qrcodes, the find_apriltags method does not need lens correction on the image to work.
-
-# What's the difference between tag families? Well, for example, the TAG16H5 family is effectively
-# a 4x4 square tag. So, this means it can be seen at a longer distance than a TAG36H11 tag which
-# is a 6x6 square tag. However, the lower H value (H5 versus H11) means that the false positive
-# rate for the 4x4 tag is much, much, much, higher than the 6x6 tag. So, unless you have a
-# reason to use the other tag families, just use TAG36H11 which is the default family.
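One small note on the rotation math used in the loops below: `(180 * tag.rotation()) / math.pi` is a hand-rolled radians-to-degrees conversion. Since these scripts already import math, the standard `math.degrees()` expresses the same thing more directly:

    import math
    deg = math.degrees(tag.rotation())  # Identical value to (180 * tag.rotation()) / math.pi.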
-
-while(True):
-    clock.tick()
-    img = sensor.snapshot()
-    for tag in img.find_apriltags(): # defaults to TAG36H11
-        img.draw_rectangle(tag.rect(), color = (255, 0, 0))
-        img.draw_cross(tag.cx(), tag.cy(), color = (0, 255, 0))
-        print_args = (tag.id(), (180 * tag.rotation()) / math.pi)
-        print("Tag Family TAG36H11, Tag ID %d, rotation %f (degrees)" % print_args)
-    print(clock.fps())
diff --git a/scripts/examples/Arduino/Portenta-H7/26-April-Tags/find_small_apriltags.py b/scripts/examples/Arduino/Portenta-H7/26-April-Tags/find_small_apriltags.py
deleted file mode 100644
index b634bf9b9..000000000
--- a/scripts/examples/Arduino/Portenta-H7/26-April-Tags/find_small_apriltags.py
+++ /dev/null
@@ -1,70 +0,0 @@
-# Find Small Apriltags
-#
-# This script shows off how to use blob tracking as a pre-filter for
-# finding Apriltags in the image: blob tracking first finds the area
-# where a tag might be, and then find_apriltags is called on just that blob's ROI.
-
-# Note, this script works well assuming most parts of the image do not
-# pass the thresholding test... otherwise, you don't get a distance
-# benefit.
-
-import sensor, image, time, math, omv
-
-# Set the thresholds to find a white object (i.e. tag border)
-thresholds = (150, 255)
-
-sensor.reset()
-sensor.set_pixformat(sensor.GRAYSCALE)
-if omv.board_type() == "H7": sensor.set_framesize(sensor.VGA)
-elif omv.board_type() == "M7": sensor.set_framesize(sensor.QVGA)
-else: raise Exception("You need a more powerful OpenMV Cam to run this script")
-sensor.skip_frames(time = 200) # increase this to let the auto methods run for longer
-sensor.set_auto_gain(False) # must be turned off for color tracking
-clock = time.clock()
-
-# The apriltag code supports up to 6 tag families which can be processed at the same time.
-# Returned tag objects will have their tag family and id within the tag family.
-tag_families = 0
-tag_families |= image.TAG16H5 # comment out to disable this family
-tag_families |= image.TAG25H7 # comment out to disable this family
-tag_families |= image.TAG25H9 # comment out to disable this family
-tag_families |= image.TAG36H10 # comment out to disable this family
-tag_families |= image.TAG36H11 # comment out to disable this family (default family)
-tag_families |= image.ARTOOLKIT # comment out to disable this family
-
-while(True):
-    clock.tick()
-    img = sensor.snapshot()
-
-    # First, we find blobs that may be candidates for tags.
-    box_list = []
-
-    # AprilTags may fail due to not having enough RAM given the image size being passed.
-    tag_list = []
-
-    for blob in img.find_blobs([thresholds], pixels_threshold=100, area_threshold=100, merge=True):
-        # Next we look for a tag in an ROI that's bigger than the blob.
-        w = min(max(int(blob.w() * 1.2), 10), 160) # Not too small, not too big.
-        h = min(max(int(blob.h() * 1.2), 10), 160) # Not too small, not too big.
-        x = min(max(int(blob.x() + (blob.w()/4) - (w * 0.1)), 0), img.width()-1)
-        y = min(max(int(blob.y() + (blob.h()/4) - (h * 0.1)), 0), img.height()-1)
-
-        box_list.append((x, y, w, h)) # We'll draw these later.
-
-        # Since we constrain the ROI size apriltags shouldn't run out of RAM.
-        # But, if it does we handle it...
-        try:
-            tag_list.extend(img.find_apriltags(roi=(x,y,w,h), families=tag_families))
-        except (MemoryError): # Don't catch all exceptions otherwise you can't stop the script.
-            pass
-
-    for b in box_list:
-        img.draw_rectangle(b)
-    # Now print out the found tags
-    for tag in tag_list:
-        img.draw_rectangle(tag.rect())
-        img.draw_cross(tag.cx(), tag.cy())
-        for c in tag.corners():
-            img.draw_circle(c[0], c[1], 5)
-        print("Tag:", tag.cx(), tag.cy(), tag.rotation(), tag.id())
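The ROI arithmetic in the loop above (grow the blob box, keep each side between 10 and 160 pixels, clamp to the image) can be pulled into a helper for readability. A minimal sketch of the same idea — expand_and_clamp_roi is a hypothetical name, and it re-centers on the blob instead of reproducing the original quarter-offset math exactly:

def expand_and_clamp_roi(blob, img, scale=1.2, min_side=10, max_side=160):
    # Grow the blob's bounding box by "scale", then bound each side so the
    # ROI stays small enough for find_apriltags() to run without a MemoryError.
    w = min(max(int(blob.w() * scale), min_side), max_side)
    h = min(max(int(blob.h() * scale), min_side), max_side)
    # Re-center the grown box on the blob and clamp it to the image.
    x = min(max(blob.cx() - (w // 2), 0), img.width() - 1)
    y = min(max(blob.cy() - (h // 2), 0), img.height() - 1)
    return (x, y, w, h)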
diff --git a/scripts/examples/Arduino/Portenta-H7/34-Remote-Control/image_transfer_jpg_as_the_remote_device_for_your_computer.py b/scripts/examples/Arduino/Portenta-H7/34-Remote-Control/image_transfer_jpg_as_the_remote_device_for_your_computer.py
deleted file mode 100644
index 9e4ffad60..000000000
--- a/scripts/examples/Arduino/Portenta-H7/34-Remote-Control/image_transfer_jpg_as_the_remote_device_for_your_computer.py
+++ /dev/null
@@ -1,87 +0,0 @@
-# Image Transfer - As The Remote Device
-#
-# This script is meant to talk to the "image_transfer_jpg_as_the_controller_device.py" on your computer.
-#
-# This script shows off how to transfer the frame buffer to your computer as a jpeg image.
-
-import image, network, omv, rpc, sensor, struct
-
-sensor.reset()
-sensor.set_pixformat(sensor.GRAYSCALE)
-sensor.set_framesize(sensor.QVGA)
-sensor.skip_frames(time = 2000)
-
-# Turn off the frame buffer connection to the IDE from the OpenMV Cam side.
-#
-# This needs to be done when manually compressing jpeg images at higher quality
-# so that the OpenMV Cam does not try to stream them to the IDE using a fall back
-# mechanism if the JPEG image is too large to fit in the IDE JPEG frame buffer on the OpenMV Cam.
-
-omv.disable_fb(True)
-
-# The RPC library above is installed on your OpenMV Cam and provides multiple classes for
-# allowing your OpenMV Cam to be controlled over USB or LAN/WLAN.
-
-################################################################
-# Choose the interface you wish to control your OpenMV Cam over.
-################################################################
-
-# Uncomment the below line to setup your OpenMV Cam for control over a USB VCP.
-#
-interface = rpc.rpc_usb_vcp_slave()
-
-# Uncomment the below lines to setup your OpenMV Cam for control over the LAN.
-#
-# network_if = network.LAN()
-# network_if.active(True)
-# network_if.ifconfig('dhcp')
-#
-# interface = rpc.rpc_network_slave(network_if)
-
-# Uncomment the below lines to setup your OpenMV Cam for control over the WLAN.
-#
-# network_if = network.WLAN(network.STA_IF)
-# network_if.active(True)
-# network_if.connect('your-ssid', 'your-password')
-#
-# interface = rpc.rpc_network_slave(network_if)
-
-################################################################
-# Call Backs
-################################################################
-
-# When called sets the pixformat and framesize, takes a snapshot
-# and then returns the frame buffer jpg size to store the image in.
-#
-# data is a pixformat string and framesize string.
-def jpeg_image_snapshot(data):
-    pixformat, framesize = bytes(data).decode().split(",")
-    sensor.set_pixformat(eval(pixformat))
-    sensor.set_framesize(eval(framesize))
-    img = sensor.snapshot().compress(quality=90)
-    return struct.pack("<I", img.size())
diff --git a/scripts/examples/Arduino/Portenta-H7/99-Tests/colorbar.py b/scripts/examples/Arduino/Portenta-H7/99-Tests/colorbar.py
deleted file mode 100644
--- a/scripts/examples/Arduino/Portenta-H7/99-Tests/colorbar.py
+++ /dev/null
-# Colorbar Test Example
-#
-# This example tests the color bar output of the camera sensor.
-
-import sensor, image, time
-
-sensor.reset()
-
-# Set sensor settings
-sensor.set_brightness(0)
-sensor.set_saturation(3)
-sensor.set_gainceiling(8)
-sensor.set_contrast(2)
-
-# Set sensor pixel format
-sensor.set_framesize(sensor.QVGA)
-sensor.set_pixformat(sensor.RGB565)
-
-# Enable colorbar test mode
-sensor.set_colorbar(True)
-
-# Skip a few frames to allow the sensor to settle down
-for i in range(0, 100):
-    image = sensor.snapshot()
-
-# color bars thresholds
-t = [lambda r, g, b: r < 70 and g < 70 and b < 70, # Black
-     lambda r, g, b: r < 70 and g < 70 and b > 200, # Blue
-     lambda r, g, b: r > 200 and g < 70 and b < 70, # Red
-     lambda r, g, b: r > 200 and g < 70 and b > 200, # Purple
-     lambda r, g, b: r < 70 and g > 200 and b < 70, # Green
-     lambda r, g, b: r < 70 and g > 200 and b > 200, # Aqua
-     lambda r, g, b: r > 200 and g > 200 and b < 70, # Yellow
-     lambda r, g, b: r > 200 and g > 200 and b > 200] # White
-
-# color bars are inverted for OV7725
-if (sensor.get_id() == sensor.OV7725):
-    t = t[::-1]
-
-# 320x240 image with 8 color bars, each approx 40 pixels wide.
-# we start from the vertical center of the frame buffer, and average the
-# values of 10 sample pixels from the center of each color bar.
-for i in range(0, 8):
-    avg = (0, 0, 0)
-    idx = 40*i+20 # center of colorbars
-    for off in range(0, 10): # avg 10 pixels
-        rgb = image.get_pixel(idx+off, 120)
-        avg = tuple(map(sum, zip(avg, rgb)))
-
-    if not t[i](avg[0]/10, avg[1]/10, avg[2]/10):
-        raise Exception("COLOR BARS TEST FAILED. "
-        "BAR#(%d): RGB(%d,%d,%d)"%(i+1, avg[0]/10, avg[1]/10, avg[2]/10))
-
-print("COLOR BARS TEST PASSED...")
diff --git a/scripts/examples/Arduino/Portenta-H7/99-Tests/fps.py b/scripts/examples/Arduino/Portenta-H7/99-Tests/fps.py
deleted file mode 100644
index 5e1f8bd59..000000000
--- a/scripts/examples/Arduino/Portenta-H7/99-Tests/fps.py
+++ /dev/null
@@ -1,14 +0,0 @@
-# FPS Test Script.
-import sensor, image, time
-
-sensor.reset() # Initialize the camera sensor.
-sensor.set_framesize(sensor.QQVGA) # or sensor.QVGA (or others)
-sensor.set_pixformat(sensor.GRAYSCALE) # or sensor.RGB565
-sensor.set_colorbar(True) # Enable colorbars output
-
-clock = time.clock() # Tracks FPS.
-for i in range(0, 600):
-    clock.tick() # Track elapsed milliseconds between snapshots().
-    sensor.snapshot() # Capture snapshot.
-
-print("FPS:", clock.fps())
diff --git a/scripts/examples/Arduino/Portenta-H7/99-Tests/selftest.py b/scripts/examples/Arduino/Portenta-H7/99-Tests/selftest.py
deleted file mode 100644
index f9bcc56ac..000000000
--- a/scripts/examples/Arduino/Portenta-H7/99-Tests/selftest.py
+++ /dev/null
@@ -1,77 +0,0 @@
-# Self Test Example
-#
-# This example shows how your OpenMV Cam tests itself before being allowed out
-# of the factory. Every OpenMV Cam should pass this test.
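Both the color-bar test above and the self test below hard-code idx = 40*i+20 for the bar centers, which only holds for 320-pixel-wide frames. A sketch of the generalized arithmetic (bar_center_x is a hypothetical helper, assuming the usual 8-bar pattern):

def bar_center_x(img_width, bar_index, num_bars=8):
    # Each bar is img_width/num_bars pixels wide; sample from its center.
    bar_w = img_width // num_bars
    return bar_index * bar_w + bar_w // 2

# For a 320-wide QVGA frame this reproduces 40*i+20:
# bar_center_x(320, 0) == 20, bar_center_x(320, 1) == 60, ...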
-
-import sensor, time, pyb
-
-def test_int_adc():
-    adc = pyb.ADCAll(12)
-    # Test VBAT
-    vbat = adc.read_core_vbat()
-    vbat_diff = abs(vbat-3.3)
-    if (vbat_diff > 0.1):
-        raise Exception('INTERNAL ADC TEST FAILED VBAT=%fv'%vbat)
-
-    # Test VREF
-    vref = adc.read_core_vref()
-    vref_diff = abs(vref-1.2)
-    if (vref_diff > 0.1):
-        raise Exception('INTERNAL ADC TEST FAILED VREF=%fv'%vref)
-    adc = None
-    print('INTERNAL ADC TEST PASSED...')
-
-def test_color_bars():
-    sensor.reset()
-    # Set sensor settings
-    sensor.set_brightness(0)
-    sensor.set_saturation(3)
-    sensor.set_gainceiling(8)
-    sensor.set_contrast(2)
-
-    # Set sensor pixel format
-    sensor.set_framesize(sensor.QVGA)
-    sensor.set_pixformat(sensor.RGB565) # the thresholds below need RGB tuples from get_pixel()
-
-    # Enable colorbar test mode
-    sensor.set_colorbar(True)
-
-    # Skip a few frames to allow the sensor to settle down
-    for i in range(0, 100):
-        image = sensor.snapshot()
-
-    # color bars thresholds
-    t = [lambda r, g, b: r < 70 and g < 70 and b < 70, # Black
-         lambda r, g, b: r < 70 and g < 70 and b > 200, # Blue
-         lambda r, g, b: r > 200 and g < 70 and b < 70, # Red
-         lambda r, g, b: r > 200 and g < 70 and b > 200, # Purple
-         lambda r, g, b: r < 70 and g > 200 and b < 70, # Green
-         lambda r, g, b: r < 70 and g > 200 and b > 200, # Aqua
-         lambda r, g, b: r > 200 and g > 200 and b < 70, # Yellow
-         lambda r, g, b: r > 200 and g > 200 and b > 200] # White
-
-    # color bars are inverted for OV7725
-    if (sensor.get_id() == sensor.OV7725):
-        t = t[::-1]
-
-    # 320x240 image with 8 color bars, each approx 40 pixels wide.
-    # we start from the vertical center of the frame buffer, and average the
-    # values of 10 sample pixels from the center of each color bar.
-    for i in range(0, 8):
-        avg = (0, 0, 0)
-        idx = 40*i+20 # center of colorbars
-        for off in range(0, 10): # avg 10 pixels
-            rgb = image.get_pixel(idx+off, 120)
-            avg = tuple(map(sum, zip(avg, rgb)))
-
-        if not t[i](avg[0]/10, avg[1]/10, avg[2]/10):
-            raise Exception('COLOR BARS TEST FAILED. '
-            'BAR#(%d): RGB(%d,%d,%d)'%(i+1, avg[0]/10, avg[1]/10, avg[2]/10))
-
-    print('COLOR BARS TEST PASSED...')
-
-if __name__ == '__main__':
-    print('')
-    test_int_adc()
-    test_color_bars()
-
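The two ADC checks above repeat the same measure/compare/raise pattern. As an aside, the pattern factors into a small helper; check_within is a hypothetical name, not part of the original test:

def check_within(name, value, nominal, tol):
    # Raise if a measured rail is more than "tol" volts from its nominal value.
    if abs(value - nominal) > tol:
        raise Exception('%s TEST FAILED %s=%fv' % (name, name, value))

# Usage mirroring the checks above:
# check_within('VBAT', adc.read_core_vbat(), 3.3, 0.1)
# check_within('VREF', adc.read_core_vref(), 1.2, 0.1)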
diff --git a/scripts/examples/Arduino/Portenta-H7/99-Tests/unittests.py b/scripts/examples/Arduino/Portenta-H7/99-Tests/unittests.py
deleted file mode 100644
index d20932873..000000000
--- a/scripts/examples/Arduino/Portenta-H7/99-Tests/unittests.py
+++ /dev/null
@@ -1,38 +0,0 @@
-# OpenMV Unit Tests.
-#
-import os, sensor, gc
-
-TEST_DIR = "unittest"
-TEMP_DIR = "unittest/temp"
-DATA_DIR = "unittest/data"
-SCRIPT_DIR = "unittest/script"
-
-if not (TEST_DIR in os.listdir("")):
-    raise Exception('Unittest dir not found!')
-
-print("")
-test_failed = False
-
-def print_result(test, passed):
-    s = "Unittest (%s)"%(test)
-    padding = "."*(60-len(s))
-    print(s + padding + ("PASSED" if passed == True else "FAILED"))
-
-for test in sorted(os.listdir(SCRIPT_DIR)):
-    if test.endswith(".py"):
-        test_passed = True
-        test_path = "/".join((SCRIPT_DIR, test))
-        try:
-            exec(open(test_path).read())
-            gc.collect()
-            if unittest(DATA_DIR, TEMP_DIR) == False:
-                raise Exception()
-        except Exception as e:
-            test_failed = True
-            test_passed = False
-        print_result(test, test_passed)
-
-if test_failed:
-    print("\nSome tests have FAILED!!!\n\n")
-else:
-    print("\nAll tests PASSED.\n\n")
diff --git a/scripts/examples/OpenMV/00-Arduino/arduino_i2c_slave.py b/scripts/examples/OpenMV/00-Arduino/arduino_i2c_slave.py
deleted file mode 100644
index 41aa5e09d..000000000
--- a/scripts/examples/OpenMV/00-Arduino/arduino_i2c_slave.py
+++ /dev/null
@@ -1,91 +0,0 @@
-# I2C with the Arduino as the master device and the OpenMV Cam as the slave.
-#
-# Please wire up your OpenMV Cam to your Arduino like this:
-#
-# OpenMV Cam Master I2C Data (P5) - Arduino Uno Data (A4)
-# OpenMV Cam Master I2C Clock (P4) - Arduino Uno Clock (A5)
-# OpenMV Cam Ground - Arduino Ground
-
-import pyb, ustruct
-
-text = "Hello World!\n"
-data = ustruct.pack("<%ds" % len(text), text)
-# Use "ustruct" to build data packets to send.
-# "<" puts the data in the struct in little endian order.
-# "%ds" puts a string in the data stream. E.g. "13s" for "Hello World!\n" (13 chars).
-# See https://docs.python.org/3/library/struct.html
-
-# READ ME!!!
-#
-# Please understand that when your OpenMV Cam is not the I2C master it may miss responding with
-# data as an I2C slave, whether you call "i2c.send()" in an interrupt callback or in the
-# main loop below. When this happens the Arduino will get a NAK and have to try reading from the
-# OpenMV Cam again. Note that both the Arduino and OpenMV Cam I2C drivers are not good at getting
-# unstuck after encountering any I2C errors. On the OpenMV Cam and Arduino you can recover by
-# de-initing and then re-initing the I2C peripherals.
-
-# The hardware I2C bus for your OpenMV Cam is always I2C bus 2.
-bus = pyb.I2C(2, pyb.I2C.SLAVE, addr=0x12)
-bus.deinit() # Fully reset I2C device...
-bus = pyb.I2C(2, pyb.I2C.SLAVE, addr=0x12)
-print("Waiting for Arduino...")
-
-# Note that for sync up to work correctly the OpenMV Cam must be running this script before the
-# Arduino starts to poll the OpenMV Cam for data. Otherwise the I2C byte framing gets messed up,
-# etc. So, keep the Arduino in reset until the OpenMV Cam is "Waiting for Arduino...".
-
-while(True):
-    try:
-        bus.send(ustruct.pack("<h", len(data)), timeout=10000) # Send the 16-bit length first.
-        bus.send(data, timeout=10000) # Then send the data.
-    except OSError as err:
-        pass # Don't care about errors - so pass.
-        # Note that there are 3 possible errors. A timeout error, a general purpose error, or
-        # a busy error. The error codes are 116, 5, 16 respectively for "err.arg[0]".
-
-###################################################################################################
-# Arduino Code
-###################################################################################################
-#
-# #include <Wire.h>
-# #define BAUD_RATE 19200
-# #define CHAR_BUF 128
-#
-# void setup() {
-#   Serial.begin(BAUD_RATE);
-#   Wire.begin();
-#   delay(1000); // Give the OpenMV Cam time to bootup.
-# }
-#
-# void loop() {
-#   int32_t temp = 0;
-#   char buff[CHAR_BUF] = {0};
-#
-#   Wire.requestFrom(0x12, 2);
-#   if(Wire.available() == 2) { // got length?
-#
-#     temp = Wire.read() | (Wire.read() << 8);
-#     delay(1); // Give some setup time...
-#
-#     Wire.requestFrom(0x12, temp);
-#     if(Wire.available() == temp) { // got full message?
-#
-#       temp = 0;
-#       while(Wire.available()) buff[temp++] = Wire.read();
-#
-#     } else {
-#       while(Wire.available()) Wire.read(); // Toss garbage bytes.
-#     }
-#   } else {
-#     while(Wire.available()) Wire.read(); // Toss garbage bytes.
-#   }
-#
-#   Serial.print(buff);
-#   delay(1); // Don't loop too quickly.
-# }
diff --git a/scripts/examples/OpenMV/00-Arduino/arduino_spi_slave.py b/scripts/examples/OpenMV/00-Arduino/arduino_spi_slave.py
deleted file mode 100644
index 7b3131c2f..000000000
--- a/scripts/examples/OpenMV/00-Arduino/arduino_spi_slave.py
+++ /dev/null
@@ -1,94 +0,0 @@
-# SPI with the Arduino as the master device and the OpenMV Cam as the slave.
-#
-# Please wire up your OpenMV Cam to your Arduino like this:
-#
-# OpenMV Cam Master Out Slave In (P0) - Arduino Uno MOSI (11)
-# OpenMV Cam Master In Slave Out (P1) - Arduino Uno MISO (12)
-# OpenMV Cam Serial Clock (P2) - Arduino Uno SCK (13)
-# OpenMV Cam Slave Select (P3) - Arduino Uno SS (10)
-# OpenMV Cam Ground - Arduino Ground
-
-import pyb, ustruct, time
-
-text = "Hello World!\n"
-data = ustruct.pack("<bi%ds" % len(text), 85, len(text), text) # 85 is a sync char.
-# Use "ustruct" to build data packets to send.
-# "<" puts the data in the struct in little endian order.
-# "b" puts a signed char in the data stream. E.g. the sync char (85).
-# "i" puts a 4 byte integer in the data stream. E.g. len(text).
-# "%ds" puts a string in the data stream. E.g. "13s" for "Hello World!\n" (13 chars).
-# See https://docs.python.org/3/library/struct.html
-
-# The hardware SPI bus for your OpenMV Cam is always SPI bus 2.
-# polarity = 0 -> clock is idle low.
-# phase = 0 -> sample data on rising clock edge, output data on falling clock edge.
-spi = pyb.SPI(2, pyb.SPI.SLAVE, polarity=0, phase=0)
-
-# NSS callback.
-def nss_callback(line):
-    global spi, data
-    try:
-        spi.send(data, timeout=1000)
-    except OSError as err:
-        pass # Don't care about errors - so pass.
-        # Note that there are 3 possible errors. A timeout error, a general purpose error, or
-        # a busy error. The error codes are 116, 5, 16 respectively for "err.arg[0]".
-
-# Configure NSS/CS in IRQ mode to send data when requested by the master.
-pyb.ExtInt(pyb.Pin("P3"), pyb.ExtInt.IRQ_FALLING, pyb.Pin.PULL_UP, nss_callback)
-
-while(True):
-    time.sleep_ms(1000)
-
-###################################################################################################
-# Arduino Code
-###################################################################################################
-#
-# #include <SPI.h>
-# #define SS_PIN 10
-# #define BAUD_RATE 19200
-# #define CHAR_BUF 128
-#
-# void setup() {
-#   pinMode(SS_PIN, OUTPUT);
-#   Serial.begin(BAUD_RATE);
-#   SPI.begin();
-#   SPI.setBitOrder(MSBFIRST);
-#   SPI.setClockDivider(SPI_CLOCK_DIV16);
-#   SPI.setDataMode(SPI_MODE0);
-#   delay(1000); // Give the OpenMV Cam time to bootup.
-# }
-#
-# void loop() {
-#   int32_t len = 0;
-#   char buff[CHAR_BUF] = {0};
-#   digitalWrite(SS_PIN, LOW);
-#   delay(1); // Give the OpenMV Cam some time to setup to send data.
-#
-#   if(SPI.transfer(1) == 85) { // saw sync char?
-#     SPI.transfer(&len, 4); // get length
-#     if (len) {
-#       SPI.transfer(&buff, min(len, CHAR_BUF));
-#       len -= min(len, CHAR_BUF);
-#     }
-#     while (len--) SPI.transfer(0); // eat any remaining bytes
-#   }
-#
-#   digitalWrite(SS_PIN, HIGH);
-#   Serial.print(buff);
-#   delay(1); // Don't loop too quickly.
-# }
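The SPI message above is framed as a sync byte (85), a 4-byte little-endian length, and the payload. For clarity, here is a sketch of how the same frame could be parsed back in Python — parse_message is a hypothetical helper for illustration, not something either side of the example runs:

import ustruct

def parse_message(buf):
    # buf holds the raw bytes a master clocked out of the slave.
    sync, length = ustruct.unpack("<bi", buf[:5]) # 1-byte sync + 4-byte length
    if sync != 85:
        return None # not aligned on a message boundary
    return buf[5:5 + length] # the payload, e.g. b"Hello World!\n"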
diff --git a/scripts/examples/OpenMV/00-Arduino/arduino_uart.py b/scripts/examples/OpenMV/00-Arduino/arduino_uart.py
deleted file mode 100644
index 1269c8739..000000000
--- a/scripts/examples/OpenMV/00-Arduino/arduino_uart.py
+++ /dev/null
@@ -1,38 +0,0 @@
-# Basic UART communications between OpenMV and Arduino Uno.
-
-# 1) Wire up your OpenMV Cam to your Arduino Uno like this:
-#
-# OpenMV Cam Ground Pin ----> Arduino Ground
-# OpenMV Cam UART3_TX(P4) ----> Arduino Uno UART_RX(0)
-# OpenMV Cam UART3_RX(P5) ----> Arduino Uno UART_TX(1)
-
-# 2) Uncomment and upload the following sketch to Arduino:
-#
-# void setup() {
-#   // put your setup code here, to run once:
-#   Serial.begin(19200);
-# }
-#
-# void loop() {
-#   // put your main code here, to run repeatedly:
-#   if (Serial.available()) {
-#     // Read the most recent byte
-#     byte byteRead = Serial.read();
-#     // ECHO the value that was read
-#     Serial.write(byteRead);
-#   }
-# }
-
-# 3) Run the following script in OpenMV IDE:
-
-import time
-from pyb import UART
-
-# UART 3, and baudrate.
-uart = UART(3, 19200)
-
-while(True):
-    uart.write("Hello World!\n")
-    if (uart.any()):
-        print(uart.read())
-    time.sleep_ms(1000)
diff --git a/scripts/examples/OpenMV/01-Basics/helloworld.py b/scripts/examples/OpenMV/01-Basics/helloworld.py
deleted file mode 100644
index a18b84b21..000000000
--- a/scripts/examples/OpenMV/01-Basics/helloworld.py
+++ /dev/null
@@ -1,17 +0,0 @@
-# Hello World Example
-#
-# Welcome to the OpenMV IDE! Click on the green run arrow button below to run the script!
-
-import sensor, image, time
-
-sensor.reset() # Reset and initialize the sensor.
-sensor.set_pixformat(sensor.RGB565) # Set pixel format to RGB565 (or GRAYSCALE)
-sensor.set_framesize(sensor.QVGA) # Set frame size to QVGA (320x240)
-sensor.skip_frames(time = 2000) # Wait for settings to take effect.
-clock = time.clock() # Create a clock object to track the FPS.
-
-while(True):
-    clock.tick() # Update the FPS clock.
-    img = sensor.snapshot() # Take a picture and return the image.
-    print(clock.fps()) # Note: OpenMV Cam runs about half as fast when connected
-    # to the IDE. The FPS should increase once disconnected.
diff --git a/scripts/examples/OpenMV/01-Basics/main.py b/scripts/examples/OpenMV/01-Basics/main.py
deleted file mode 100644
index 4ff900896..000000000
--- a/scripts/examples/OpenMV/01-Basics/main.py
+++ /dev/null
@@ -1,33 +0,0 @@
-# Main Module Example
-#
-# When your OpenMV Cam is disconnected from your computer it will either run the
-# main.py script on the SD card (if attached) or the main.py script on
-# your OpenMV Cam's internal flash drive.
-
-import time, pyb
-
-led = pyb.LED(3) # Red LED = 1, Green LED = 2, Blue LED = 3, IR LEDs = 4.
-usb = pyb.USB_VCP() # This is a serial port object that allows you to
-# communicate with your computer. While it is not open the code below runs.
-
-while(not usb.isconnected()):
-    led.on()
-    time.sleep_ms(150)
-    led.off()
-    time.sleep_ms(100)
-    led.on()
-    time.sleep_ms(150)
-    led.off()
-    time.sleep_ms(600)
-
-led = pyb.LED(2) # Switch to using the green LED.
-
-while(usb.isconnected()):
-    led.on()
-    time.sleep_ms(150)
-    led.off()
-    time.sleep_ms(100)
-    led.on()
-    time.sleep_ms(150)
-    led.off()
-    time.sleep_ms(600)
diff --git a/scripts/examples/OpenMV/02-Board-Control/native_emitters.py b/scripts/examples/OpenMV/02-Board-Control/native_emitters.py
deleted file mode 100644
index 3839297fd..000000000
--- a/scripts/examples/OpenMV/02-Board-Control/native_emitters.py
+++ /dev/null
@@ -1,19 +0,0 @@
-import time
-
-@micropython.asm_thumb
-def asm():
-    movw(r0, 42)
-
-@micropython.viper
-def viper(a, b):
-    return a + b
-
-@micropython.native
-def native(a, b):
-    return a + b
-
-
-print(asm())
-print(viper(1, 2))
-print(native(1, 2))
-
diff --git a/scripts/examples/OpenMV/03-Drawing/arrow_drawing.py b/scripts/examples/OpenMV/03-Drawing/arrow_drawing.py
deleted file mode 100644
index 33b190280..000000000
--- a/scripts/examples/OpenMV/03-Drawing/arrow_drawing.py
+++ /dev/null
@@ -1,31 +0,0 @@
-# Arrow Drawing
-#
-# This example shows off drawing arrows on the OpenMV Cam.
-
-import sensor, image, time, pyb
-
-sensor.reset()
-sensor.set_pixformat(sensor.RGB565) # or GRAYSCALE...
-sensor.set_framesize(sensor.QVGA) # or QQVGA...
-sensor.skip_frames(time = 2000)
-clock = time.clock()
-
-while(True):
-    clock.tick()
-
-    img = sensor.snapshot()
-
-    for i in range(10):
-        x0 = (pyb.rng() % (2*img.width())) - (img.width()//2)
-        y0 = (pyb.rng() % (2*img.height())) - (img.height()//2)
-        x1 = (pyb.rng() % (2*img.width())) - (img.width()//2)
-        y1 = (pyb.rng() % (2*img.height())) - (img.height()//2)
-        r = (pyb.rng() % 127) + 128
-        g = (pyb.rng() % 127) + 128
-        b = (pyb.rng() % 127) + 128
-
-        # If the first argument is a scalar then this method expects
-        # to see x0, y0, x1, and y1. Otherwise, it expects a (x0,y0,x1,y1) tuple.
-        img.draw_arrow(x0, y0, x1, y1, color = (r, g, b), size = 30, thickness = 2)
-
-    print(clock.fps())
diff --git a/scripts/examples/OpenMV/03-Drawing/circle_drawing.py b/scripts/examples/OpenMV/03-Drawing/circle_drawing.py
deleted file mode 100644
index 373d5a40f..000000000
--- a/scripts/examples/OpenMV/03-Drawing/circle_drawing.py
+++ /dev/null
@@ -1,31 +0,0 @@
-# Circle Drawing
-#
-# This example shows off drawing circles on the OpenMV Cam.
-
-import sensor, image, time, pyb
-
-sensor.reset()
-sensor.set_pixformat(sensor.RGB565) # or GRAYSCALE...
-sensor.set_framesize(sensor.QVGA) # or QQVGA...
-sensor.skip_frames(time = 2000)
-clock = time.clock()
-
-while(True):
-    clock.tick()
-
-    img = sensor.snapshot()
-
-    for i in range(10):
-        x = (pyb.rng() % (2*img.width())) - (img.width()//2)
-        y = (pyb.rng() % (2*img.height())) - (img.height()//2)
-        radius = pyb.rng() % (max(img.height(), img.width())//2)
-
-        r = (pyb.rng() % 127) + 128
-        g = (pyb.rng() % 127) + 128
-        b = (pyb.rng() % 127) + 128
-
-        # If the first argument is a scalar then this method expects
-        # to see x, y, and radius. Otherwise, it expects a (x,y,radius) tuple.
-        img.draw_circle(x, y, radius, color = (r, g, b), thickness = 2, fill = False)
-
-    print(clock.fps())
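The drawing demos above and below all rebuild the same random-coordinate and random-bright-color boilerplate. As an editorial aside, a sketch of that boilerplate as helpers — rand_point and rand_color are hypothetical names:

import pyb

def rand_point(img):
    # Random x/y that may fall up to half a frame outside the image,
    # matching how the demos exercise off-screen clipping.
    x = (pyb.rng() % (2 * img.width())) - (img.width() // 2)
    y = (pyb.rng() % (2 * img.height())) - (img.height() // 2)
    return x, y

def rand_color():
    # Random bright RGB888 color (each channel in 128..255).
    return tuple((pyb.rng() % 127) + 128 for _ in range(3))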
diff --git a/scripts/examples/OpenMV/03-Drawing/copy2fb.py b/scripts/examples/OpenMV/03-Drawing/copy2fb.py
deleted file mode 100644
index 3b0761a5a..000000000
--- a/scripts/examples/OpenMV/03-Drawing/copy2fb.py
+++ /dev/null
@@ -1,21 +0,0 @@
-# Copy image to framebuffer.
-#
-# This example shows how to load an image and copy it to the framebuffer for testing.
-
-import sensor, image, time
-
-# Still need to init sensor
-sensor.reset()
-# Set sensor settings
-sensor.set_contrast(1)
-sensor.set_gainceiling(16)
-
-# Set sensor pixel format
-sensor.set_framesize(sensor.QQVGA)
-sensor.set_pixformat(sensor.GRAYSCALE)
-
-# Load image
-img = image.Image("/example.bmp", copy_to_fb=True)
-
-# Add a small delay to allow the IDE to read the loaded image.
-time.sleep_ms(500)
diff --git a/scripts/examples/OpenMV/03-Drawing/cross_drawing.py b/scripts/examples/OpenMV/03-Drawing/cross_drawing.py
deleted file mode 100644
index 8d7e1e5ff..000000000
--- a/scripts/examples/OpenMV/03-Drawing/cross_drawing.py
+++ /dev/null
@@ -1,29 +0,0 @@
-# Cross Drawing
-#
-# This example shows off drawing crosses on the OpenMV Cam.
-
-import sensor, image, time, pyb
-
-sensor.reset()
-sensor.set_pixformat(sensor.RGB565) # or GRAYSCALE...
-sensor.set_framesize(sensor.QVGA) # or QQVGA...
-sensor.skip_frames(time = 2000)
-clock = time.clock()
-
-while(True):
-    clock.tick()
-
-    img = sensor.snapshot()
-
-    for i in range(10):
-        x = (pyb.rng() % (2*img.width())) - (img.width()//2)
-        y = (pyb.rng() % (2*img.height())) - (img.height()//2)
-        r = (pyb.rng() % 127) + 128
-        g = (pyb.rng() % 127) + 128
-        b = (pyb.rng() % 127) + 128
-
-        # If the first argument is a scalar then this method expects
-        # to see x and y. Otherwise, it expects a (x,y) tuple.
-        img.draw_cross(x, y, color = (r, g, b), size = 10, thickness = 2)
-
-    print(clock.fps())
diff --git a/scripts/examples/OpenMV/03-Drawing/ellipse_drawing.py b/scripts/examples/OpenMV/03-Drawing/ellipse_drawing.py
deleted file mode 100644
index 9080a99d2..000000000
--- a/scripts/examples/OpenMV/03-Drawing/ellipse_drawing.py
+++ /dev/null
@@ -1,35 +0,0 @@
-# Ellipse Drawing
-#
-# This example shows off drawing ellipses on the OpenMV Cam.
-
-import sensor, image, time, pyb
-
-sensor.reset()
-sensor.set_pixformat(sensor.RGB565) # or GRAYSCALE...
-sensor.set_framesize(sensor.QVGA) # or QQVGA...
-sensor.skip_frames(time = 2000)
-clock = time.clock()
-
-while(True):
-    clock.tick()
-
-    img = sensor.snapshot()
-
-    for i in range(10):
-        x = (pyb.rng() % (2*img.width())) - (img.width()//2)
-        y = (pyb.rng() % (2*img.height())) - (img.height()//2)
-        radius_x = pyb.rng() % (max(img.height(), img.width())//2)
-        radius_y = pyb.rng() % (max(img.height(), img.width())//2)
-        rot = pyb.rng()
-
-        r = (pyb.rng() % 127) + 128
-        g = (pyb.rng() % 127) + 128
-        b = (pyb.rng() % 127) + 128
-
-        # If the first argument is a scalar then this method expects
-        # to see x, y, radius x, and radius y.
-        # Otherwise, it expects a (x,y,radius_x,radius_y) tuple.
-        img.draw_ellipse(x, y, radius_x, radius_y, rot,
-            color = (r, g, b), thickness = 2, fill = False)
-
-    print(clock.fps())
diff --git a/scripts/examples/OpenMV/03-Drawing/flood_fill.py b/scripts/examples/OpenMV/03-Drawing/flood_fill.py
deleted file mode 100644
index 4857ce3ca..000000000
--- a/scripts/examples/OpenMV/03-Drawing/flood_fill.py
+++ /dev/null
@@ -1,35 +0,0 @@
-# Flood Fill
-#
-# This example shows off flood filling areas in the image.
-
-import sensor, image, time
-
-sensor.reset()
-sensor.set_pixformat(sensor.RGB565) # or GRAYSCALE...
-sensor.set_framesize(sensor.QVGA) # or QQVGA...
-sensor.skip_frames(time = 2000)
-clock = time.clock()
-
-while(True):
-    clock.tick()
-
-    # seed_threshold controls the maximum allowed difference between
-    # the initial pixel and any filled pixels. It's important to
-    # set this such that flood fill doesn't fill the whole image.
-
-    # floating_thresholds controls the maximum allowed difference
-    # between any two pixels. This can easily fill the whole image
-    # with even a very low threshold.
-
-    # flood_fill will fill pixels that pass both thresholds.
-
-    # You can invert what gets filled with "invert" and clear
-    # everything but the filled area with "clear_background".
-
-    x = sensor.width() // 2
-    y = sensor.height() // 2
-    img = sensor.snapshot().flood_fill(x, y, \
-        seed_threshold=0.05, floating_thresholds=0.05, \
-        color=(255, 0, 0), invert=False, clear_background=False)
-
-    print(clock.fps())
diff --git a/scripts/examples/OpenMV/03-Drawing/image_drawing.py b/scripts/examples/OpenMV/03-Drawing/image_drawing.py
deleted file mode 100644
index 5931c394d..000000000
--- a/scripts/examples/OpenMV/03-Drawing/image_drawing.py
+++ /dev/null
@@ -1,25 +0,0 @@
-# Draw Image Example
-#
-# This example shows off how to draw images in the frame buffer.
-
-import sensor, image, time, pyb
-
-sensor.reset()
-sensor.set_pixformat(sensor.RGB565) # or GRAYSCALE...
-sensor.set_framesize(sensor.QVGA) # or QQVGA...
-sensor.skip_frames(time = 2000)
-clock = time.clock()
-
-while(True):
-    clock.tick()
-
-    img = sensor.snapshot()
-    small_img = img.mean_pooled(4, 4) # Makes a copy.
-
-    x = (img.width()//2)-(small_img.width()//2)
-    y = (img.height()//2)-(small_img.height()//2)
-    # Draws an image in the frame buffer. Pass an optional
-    # mask image to control what pixels are drawn.
-    img.draw_image(small_img, x, y, x_scale=1, y_scale=1)
-
-    print(clock.fps())
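image_drawing.py above mentions the optional mask argument without using it. A minimal sketch under the same QVGA RGB565 setup — the 50% alpha and the bitmap mask are arbitrary illustrative choices (the advanced test below uses the same to_bitmap() mask trick):

img = sensor.snapshot()
small_img = img.mean_pooled(4, 4) # 80x60 copy of a QVGA frame
x = (img.width() // 2) - (small_img.width() // 2)
y = (img.height() // 2) - (small_img.height() // 2)
# Blend the copy at 50% alpha, drawing only where the bitmap mask is set.
img.draw_image(small_img, x, y, alpha=128, mask=small_img.to_bitmap(copy=True))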
diff --git a/scripts/examples/OpenMV/03-Drawing/image_drawing_advanced.py b/scripts/examples/OpenMV/03-Drawing/image_drawing_advanced.py
deleted file mode 100644
index 42261d160..000000000
--- a/scripts/examples/OpenMV/03-Drawing/image_drawing_advanced.py
+++ /dev/null
@@ -1,93 +0,0 @@
-# Draw Image Testing script with bounce
-#
-# Exercise draw image with many different values for testing
-
-import sensor, image, time, pyb
-
-sensor.reset()
-sensor.set_pixformat(sensor.RGB565) # or GRAYSCALE...
-sensor.set_framesize(sensor.QVGA) # or QQVGA...
-sensor.skip_frames(time = 2000)
-clock = time.clock()
-
-BOUNCE = True
-RESCALE = True
-
-SMALL_IMAGE_SCALE = 3
-
-CYCLE_FORMATS = True
-CYCLE_MASK = True
-
-# Used when CYCLE_FORMATS or CYCLE_MASK is true
-value_mixer = 0
-
-# Location of small image
-x = 100
-y = 50
-
-# Bounce direction
-xd = 1
-yd = 1
-
-# Small image scaling
-rescale = 1.0
-rd = 0.1
-max_rescale = 5
-min_rescale = rd * 2
-
-# Boundary to bounce within
-xmin = -sensor.width() / SMALL_IMAGE_SCALE - 8
-ymin = -sensor.height() / SMALL_IMAGE_SCALE - 8
-xmax = sensor.width() + 8
-ymax = sensor.height() + 8
-
-while(True):
-    clock.tick()
-
-    status = ""
-    value_mixer = value_mixer + 1
-
-    img = sensor.snapshot()
-    # Makes a scaled copy of the sensor
-    small_img = img.mean_pooled(SMALL_IMAGE_SCALE, SMALL_IMAGE_SCALE)
-
-    status = 'rgb565 '
-    if CYCLE_FORMATS:
-        image_format = (value_mixer >> 8) & 3
-        # To test combining different formats
-        if (image_format==1): small_img = small_img.to_bitmap(copy=True); status = 'bitmap '
-        if (image_format==2): small_img = small_img.to_grayscale(copy=True); status = 'grayscale '
-        if (image_format==3): small_img = small_img.to_rgb565(copy=True); status = 'rgb565 '
-
-    # update small image location
-    if BOUNCE:
-        x = x + xd
-        if (x < xmin or x > xmax):
-            xd = -xd
-
-        y = y + yd
-        if (y < ymin or y > ymax):
-            yd = -yd
-
-    # Update small image scale
-    if RESCALE:
-        rescale = rescale + rd
-        if (rescale < min_rescale or rescale > max_rescale):
-            rd = -rd
-
-    # Compute the scaled size of the small image
-    scaled_width = int(small_img.width() * abs(rescale))
-    scaled_height = int(small_img.height() * abs(rescale))
-
-    apply_mask = CYCLE_MASK and ((value_mixer >> 9) & 1)
-    if apply_mask:
-        img.draw_image(small_img, int(x), int(y), mask=small_img.to_bitmap(copy=True), x_scale=rescale, y_scale=rescale, alpha=240, hint=image.IMAGE_HINT_BILINEAR | image.IMAGE_HINT_CENTER)
-        status += 'alpha:240 '
-        status += '+mask '
-    else:
-        img.draw_image(small_img, int(x), int(y), x_scale=rescale, y_scale=rescale, alpha=128, hint=image.IMAGE_HINT_BILINEAR | image.IMAGE_HINT_CENTER)
-        status += 'alpha:128 '
-
-    img.draw_string(8, 0, status, mono_space = False)
-
-    print(clock.fps())
diff --git a/scripts/examples/OpenMV/03-Drawing/image_drawing_alpha_blending_test.py b/scripts/examples/OpenMV/03-Drawing/image_drawing_alpha_blending_test.py
deleted file mode 100644
index 8cae0107a..000000000
--- a/scripts/examples/OpenMV/03-Drawing/image_drawing_alpha_blending_test.py
+++ /dev/null
@@ -1,71 +0,0 @@
-# Image Drawing Alpha Blending Test
-#
-# This script tests the performance and quality of the draw_image()
-# method which can perform nearest neighbor, bilinear, bicubic, and
-# area scaling along with color channel extraction, alpha blending,
-# color palette application, and alpha palette application.
- -import sensor, image, time - -sensor.reset() -sensor.set_pixformat(sensor.RGB565) -sensor.set_framesize(sensor.QVGA) - -hint = image.BICUBIC # image.BILINEAR image.BICUBIC - -small_img = image.Image(4, 4, sensor.RGB565) -small_img.set_pixel(0, 0, (0, 0, 127)) -small_img.set_pixel(1, 0, (47, 255, 199)) -small_img.set_pixel(2, 0, (0, 188, 255)) -small_img.set_pixel(3, 0, (0, 0, 127)) -small_img.set_pixel(0, 1, (0, 176, 255)) -small_img.set_pixel(1, 1, (222, 0, 0 )) -small_img.set_pixel(2, 1, (50, 255, 195)) -small_img.set_pixel(3, 1, (86, 255, 160)) -small_img.set_pixel(0, 2, (255, 211, 0 )) -small_img.set_pixel(1, 2, (83, 255, 163)) -small_img.set_pixel(2, 2, (255, 211, 0)) -small_img.set_pixel(3, 2, (0, 80, 255)) -small_img.set_pixel(0, 3, (255, 118, 0 )) -small_img.set_pixel(1, 3, (127, 0, 0 )) -small_img.set_pixel(2, 3, (0, 144, 255)) -small_img.set_pixel(3, 3, (50, 255, 195)) -#small_img.to_grayscale() -#small_img.to_bitmap() - -big_img = image.Image(128, 128, sensor.RGB565) -big_img.draw_image(small_img, 0, 0, x_scale=32, y_scale=32, hint=hint) -#big_img.to_grayscale() -#big_img.to_bitmap() - -alpha_div = 1 -alpha_value = 0 -alpha_step = 2 - -x_bounce = sensor.width()//2 -x_bounce_toggle = 1 - -y_bounce = sensor.height()//2 -y_bounce_toggle = 1 - -clock = time.clock() -while(True): - clock.tick() - - img = sensor.snapshot() - #img.to_grayscale() - #img.to_bitmap() - img.draw_image(big_img, x_bounce, y_bounce, - rgb_channel=-1, alpha=alpha_value//alpha_div, - hint=hint|image.CENTER) - - x_bounce += x_bounce_toggle - if abs(x_bounce-(img.width()//2)) >= (img.width()//2): x_bounce_toggle = -x_bounce_toggle - - y_bounce += y_bounce_toggle - if abs(y_bounce-(img.height()//2)) >= (img.height()//2): y_bounce_toggle = -y_bounce_toggle - - alpha_value += alpha_step - if not alpha_value or alpha_value//alpha_div == 256: alpha_step = -alpha_step - - print(clock.fps()) diff --git a/scripts/examples/OpenMV/03-Drawing/image_drawing_alpha_blending_with_color_table_test.py b/scripts/examples/OpenMV/03-Drawing/image_drawing_alpha_blending_with_color_table_test.py deleted file mode 100644 index a3f70c0a6..000000000 --- a/scripts/examples/OpenMV/03-Drawing/image_drawing_alpha_blending_with_color_table_test.py +++ /dev/null @@ -1,81 +0,0 @@ -# Image Drawing Color Table with Alpha Blending Test -# -# This script tests the performance and quality of the draw_image() -# method which can perform nearest neighbor, bilinear, bicubic, and -# area scaling along with color channel extraction, alpha blending, -# color palette application, and alpha palette application. - -import sensor, image, time - -sensor.reset() -sensor.set_pixformat(sensor.RGB565) -sensor.set_framesize(sensor.QVGA) - -hint = image.BICUBIC # image.BILINEAR image.BICUBIC - -# RGB channel extraction is done after scaling normally, this -# may produce false colors. Set this flag to do it before. -# -hint |= 0 # image.EXTRACT_RGB_CHANNEL_FIRST - -# Color table application is done after scaling normally, this -# may produce false colors. Set this flag to do it before. 
-# -hint |= 0 # image.APPLY_COLOR_PALETTE_FIRST - -small_img = image.Image(4, 4, sensor.RGB565) -small_img.set_pixel(0, 0, (0, 0, 127)) -small_img.set_pixel(1, 0, (47, 255, 199)) -small_img.set_pixel(2, 0, (0, 188, 255)) -small_img.set_pixel(3, 0, (0, 0, 127)) -small_img.set_pixel(0, 1, (0, 176, 255)) -small_img.set_pixel(1, 1, (222, 0, 0 )) -small_img.set_pixel(2, 1, (50, 255, 195)) -small_img.set_pixel(3, 1, (86, 255, 160)) -small_img.set_pixel(0, 2, (255, 211, 0 )) -small_img.set_pixel(1, 2, (83, 255, 163)) -small_img.set_pixel(2, 2, (255, 211, 0)) -small_img.set_pixel(3, 2, (0, 80, 255)) -small_img.set_pixel(0, 3, (255, 118, 0 )) -small_img.set_pixel(1, 3, (127, 0, 0 )) -small_img.set_pixel(2, 3, (0, 144, 255)) -small_img.set_pixel(3, 3, (50, 255, 195)) -#small_img.to_grayscale() -#small_img.to_bitmap() - -big_img = image.Image(128, 128, sensor.RGB565) -big_img.draw_image(small_img, 0, 0, x_scale=32, y_scale=32, hint=hint) -#big_img.to_grayscale() -#big_img.to_bitmap() - -alpha_div = 1 -alpha_value = 0 -alpha_step = 2 - -x_bounce = sensor.width()//2 -x_bounce_toggle = 1 - -y_bounce = sensor.height()//2 -y_bounce_toggle = 1 - -clock = time.clock() -while(True): - clock.tick() - - img = sensor.snapshot() - #img.to_grayscale() - #img.to_bitmap() - img.draw_image(big_img, x_bounce, y_bounce, - rgb_channel=-1, alpha=alpha_value//alpha_div, - color_palette=sensor.PALETTE_IRONBOW, hint=hint|image.CENTER) - - x_bounce += x_bounce_toggle - if abs(x_bounce-(img.width()//2)) >= (img.width()//2): x_bounce_toggle = -x_bounce_toggle - - y_bounce += y_bounce_toggle - if abs(y_bounce-(img.height()//2)) >= (img.height()//2): y_bounce_toggle = -y_bounce_toggle - - alpha_value += alpha_step - if not alpha_value or alpha_value//alpha_div == 256: alpha_step = -alpha_step - - print(clock.fps()) diff --git a/scripts/examples/OpenMV/03-Drawing/image_drawing_alpha_table_test.py b/scripts/examples/OpenMV/03-Drawing/image_drawing_alpha_table_test.py deleted file mode 100644 index 7ca783fe9..000000000 --- a/scripts/examples/OpenMV/03-Drawing/image_drawing_alpha_table_test.py +++ /dev/null @@ -1,75 +0,0 @@ -# Image Drawing Alpha Table Test -# -# This script tests the performance and quality of the draw_image() -# method which can perform nearest neighbor, bilinear, bicubic, and -# area scaling along with color channel extraction, alpha blending, -# color palette application, and alpha palette application. 
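The alpha-table test that follows builds a hard-threshold LUT (fully opaque above pixel value 127, fully transparent below). A gentler linear ramp is a one-line change; a sketch using the same 256x1 GRAYSCALE LUT shape the test expects:

import sensor, image

# Linear alpha ramp: source pixel value i maps to alpha i (0 = transparent, 255 = opaque).
alpha_lut = image.Image(256, 1, sensor.GRAYSCALE)
for i in range(256):
    alpha_lut.set_pixel(i, 0, i)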
- -import sensor, image, time - -sensor.reset() -sensor.set_pixformat(sensor.RGB565) -sensor.set_framesize(sensor.QVGA) - -hint = image.BICUBIC # image.BILINEAR image.BICUBIC - -small_img = image.Image(4, 4, sensor.RGB565) -small_img.set_pixel(0, 0, (0, 0, 127)) -small_img.set_pixel(1, 0, (47, 255, 199)) -small_img.set_pixel(2, 0, (0, 188, 255)) -small_img.set_pixel(3, 0, (0, 0, 127)) -small_img.set_pixel(0, 1, (0, 176, 255)) -small_img.set_pixel(1, 1, (222, 0, 0 )) -small_img.set_pixel(2, 1, (50, 255, 195)) -small_img.set_pixel(3, 1, (86, 255, 160)) -small_img.set_pixel(0, 2, (255, 211, 0 )) -small_img.set_pixel(1, 2, (83, 255, 163)) -small_img.set_pixel(2, 2, (255, 211, 0)) -small_img.set_pixel(3, 2, (0, 80, 255)) -small_img.set_pixel(0, 3, (255, 118, 0 )) -small_img.set_pixel(1, 3, (127, 0, 0 )) -small_img.set_pixel(2, 3, (0, 144, 255)) -small_img.set_pixel(3, 3, (50, 255, 195)) -#small_img.to_grayscale() -#small_img.to_bitmap() - -big_img = image.Image(128, 128, sensor.RGB565) -big_img.draw_image(small_img, 0, 0, x_scale=32, y_scale=32, hint=hint) -#big_img.to_grayscale() -#big_img.to_bitmap() - -alpha_lut = image.Image(256, 1, sensor.GRAYSCALE) -for i in range(256): - alpha_lut.set_pixel(i, 0, 255 if i > 127 else 0) - -alpha_div = 1 -alpha_value = 0 -alpha_step = 2 - -x_bounce = sensor.width()//2 -x_bounce_toggle = 1 - -y_bounce = sensor.height()//2 -y_bounce_toggle = 1 - -clock = time.clock() -while(True): - clock.tick() - - img = sensor.snapshot() - #img.to_grayscale() - #img.to_bitmap() - img.draw_image(big_img, x_bounce, y_bounce, - rgb_channel=-1, alpha=alpha_value//alpha_div, - alpha_palette=alpha_lut, hint=hint|image.CENTER) - - x_bounce += x_bounce_toggle - if abs(x_bounce-(img.width()//2)) >= (img.width()//2): x_bounce_toggle = -x_bounce_toggle - - y_bounce += y_bounce_toggle - if abs(y_bounce-(img.height()//2)) >= (img.height()//2): y_bounce_toggle = -y_bounce_toggle - - alpha_value += alpha_step - if not alpha_value or alpha_value//alpha_div == 256: alpha_step = -alpha_step - - print(clock.fps()) diff --git a/scripts/examples/OpenMV/03-Drawing/image_drawing_alpha_table_with_color_table_test.py b/scripts/examples/OpenMV/03-Drawing/image_drawing_alpha_table_with_color_table_test.py deleted file mode 100644 index 014a2c1ce..000000000 --- a/scripts/examples/OpenMV/03-Drawing/image_drawing_alpha_table_with_color_table_test.py +++ /dev/null @@ -1,85 +0,0 @@ -# Image Drawing Color Table with Alpha Table Test -# -# This script tests the performance and quality of the draw_image() -# method which can perform nearest neighbor, bilinear, bicubic, and -# area scaling along with color channel extraction, alpha blending, -# color palette application, and alpha palette application. - -import sensor, image, time - -sensor.reset() -sensor.set_pixformat(sensor.RGB565) -sensor.set_framesize(sensor.QVGA) - -hint = image.BICUBIC # image.BILINEAR image.BICUBIC - -# RGB channel extraction is done after scaling normally, this -# may produce false colors. Set this flag to do it before. -# -hint |= 0 # image.EXTRACT_RGB_CHANNEL_FIRST - -# Color table application is done after scaling normally, this -# may produce false colors. Set this flag to do it before. 
-# -hint |= 0 # image.APPLY_COLOR_PALETTE_FIRST - -small_img = image.Image(4, 4, sensor.RGB565) -small_img.set_pixel(0, 0, (0, 0, 127)) -small_img.set_pixel(1, 0, (47, 255, 199)) -small_img.set_pixel(2, 0, (0, 188, 255)) -small_img.set_pixel(3, 0, (0, 0, 127)) -small_img.set_pixel(0, 1, (0, 176, 255)) -small_img.set_pixel(1, 1, (222, 0, 0 )) -small_img.set_pixel(2, 1, (50, 255, 195)) -small_img.set_pixel(3, 1, (86, 255, 160)) -small_img.set_pixel(0, 2, (255, 211, 0 )) -small_img.set_pixel(1, 2, (83, 255, 163)) -small_img.set_pixel(2, 2, (255, 211, 0)) -small_img.set_pixel(3, 2, (0, 80, 255)) -small_img.set_pixel(0, 3, (255, 118, 0 )) -small_img.set_pixel(1, 3, (127, 0, 0 )) -small_img.set_pixel(2, 3, (0, 144, 255)) -small_img.set_pixel(3, 3, (50, 255, 195)) -#small_img.to_grayscale() -#small_img.to_bitmap() - -big_img = image.Image(128, 128, sensor.RGB565) -big_img.draw_image(small_img, 0, 0, x_scale=32, y_scale=32, hint=hint) -#big_img.to_grayscale() -#big_img.to_bitmap() - -alpha_lut = image.Image(256, 1, sensor.GRAYSCALE) -for i in range(256): - alpha_lut.set_pixel(i, 0, 255 if i > 127 else 0) - -alpha_div = 1 -alpha_value = 0 -alpha_step = 2 - -x_bounce = sensor.width()//2 -x_bounce_toggle = 1 - -y_bounce = sensor.height()//2 -y_bounce_toggle = 1 - -clock = time.clock() -while(True): - clock.tick() - - img = sensor.snapshot() - #img.to_grayscale() - #img.to_bitmap() - img.draw_image(big_img, x_bounce, y_bounce, - rgb_channel=-1, alpha=alpha_value//alpha_div, - color_palette=sensor.PALETTE_IRONBOW, alpha_palette=alpha_lut, hint=hint|image.CENTER) - - x_bounce += x_bounce_toggle - if abs(x_bounce-(img.width()//2)) >= (img.width()//2): x_bounce_toggle = -x_bounce_toggle - - y_bounce += y_bounce_toggle - if abs(y_bounce-(img.height()//2)) >= (img.height()//2): y_bounce_toggle = -y_bounce_toggle - - alpha_value += alpha_step - if not alpha_value or alpha_value//alpha_div == 256: alpha_step = -alpha_step - - print(clock.fps()) diff --git a/scripts/examples/OpenMV/03-Drawing/image_drawing_scale_down_test.py b/scripts/examples/OpenMV/03-Drawing/image_drawing_scale_down_test.py deleted file mode 100644 index 7b4f9a67c..000000000 --- a/scripts/examples/OpenMV/03-Drawing/image_drawing_scale_down_test.py +++ /dev/null @@ -1,69 +0,0 @@ -# Image Scaling Down Drawing Test -# -# This script tests the performance and quality of the draw_image() -# method which can perform nearest neighbor, bilinear, bicubic, and -# area scaling along with color channel extraction, alpha blending, -# color palette application, and alpha palette application. 
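A note on the hint the next script picks: BILINEAR and BICUBIC sample only a few neighbors, so when shrinking they skip source pixels and can alias, while image.AREA averages every covered pixel. A sketch of the trade-off using the same draw_image() API (big_img stands for any 128x128 RGB565 source such as the one the test below builds):

fast = image.Image(32, 32, sensor.RGB565)
fast.draw_image(big_img, 0, 0, x_scale=0.25, y_scale=0.25, hint=image.BILINEAR) # fast, may alias

clean = image.Image(32, 32, sensor.RGB565)
clean.draw_image(big_img, 0, 0, x_scale=0.25, y_scale=0.25, hint=image.AREA) # averages, smoother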
- -# DISABLE THE FRAME BUFFER TO SEE THE REAL FPS - -import sensor, image, time - -up_hint = 0 # image.BILINEAR image.BICUBIC -down_hint = image.AREA # image.BILINEAR image.BICUBIC image.AREA - -bounce_div = 128 - -medium_img = image.Image(32, 32, sensor.RGB565, copy_to_fb=True) -#medium_img.to_grayscale() -#medium_img.to_bitmap() - -small_img = image.Image(4, 4, sensor.RGB565) -small_img.set_pixel(0, 0, (0, 0, 127)) -small_img.set_pixel(1, 0, (47, 255, 199)) -small_img.set_pixel(2, 0, (0, 188, 255)) -small_img.set_pixel(3, 0, (0, 0, 127)) -small_img.set_pixel(0, 1, (0, 176, 255)) -small_img.set_pixel(1, 1, (222, 0, 0 )) -small_img.set_pixel(2, 1, (50, 255, 195)) -small_img.set_pixel(3, 1, (86, 255, 160)) -small_img.set_pixel(0, 2, (255, 211, 0 )) -small_img.set_pixel(1, 2, (83, 255, 163)) -small_img.set_pixel(2, 2, (255, 211, 0)) -small_img.set_pixel(3, 2, (0, 80, 255)) -small_img.set_pixel(0, 3, (255, 118, 0 )) -small_img.set_pixel(1, 3, (127, 0, 0 )) -small_img.set_pixel(2, 3, (0, 144, 255)) -small_img.set_pixel(3, 3, (50, 255, 195)) -#small_img.to_grayscale() -#small_img.to_bitmap() - -big_img = image.Image(128, 128, sensor.RGB565) -big_img.draw_image(small_img, 0, 0, x_scale=32, y_scale=32, hint=up_hint) -#big_img.to_grayscale() -#big_img.to_bitmap() - -x_bounce = 0 -x_bounce_toggle = 0 - -y_bounce = 0 -y_bounce_toggle = 0 - -clock = time.clock() -while(True): - clock.tick() - - medium_img.clear() - medium_img.draw_image(big_img, - x_bounce // bounce_div, y_bounce // bounce_div, - x_scale=0.25, y_scale=0.25, - hint=down_hint) - sensor.flush() - - x_bounce += x_bounce_toggle - if abs(x_bounce // bounce_div) >= (medium_img.width()*1.1): x_bounce_toggle = -x_bounce_toggle - - y_bounce += y_bounce_toggle - if abs(y_bounce // bounce_div) >= (medium_img.height()*1.1): y_bounce_toggle = -y_bounce_toggle - - print(clock.fps()) diff --git a/scripts/examples/OpenMV/03-Drawing/image_drawing_scale_up_test.py b/scripts/examples/OpenMV/03-Drawing/image_drawing_scale_up_test.py deleted file mode 100644 index 92bcf91ee..000000000 --- a/scripts/examples/OpenMV/03-Drawing/image_drawing_scale_up_test.py +++ /dev/null @@ -1,63 +0,0 @@ -# Image Scaling Up Drawing Test -# -# This script tests the performance and quality of the draw_image() -# method which can perform nearest neighbor, bilinear, bicubic, and -# area scaling along with color channel extraction, alpha blending, -# color palette application, and alpha palette application. 
- -# DISABLE THE FRAME BUFFER TO SEE THE REAL FPS - -import sensor, image, time - -hint = 0 # image.BILINEAR image.BICUBIC - -bounce_div = 32 - -big_img = image.Image(128, 128, sensor.RGB565, copy_to_fb=True) -#big_img.to_grayscale() -#big_img.to_bitmap() - -small_img = image.Image(4, 4, sensor.RGB565) -small_img.set_pixel(0, 0, (0, 0, 127)) -small_img.set_pixel(1, 0, (47, 255, 199)) -small_img.set_pixel(2, 0, (0, 188, 255)) -small_img.set_pixel(3, 0, (0, 0, 127)) -small_img.set_pixel(0, 1, (0, 176, 255)) -small_img.set_pixel(1, 1, (222, 0, 0 )) -small_img.set_pixel(2, 1, (50, 255, 195)) -small_img.set_pixel(3, 1, (86, 255, 160)) -small_img.set_pixel(0, 2, (255, 211, 0 )) -small_img.set_pixel(1, 2, (83, 255, 163)) -small_img.set_pixel(2, 2, (255, 211, 0)) -small_img.set_pixel(3, 2, (0, 80, 255)) -small_img.set_pixel(0, 3, (255, 118, 0 )) -small_img.set_pixel(1, 3, (127, 0, 0 )) -small_img.set_pixel(2, 3, (0, 144, 255)) -small_img.set_pixel(3, 3, (50, 255, 195)) -#small_img.to_grayscale() -#small_img.to_bitmap() - -x_bounce = 0 -x_bounce_toggle = 0 - -y_bounce = 0 -y_bounce_toggle = 0 - -clock = time.clock() -while(True): - clock.tick() - - big_img.clear() - big_img.draw_image(small_img, - x_bounce // bounce_div, y_bounce // bounce_div, - x_scale=32, y_scale=32, - hint=hint) - sensor.flush() - - x_bounce += x_bounce_toggle - if abs(x_bounce // bounce_div) >= (big_img.width()*1.1): x_bounce_toggle = -x_bounce_toggle - - y_bounce += y_bounce_toggle - if abs(y_bounce // bounce_div) >= (big_img.height()*1.1): y_bounce_toggle = -y_bounce_toggle - - print(clock.fps()) diff --git a/scripts/examples/OpenMV/03-Drawing/image_drawing_with_custom_palette.py b/scripts/examples/OpenMV/03-Drawing/image_drawing_with_custom_palette.py deleted file mode 100644 index d379b3342..000000000 --- a/scripts/examples/OpenMV/03-Drawing/image_drawing_with_custom_palette.py +++ /dev/null @@ -1,43 +0,0 @@ -# Draw Image Example with custom color palette -# -# This example shows off how to draw images in the frame buffer with a custom generated color palette. - -import sensor, image, time, pyb - -sensor.reset() -sensor.set_pixformat(sensor.GRAYSCALE) # or GRAYSCALE... -sensor.set_framesize(sensor.QQVGA) # or QQVGA... -sensor.skip_frames(time = 2000) -clock = time.clock() - -# the color palette is actually an image, this allows you to use image ops to create palettes -# the image must have 256 entries i.e. 
256x1, 64x4, 16x16 and have the format rgb565
-
-# Initialise palette source colors into an image
-palette_source_colors = [(255, 0, 0), (0, 255, 0), (0, 0, 255), (255, 0, 255)]
-palette_source_color_image = image.Image(len(palette_source_colors), 1, sensor.RGB565)
-for i, color in enumerate(palette_source_colors):
-    palette_source_color_image[i] = color
-
-# Scale the source colors to the palette width and smooth them
-palette = image.Image(256, 1, sensor.RGB565)
-palette.draw_image(palette_source_color_image, 0, 0, x_scale=palette.width() / palette_source_color_image.width())
-palette.mean(int(palette.width() / palette_source_color_image.width() / 2))
-
-while(True):
-    clock.tick()
-
-    img = sensor.snapshot()
-    # Get a copy of the grayscale image before converting to color
-    img_copy = img.copy()
-
-    img.to_rgb565()
-
-    palette_boundary_inset = int(sensor.width() / 40)
-    palette_scale_x = (sensor.width() - palette_boundary_inset * 2) / palette.width()
-
-    img.draw_image(img_copy, 0, 0, color_palette=palette)
-    img.draw_image(palette, palette_boundary_inset, palette_boundary_inset, x_scale=palette_scale_x, y_scale=8)
-    img.draw_rectangle(palette_boundary_inset, palette_boundary_inset, int(palette.width()*palette_scale_x), 8, color=(255,255,255), thickness=1)
-
-    print(clock.fps())
diff --git a/scripts/examples/OpenMV/03-Drawing/keypoints_drawing.py b/scripts/examples/OpenMV/03-Drawing/keypoints_drawing.py
deleted file mode 100644
index c41a8161d..000000000
--- a/scripts/examples/OpenMV/03-Drawing/keypoints_drawing.py
+++ /dev/null
@@ -1,31 +0,0 @@
-# Keypoints Drawing
-#
-# This example shows off drawing keypoints on the OpenMV Cam. Usually you call draw_keypoints()
-# on a keypoints object but you can also call it on a list of 3-value tuples...
-
-import sensor, image, time, pyb
-
-sensor.reset()
-sensor.set_pixformat(sensor.RGB565) # or GRAYSCALE...
-sensor.set_framesize(sensor.QVGA) # or QQVGA...
-sensor.skip_frames(time = 2000)
-clock = time.clock()
-
-while(True):
-    clock.tick()
-
-    img = sensor.snapshot()
-
-    for i in range(20):
-        x = (pyb.rng() % (2*img.width())) - (img.width()//2)
-        y = (pyb.rng() % (2*img.height())) - (img.height()//2)
-        rot = pyb.rng() % 360
-
-        r = (pyb.rng() % 127) + 128
-        g = (pyb.rng() % 127) + 128
-        b = (pyb.rng() % 127) + 128
-
-        # This method draws a keypoints object or a list of (x, y, rot) tuples...
-        img.draw_keypoints([(x, y, rot)], color = (r, g, b), size = 20, thickness = 2, fill = False)
-
-    print(clock.fps())
diff --git a/scripts/examples/OpenMV/03-Drawing/line_drawing.py b/scripts/examples/OpenMV/03-Drawing/line_drawing.py
deleted file mode 100644
index eb2d761bd..000000000
--- a/scripts/examples/OpenMV/03-Drawing/line_drawing.py
+++ /dev/null
@@ -1,31 +0,0 @@
-# Line Drawing
-#
-# This example shows off drawing lines on the OpenMV Cam.
-
-import sensor, image, time, pyb
-
-sensor.reset()
-sensor.set_pixformat(sensor.RGB565) # or GRAYSCALE...
-sensor.set_framesize(sensor.QVGA) # or QQVGA...
-sensor.skip_frames(time = 2000)
-clock = time.clock()
-
-while(True):
-    clock.tick()
-
-    img = sensor.snapshot()
-
-    for i in range(10):
-        x0 = (pyb.rng() % (2*img.width())) - (img.width()//2)
-        y0 = (pyb.rng() % (2*img.height())) - (img.height()//2)
-        x1 = (pyb.rng() % (2*img.width())) - (img.width()//2)
-        y1 = (pyb.rng() % (2*img.height())) - (img.height()//2)
-        r = (pyb.rng() % 127) + 128
-        g = (pyb.rng() % 127) + 128
-        b = (pyb.rng() % 127) + 128
-
-        # If the first argument is a scalar then this method expects
-        # to see x0, y0, x1, and y1. Otherwise, it expects a (x0,y0,x1,y1) tuple.
-        img.draw_line(x0, y0, x1, y1, color = (r, g, b), thickness = 2)
-
-    print(clock.fps())
diff --git a/scripts/examples/OpenMV/03-Drawing/rectangle_drawing.py b/scripts/examples/OpenMV/03-Drawing/rectangle_drawing.py
deleted file mode 100644
index ab2afac66..000000000
--- a/scripts/examples/OpenMV/03-Drawing/rectangle_drawing.py
+++ /dev/null
@@ -1,31 +0,0 @@
-# Rectangle Drawing
-#
-# This example shows off drawing rectangles on the OpenMV Cam.
-
-import sensor, image, time, pyb
-
-sensor.reset()
-sensor.set_pixformat(sensor.RGB565) # or GRAYSCALE...
-sensor.set_framesize(sensor.QVGA) # or QQVGA...
-sensor.skip_frames(time = 2000)
-clock = time.clock()
-
-while(True):
-    clock.tick()
-
-    img = sensor.snapshot()
-
-    for i in range(10):
-        x = (pyb.rng() % (2*img.width())) - (img.width()//2)
-        y = (pyb.rng() % (2*img.height())) - (img.height()//2)
-        w = (pyb.rng() % (img.width()//2))
-        h = (pyb.rng() % (img.height()//2))
-        r = (pyb.rng() % 127) + 128
-        g = (pyb.rng() % 127) + 128
-        b = (pyb.rng() % 127) + 128
-
-        # If the first argument is a scalar then this method expects
-        # to see x, y, w, and h. Otherwise, it expects a (x,y,w,h) tuple.
-        img.draw_rectangle(x, y, w, h, color = (r, g, b), thickness = 2, fill = False)
-
-    print(clock.fps())
diff --git a/scripts/examples/OpenMV/03-Drawing/text_drawing.py b/scripts/examples/OpenMV/03-Drawing/text_drawing.py
deleted file mode 100644
index da37af656..000000000
--- a/scripts/examples/OpenMV/03-Drawing/text_drawing.py
+++ /dev/null
@@ -1,33 +0,0 @@
-# Text Drawing
-#
-# This example shows off drawing text on the OpenMV Cam.
-
-import sensor, image, time, pyb
-
-sensor.reset()
-sensor.set_pixformat(sensor.RGB565) # or GRAYSCALE...
-sensor.set_framesize(sensor.QVGA) # or QQVGA...
-sensor.skip_frames(time = 2000)
-clock = time.clock()
-
-while(True):
-    clock.tick()
-
-    img = sensor.snapshot()
-
-    for i in range(10):
-        x = (pyb.rng() % (2*img.width())) - (img.width()//2)
-        y = (pyb.rng() % (2*img.height())) - (img.height()//2)
-        r = (pyb.rng() % 127) + 128
-        g = (pyb.rng() % 127) + 128
-        b = (pyb.rng() % 127) + 128
-
-        # If the first argument is a scalar then this method expects
-        # to see x, y, and text. Otherwise, it expects a (x,y,text) tuple.
-
-        # Character and string rotation can be done at 0, 90, 180, 270, etc. degrees.
-        img.draw_string(x, y, "Hello World!", color = (r, g, b), scale = 2, mono_space = False,
-                        char_rotation = 0, char_hmirror = False, char_vflip = False,
-                        string_rotation = 0, string_hmirror = False, string_vflip = False)
-
-    print(clock.fps())
diff --git a/scripts/examples/OpenMV/04-Image-Filters/adaptive_histogram_equalization.py b/scripts/examples/OpenMV/04-Image-Filters/adaptive_histogram_equalization.py
deleted file mode 100644
index a958b531e..000000000
--- a/scripts/examples/OpenMV/04-Image-Filters/adaptive_histogram_equalization.py
+++ /dev/null
@@ -1,29 +0,0 @@
-# Adaptive Histogram Equalization
-#
-# This example shows off how to use adaptive histogram equalization to improve
-# the contrast in the image. Adaptive histogram equalization splits the image
-# into regions and then equalizes the histogram in those regions to improve
-# the image contrast versus a global histogram equalization. Additionally,
-# you may specify a clip limit to prevent the contrast from going wild.
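One quick way to get a feel for clip_limit is to sweep it over the same scene; a small illustrative sketch using the histeq() call from the example below (the values are arbitrary):

img = sensor.snapshot()
for limit in (1, 3, 8):
    # clip_limit=1 is a no-op; larger values allow bigger local contrast swings.
    # In practice you would display or save each result to compare them.
    img.copy().histeq(adaptive=True, clip_limit=limit)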
-
-import sensor, image, time
-
-sensor.reset()
-sensor.set_pixformat(sensor.RGB565)
-sensor.set_framesize(sensor.QQVGA)
-sensor.skip_frames(time = 2000)
-clock = time.clock()
-
-while(True):
-    clock.tick()
-
-    # A clip_limit of < 0 gives you normal adaptive histogram equalization
-    # which may result in huge amounts of contrast noise...
-
-    # A clip_limit of 1 does nothing. For best results go slightly higher
-    # than 1 like below. The higher you go the closer you get back to
-    # standard adaptive histogram equalization with huge contrast swings.
-
-    img = sensor.snapshot().histeq(adaptive=True, clip_limit=3)
-
-    print(clock.fps())
diff --git a/scripts/examples/OpenMV/04-Image-Filters/blur_filter.py b/scripts/examples/OpenMV/04-Image-Filters/blur_filter.py
deleted file mode 100644
index 6074d2f2b..000000000
--- a/scripts/examples/OpenMV/04-Image-Filters/blur_filter.py
+++ /dev/null
@@ -1,21 +0,0 @@
-# Blur Filter Example
-#
-# This example shows off using the gaussian filter to blur images.
-
-import sensor, image, time
-
-sensor.reset() # Initialize the camera sensor.
-sensor.set_pixformat(sensor.GRAYSCALE) # or sensor.RGB565
-sensor.set_framesize(sensor.QQVGA) # or sensor.QVGA (or others)
-sensor.skip_frames(time = 2000) # Let new settings take effect.
-clock = time.clock() # Tracks FPS.
-
-while(True):
-    clock.tick() # Track elapsed milliseconds between snapshots().
-    img = sensor.snapshot() # Take a picture and return the image.
-
-    # Run the kernel on every pixel of the image.
-    img.gaussian(1)
-
-    print(clock.fps()) # Note: Your OpenMV Cam runs about half as fast while
-    # connected to your computer. The FPS should increase once disconnected.
diff --git a/scripts/examples/OpenMV/04-Image-Filters/cartoon_filter.py b/scripts/examples/OpenMV/04-Image-Filters/cartoon_filter.py
deleted file mode 100644
index c1703ca47..000000000
--- a/scripts/examples/OpenMV/04-Image-Filters/cartoon_filter.py
+++ /dev/null
@@ -1,29 +0,0 @@
-# Cartoon Filter
-#
-# This example shows off a simple cartoon filter on images. The cartoon
-# filter works by joining similar pixel areas of an image and replacing
-# the pixels in those areas with the area mean.
-
-import sensor, image, time
-
-sensor.reset()
-sensor.set_pixformat(sensor.RGB565) # or GRAYSCALE...
-sensor.set_framesize(sensor.QVGA) # or QQVGA...
-sensor.skip_frames(time = 2000)
-clock = time.clock()
-
-while(True):
-    clock.tick()
-
-    # seed_threshold controls the maximum area growth of a colored
-    # region. Making this larger will merge more pixels.
-
-    # floating_thresholds controls the maximum pixel-to-pixel difference
-    # when growing a region. Setting this very high will quickly combine
-    # all pixels in the image. You should keep this small.
-
-    # cartoon() will grow regions while both thresholds are satisfied...
-
-    img = sensor.snapshot().cartoon(seed_threshold=0.05, floating_thresholds=0.05)
-
-    print(clock.fps())
diff --git a/scripts/examples/OpenMV/04-Image-Filters/color_bilateral_filter.py b/scripts/examples/OpenMV/04-Image-Filters/color_bilateral_filter.py
deleted file mode 100644
index 1bdbbb7eb..000000000
--- a/scripts/examples/OpenMV/04-Image-Filters/color_bilateral_filter.py
+++ /dev/null
@@ -1,33 +0,0 @@
-# Color Bilateral Filter Example
-#
-# This example shows off using the bilateral filter on color images.
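Where the gaussian blur above smooths edges and noise alike, the bilateral filter below only averages pixels that are also close in value, so edges survive. A side-by-side sketch with parameters copied from the two examples (assumes the usual sensor setup):

img = sensor.snapshot()
blurred = img.copy().gaussian(1) # smooths everything, including edges
edge_aware = img.copy().bilateral(3, color_sigma=0.1, space_sigma=1) # noise smoothed, edges kept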
-sensor.set_pixformat(sensor.RGB565) # or sensor.GRAYSCALE
-sensor.set_framesize(sensor.QQVGA) # or sensor.QVGA (or others)
-sensor.skip_frames(time = 2000) # Let new settings take effect.
-clock = time.clock() # Tracks FPS.
-
-while(True):
-    clock.tick() # Track elapsed milliseconds between snapshots().
-    img = sensor.snapshot() # Take a picture and return the image.
-
-    # color_sigma controls how close color wise pixels have to be to each other to be
-    # blurred together. A smaller value means they have to be closer.
-    # A larger value is less strict.
-
-    # space_sigma controls how close space wise pixels have to be to each other to be
-    # blurred together. A smaller value means they have to be closer.
-    # A larger value is less strict.
-
-    # Run the kernel on every pixel of the image.
-    img.bilateral(3, color_sigma=0.1, space_sigma=1)
-
-    # Note that the bilateral filter can introduce image defects if you set
-    # color_sigma/space_sigma too aggressively. Increase the sigma values until
-    # the defects go away if you see them.
-
-    print(clock.fps()) # Note: Your OpenMV Cam runs about half as fast while
-    # connected to your computer. The FPS should increase once disconnected.
diff --git a/scripts/examples/OpenMV/04-Image-Filters/color_binary_filter.py b/scripts/examples/OpenMV/04-Image-Filters/color_binary_filter.py
deleted file mode 100644
index b8686f7bc..000000000
--- a/scripts/examples/OpenMV/04-Image-Filters/color_binary_filter.py
+++ /dev/null
@@ -1,61 +0,0 @@
-# Color Binary Filter Example
-#
-# This script shows off the binary image filter. You may pass binary() any
-# number of thresholds to segment the image by.
-
-import sensor, image, time
-
-sensor.reset()
-sensor.set_framesize(sensor.QVGA)
-sensor.set_pixformat(sensor.RGB565)
-sensor.skip_frames(time = 2000)
-clock = time.clock()
-
-# Use the Tools -> Machine Vision -> Threshold Editor to pick better thresholds.
-red_threshold = (0,100, 0,127, 0,127) # L A B
-green_threshold = (0,100, -128,0, 0,127) # L A B
-blue_threshold = (0,100, -128,127, -128,0) # L A B
-
-while(True):
-
-    # Test red threshold
-    for i in range(100):
-        clock.tick()
-        img = sensor.snapshot()
-        img.binary([red_threshold])
-        print(clock.fps())
-
-    # Test green threshold
-    for i in range(100):
-        clock.tick()
-        img = sensor.snapshot()
-        img.binary([green_threshold])
-        print(clock.fps())
-
-    # Test blue threshold
-    for i in range(100):
-        clock.tick()
-        img = sensor.snapshot()
-        img.binary([blue_threshold])
-        print(clock.fps())
-
-    # Test not red threshold
-    for i in range(100):
-        clock.tick()
-        img = sensor.snapshot()
-        img.binary([red_threshold], invert = 1)
-        print(clock.fps())
-
-    # Test not green threshold
-    for i in range(100):
-        clock.tick()
-        img = sensor.snapshot()
-        img.binary([green_threshold], invert = 1)
-        print(clock.fps())
-
-    # Test not blue threshold
-    for i in range(100):
-        clock.tick()
-        img = sensor.snapshot()
-        img.binary([blue_threshold], invert = 1)
-        print(clock.fps())
diff --git a/scripts/examples/OpenMV/04-Image-Filters/color_light_removal.py b/scripts/examples/OpenMV/04-Image-Filters/color_light_removal.py
deleted file mode 100644
index 270104cfb..000000000
--- a/scripts/examples/OpenMV/04-Image-Filters/color_light_removal.py
+++ /dev/null
@@ -1,25 +0,0 @@
-# Color Light Removal
-#
-# This example shows off how to remove bright lights from the image.
-# You can do this using the binary() method with the "zero=" argument.
-#
-# Removing bright lights from the image allows you to now use
-# histeq() on the image without outliers from oversaturated
-# parts of the image breaking the algorithm...
-
-import sensor, image, time
-
-sensor.reset() # Initialize the camera sensor.
-sensor.set_pixformat(sensor.RGB565) # or sensor.GRAYSCALE
-sensor.set_framesize(sensor.QQVGA) # or sensor.QVGA (or others)
-sensor.skip_frames(time = 2000) # Let new settings take effect.
-clock = time.clock() # Tracks FPS.
-
-thresholds = (90, 100, -128, 127, -128, 127)
-
-while(True):
-    clock.tick() # Track elapsed milliseconds between snapshots().
-    img = sensor.snapshot().binary([thresholds], invert=False, zero=True)
-
-    print(clock.fps()) # Note: Your OpenMV Cam runs about half as fast while
-    # connected to your computer. The FPS should increase once disconnected.
diff --git a/scripts/examples/OpenMV/04-Image-Filters/edge_filter.py b/scripts/examples/OpenMV/04-Image-Filters/edge_filter.py
deleted file mode 100644
index cdc03ddf3..000000000
--- a/scripts/examples/OpenMV/04-Image-Filters/edge_filter.py
+++ /dev/null
@@ -1,21 +0,0 @@
-# Edge Filter Example
-#
-# This example shows off using the laplacian filter to detect edges.
-
-import sensor, image, time
-
-sensor.reset() # Initialize the camera sensor.
-sensor.set_pixformat(sensor.GRAYSCALE) # or sensor.RGB565
-sensor.set_framesize(sensor.QQVGA) # or sensor.QVGA (or others)
-sensor.skip_frames(time = 2000) # Let new settings take effect.
-clock = time.clock() # Tracks FPS.
-
-while(True):
-    clock.tick() # Track elapsed milliseconds between snapshots().
-    img = sensor.snapshot() # Take a picture and return the image.
-
-    # Run the kernel on every pixel of the image.
-    img.laplacian(1)
-
-    print(clock.fps()) # Note: Your OpenMV Cam runs about half as fast while
-    # connected to your computer. The FPS should increase once disconnected.
diff --git a/scripts/examples/OpenMV/04-Image-Filters/erode_and_dilate.py b/scripts/examples/OpenMV/04-Image-Filters/erode_and_dilate.py
deleted file mode 100644
index 06a6fde68..000000000
--- a/scripts/examples/OpenMV/04-Image-Filters/erode_and_dilate.py
+++ /dev/null
@@ -1,35 +0,0 @@
-# Erode and Dilate Example
-#
-# This example shows off the erode and dilate functions which you can run on
-# a binary image to remove noise. This example was originally a test but it's
-# useful for showing off how these functions work.
-
-import pyb, sensor, image
-
-sensor.reset()
-sensor.set_framesize(sensor.QVGA)
-
-grayscale_thres = (170, 255)
-rgb565_thres = (70, 100, -128, 127, -128, 127)
-
-while(True):
-
-    sensor.set_pixformat(sensor.GRAYSCALE)
-    for i in range(20):
-        img = sensor.snapshot()
-        img.binary([grayscale_thres])
-        img.erode(2)
-    for i in range(20):
-        img = sensor.snapshot()
-        img.binary([grayscale_thres])
-        img.dilate(2)
-
-    sensor.set_pixformat(sensor.RGB565)
-    for i in range(20):
-        img = sensor.snapshot()
-        img.binary([rgb565_thres])
-        img.erode(2)
-    for i in range(20):
-        img = sensor.snapshot()
-        img.binary([rgb565_thres])
-        img.dilate(2)
diff --git a/scripts/examples/OpenMV/04-Image-Filters/gamma_correction.py b/scripts/examples/OpenMV/04-Image-Filters/gamma_correction.py
deleted file mode 100644
index 2dd1137c5..000000000
--- a/scripts/examples/OpenMV/04-Image-Filters/gamma_correction.py
+++ /dev/null
@@ -1,21 +0,0 @@
-# Gamma Correction
-#
-# This example shows off gamma correction to make the image brighter. The gamma
-# correction method can also fix contrast and brightness.
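# [Editor's aside -- illustrative, not part of the original patch.] Per the
# comment in the loop below, gamma_corr() scales each channel, applies the
# gamma curve, and rescales, so gamma < 1.0 brightens and gamma > 1.0 darkens.
# A hedged sketch sweeping the parameter (values are assumptions):
#
#     for g in (0.5, 1.0, 2.0):
#         img = sensor.snapshot().gamma_corr(gamma = g)
#         print("gamma =", g)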
-
-import sensor, image, time
-
-sensor.reset()
-sensor.set_pixformat(sensor.RGB565)
-sensor.set_framesize(sensor.QVGA)
-sensor.skip_frames(time = 2000)
-clock = time.clock()
-
-while(True):
-    clock.tick()
-
-    # Gamma, contrast, and brightness correction are applied to each color channel. The
-    # values are scaled to the range per color channel per image type...
-    img = sensor.snapshot().gamma_corr(gamma = 0.5, contrast = 1.0, brightness = 0.0)
-
-    print(clock.fps())
diff --git a/scripts/examples/OpenMV/04-Image-Filters/grayscale_bilateral_filter.py b/scripts/examples/OpenMV/04-Image-Filters/grayscale_bilateral_filter.py
deleted file mode 100644
index 6b3a67b21..000000000
--- a/scripts/examples/OpenMV/04-Image-Filters/grayscale_bilateral_filter.py
+++ /dev/null
@@ -1,33 +0,0 @@
-# Grayscale Bilateral Filter Example
-#
-# This example shows off using the bilateral filter on grayscale images.
-
-import sensor, image, time
-
-sensor.reset() # Initialize the camera sensor.
-sensor.set_pixformat(sensor.GRAYSCALE) # or sensor.RGB565
-sensor.set_framesize(sensor.QQVGA) # or sensor.QVGA (or others)
-sensor.skip_frames(time = 2000) # Let new settings take effect.
-clock = time.clock() # Tracks FPS.
-
-while(True):
-    clock.tick() # Track elapsed milliseconds between snapshots().
-    img = sensor.snapshot() # Take a picture and return the image.
-
-    # color_sigma controls how close color wise pixels have to be to each other to be
-    # blurred together. A smaller value means they have to be closer.
-    # A larger value is less strict.
-
-    # space_sigma controls how close space wise pixels have to be to each other to be
-    # blurred together. A smaller value means they have to be closer.
-    # A larger value is less strict.
-
-    # Run the kernel on every pixel of the image.
-    img.bilateral(3, color_sigma=0.1, space_sigma=1)
-
-    # Note that the bilateral filter can introduce image defects if you set
-    # color_sigma/space_sigma too aggressively. Increase the sigma values until
-    # the defects go away if you see them.
-
-    print(clock.fps()) # Note: Your OpenMV Cam runs about half as fast while
-    # connected to your computer. The FPS should increase once disconnected.
diff --git a/scripts/examples/OpenMV/04-Image-Filters/grayscale_binary_filter.py b/scripts/examples/OpenMV/04-Image-Filters/grayscale_binary_filter.py
deleted file mode 100644
index dfaed5012..000000000
--- a/scripts/examples/OpenMV/04-Image-Filters/grayscale_binary_filter.py
+++ /dev/null
@@ -1,45 +0,0 @@
-# Grayscale Binary Filter Example
-#
-# This script shows off the binary image filter. You may pass binary() any
-# number of thresholds to segment the image by.
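# [Editor's aside -- illustrative, not part of the original patch.] Because
# binary() takes a list, several ranges can be segmented in a single call
# rather than one at a time as the loop below does, e.g. with the two
# thresholds this example defines next:
#
#     img.binary([low_threshold, high_threshold])  # white if in EITHER range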
-
-import sensor, image, time
-
-sensor.reset()
-sensor.set_framesize(sensor.QVGA)
-sensor.set_pixformat(sensor.GRAYSCALE)
-sensor.skip_frames(time = 2000)
-clock = time.clock()
-
-low_threshold = (0, 50)
-high_threshold = (205, 255)
-
-while(True):
-
-    # Test low threshold
-    for i in range(100):
-        clock.tick()
-        img = sensor.snapshot()
-        img.binary([low_threshold])
-        print(clock.fps())
-
-    # Test high threshold
-    for i in range(100):
-        clock.tick()
-        img = sensor.snapshot()
-        img.binary([high_threshold])
-        print(clock.fps())
-
-    # Test not low threshold
-    for i in range(100):
-        clock.tick()
-        img = sensor.snapshot()
-        img.binary([low_threshold], invert = 1)
-        print(clock.fps())
-
-    # Test not high threshold
-    for i in range(100):
-        clock.tick()
-        img = sensor.snapshot()
-        img.binary([high_threshold], invert = 1)
-        print(clock.fps())
diff --git a/scripts/examples/OpenMV/04-Image-Filters/grayscale_light_removal.py b/scripts/examples/OpenMV/04-Image-Filters/grayscale_light_removal.py
deleted file mode 100644
index d42b8a8e9..000000000
--- a/scripts/examples/OpenMV/04-Image-Filters/grayscale_light_removal.py
+++ /dev/null
@@ -1,25 +0,0 @@
-# Grayscale Light Removal
-#
-# This example shows off how to remove bright lights from the image.
-# You can do this using the binary() method with the "zero=" argument.
-#
-# Removing bright lights from the image allows you to now use
-# histeq() on the image without outliers from oversaturated
-# parts of the image breaking the algorithm...
-
-import sensor, image, time
-
-sensor.reset() # Initialize the camera sensor.
-sensor.set_pixformat(sensor.GRAYSCALE) # or sensor.RGB565
-sensor.set_framesize(sensor.QQVGA) # or sensor.QVGA (or others)
-sensor.skip_frames(time = 2000) # Let new settings take effect.
-clock = time.clock() # Tracks FPS.
-
-thresholds = (220, 255)
-
-while(True):
-    clock.tick() # Track elapsed milliseconds between snapshots().
-    img = sensor.snapshot().binary([thresholds], invert=False, zero=True)
-
-    print(clock.fps()) # Note: Your OpenMV Cam runs about half as fast while
-    # connected to your computer. The FPS should increase once disconnected.
diff --git a/scripts/examples/OpenMV/04-Image-Filters/histogram_equalization.py b/scripts/examples/OpenMV/04-Image-Filters/histogram_equalization.py
deleted file mode 100644
index 2a3aece50..000000000
--- a/scripts/examples/OpenMV/04-Image-Filters/histogram_equalization.py
+++ /dev/null
@@ -1,19 +0,0 @@
-# Histogram Equalization
-#
-# This example shows off how to use histogram equalization to improve
-# the contrast in the image.
-
-import sensor, image, time
-
-sensor.reset()
-sensor.set_pixformat(sensor.RGB565)
-sensor.set_framesize(sensor.QQVGA)
-sensor.skip_frames(time = 2000)
-clock = time.clock()
-
-while(True):
-    clock.tick()
-
-    img = sensor.snapshot().histeq()
-
-    print(clock.fps())
diff --git a/scripts/examples/OpenMV/04-Image-Filters/kernel_filters.py b/scripts/examples/OpenMV/04-Image-Filters/kernel_filters.py
deleted file mode 100644
index 9b9dd565f..000000000
--- a/scripts/examples/OpenMV/04-Image-Filters/kernel_filters.py
+++ /dev/null
@@ -1,27 +0,0 @@
-# Kernel Filtering Example
-#
-# This example shows off how to use a generic kernel filter.
-
-import sensor, image, time
-
-sensor.reset() # Initialize the camera sensor.
-sensor.set_pixformat(sensor.GRAYSCALE) # or sensor.RGB565
-sensor.set_framesize(sensor.QVGA) # or sensor.QQVGA (or others)
-sensor.skip_frames(time = 2000) # Let new settings take effect.
-clock = time.clock() # Tracks FPS.
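# [Editor's aside -- illustrative, not part of the original patch.] morph()
# accepts any (2N+1)x(2N+1) kernel flattened into a list; the example defines
# an emboss-like kernel next. As a hedged alternative, a classic 3x3 high-pass
# kernel would look like:
#
#     high_pass = [-1, -1, -1,
#                  -1, +8, -1,
#                  -1, -1, -1]
#     img.morph(1, high_pass)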
-
-kernel_size = 1 # 3x3==1, 5x5==2, 7x7==3, etc.
-
-kernel = [-2, -1,  0, \
-          -1,  1,  1, \
-           0,  1,  2]
-
-while(True):
-    clock.tick() # Track elapsed milliseconds between snapshots().
-    img = sensor.snapshot() # Take a picture and return the image.
-
-    # Run the kernel on every pixel of the image.
-    img.morph(kernel_size, kernel)
-
-    print(clock.fps()) # Note: Your OpenMV Cam runs about half as fast while
-    # connected to your computer. The FPS should increase once disconnected.
diff --git a/scripts/examples/OpenMV/04-Image-Filters/lens_correction.py b/scripts/examples/OpenMV/04-Image-Filters/lens_correction.py
deleted file mode 100644
index 56066f583..000000000
--- a/scripts/examples/OpenMV/04-Image-Filters/lens_correction.py
+++ /dev/null
@@ -1,21 +0,0 @@
-# Lens Correction
-#
-# This example shows off how to use the lens correction method to fix lens
-# distortion in an image. You need to do this for qrcode / barcode / data matrix
-# detection. Increase the strength below until lines are straight in the view.
-# Zoom in (higher) or out (lower) until you see enough of the image.
-
-import sensor, image, time
-
-sensor.reset()
-sensor.set_pixformat(sensor.RGB565)
-sensor.set_framesize(sensor.QVGA)
-sensor.skip_frames(time = 2000)
-clock = time.clock()
-
-while(True):
-    clock.tick()
-
-    img = sensor.snapshot().lens_corr(strength = 1.8, zoom = 1.0)
-
-    print(clock.fps())
diff --git a/scripts/examples/OpenMV/04-Image-Filters/linear_polar.py b/scripts/examples/OpenMV/04-Image-Filters/linear_polar.py
deleted file mode 100644
index 98106542f..000000000
--- a/scripts/examples/OpenMV/04-Image-Filters/linear_polar.py
+++ /dev/null
@@ -1,21 +0,0 @@
-# Linear Polar Mapping Example
-#
-# This example shows off re-projecting the image using a linear polar
-# transformation. Linear polar images are useful in that rotations
-# become translations in the X direction and linear changes
-# in scale become linear translations in the Y direction.
-
-import sensor, image, time
-
-sensor.reset() # Initialize the camera sensor.
-sensor.set_pixformat(sensor.RGB565) # or sensor.GRAYSCALE
-sensor.set_framesize(sensor.QQVGA) # or sensor.QVGA (or others)
-sensor.skip_frames(time = 2000) # Let new settings take effect.
-clock = time.clock() # Tracks FPS.
-
-while(True):
-    clock.tick() # Track elapsed milliseconds between snapshots().
-    img = sensor.snapshot().linpolar(reverse=False)
-
-    print(clock.fps()) # Note: Your OpenMV Cam runs about half as fast while
-    # connected to your computer. The FPS should increase once disconnected.
diff --git a/scripts/examples/OpenMV/04-Image-Filters/log_polar.py b/scripts/examples/OpenMV/04-Image-Filters/log_polar.py
deleted file mode 100644
index d79f374b9..000000000
--- a/scripts/examples/OpenMV/04-Image-Filters/log_polar.py
+++ /dev/null
@@ -1,21 +0,0 @@
-# Log Polar Mapping Example
-#
-# This example shows off re-projecting the image using a log polar
-# transformation. Log polar images are useful in that rotations
-# become translations in the X direction and exponential changes
-# in scale (x2, x4, etc.) become linear translations in the Y direction.
-
-import sensor, image, time
-
-sensor.reset() # Initialize the camera sensor.
-sensor.set_pixformat(sensor.RGB565) # or sensor.GRAYSCALE
-sensor.set_framesize(sensor.QQVGA) # or sensor.QVGA (or others)
-sensor.skip_frames(time = 2000) # Let new settings take effect.
-clock = time.clock() # Tracks FPS.
-
-while(True):
-    clock.tick() # Track elapsed milliseconds between snapshots().
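    # [Editor's aside -- not part of the original patch.] linpolar() and
    # logpolar() also accept reverse=True, which (hedged from the
    # reverse=False default used here) maps a polar image back to Cartesian.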
-    img = sensor.snapshot().logpolar(reverse=False)
-
-    print(clock.fps()) # Note: Your OpenMV Cam runs about half as fast while
-    # connected to your computer. The FPS should increase once disconnected.
diff --git a/scripts/examples/OpenMV/04-Image-Filters/mean_adaptive_threshold_filter.py b/scripts/examples/OpenMV/04-Image-Filters/mean_adaptive_threshold_filter.py
deleted file mode 100644
index 2d140ecc4..000000000
--- a/scripts/examples/OpenMV/04-Image-Filters/mean_adaptive_threshold_filter.py
+++ /dev/null
@@ -1,25 +0,0 @@
-# Mean Adaptive Threshold Filter Example
-#
-# This example shows off mean filtering with adaptive thresholding.
-# When mean(threshold=True) the mean() method adaptive thresholds the image
-# by comparing the mean of the pixels around a pixel, minus an offset, with that pixel.
-
-import sensor, image, time
-
-sensor.reset() # Initialize the camera sensor.
-sensor.set_pixformat(sensor.RGB565) # or sensor.GRAYSCALE
-sensor.set_framesize(sensor.QQVGA) # or sensor.QVGA (or others)
-sensor.skip_frames(time = 2000) # Let new settings take effect.
-clock = time.clock() # Tracks FPS.
-
-while(True):
-    clock.tick() # Track elapsed milliseconds between snapshots().
-    img = sensor.snapshot() # Take a picture and return the image.
-
-    # The first argument is the kernel size. N corresponds to a ((N*2)+1)^2
-    # kernel size. E.g. 1 == 3x3 kernel, 2 == 5x5 kernel, etc. Note: You
-    # shouldn't ever need to use a value bigger than 2.
-    img.mean(1, threshold=True, offset=5, invert=True)
-
-    print(clock.fps()) # Note: Your OpenMV Cam runs about half as fast while
-    # connected to your computer. The FPS should increase once disconnected.
diff --git a/scripts/examples/OpenMV/04-Image-Filters/mean_filter.py b/scripts/examples/OpenMV/04-Image-Filters/mean_filter.py
deleted file mode 100644
index c6de0c81b..000000000
--- a/scripts/examples/OpenMV/04-Image-Filters/mean_filter.py
+++ /dev/null
@@ -1,25 +0,0 @@
-# Mean Filter Example
-#
-# This example shows off mean filtering. Mean filtering is your standard average
-# filter in an NxN neighborhood. Mean filtering removes noise in the image by
-# blurring everything. But, it's the fastest kernel filter operation.
-
-import sensor, image, time
-
-sensor.reset() # Initialize the camera sensor.
-sensor.set_pixformat(sensor.RGB565) # or sensor.GRAYSCALE
-sensor.set_framesize(sensor.QQVGA) # or sensor.QVGA (or others)
-sensor.skip_frames(time = 2000) # Let new settings take effect.
-clock = time.clock() # Tracks FPS.
-
-while(True):
-    clock.tick() # Track elapsed milliseconds between snapshots().
-    img = sensor.snapshot() # Take a picture and return the image.
-
-    # The only argument is the kernel size. N corresponds to a ((N*2)+1)^2
-    # kernel size. E.g. 1 == 3x3 kernel, 2 == 5x5 kernel, etc. Note: You
-    # shouldn't ever need to use a value bigger than 2.
-    img.mean(1)
-
-    print(clock.fps()) # Note: Your OpenMV Cam runs about half as fast while
-    # connected to your computer. The FPS should increase once disconnected.
diff --git a/scripts/examples/OpenMV/04-Image-Filters/median_adaptive_threshold_filter.py b/scripts/examples/OpenMV/04-Image-Filters/median_adaptive_threshold_filter.py
deleted file mode 100644
index 673b28482..000000000
--- a/scripts/examples/OpenMV/04-Image-Filters/median_adaptive_threshold_filter.py
+++ /dev/null
@@ -1,27 +0,0 @@
-# Median Adaptive Threshold Filter Example
-#
-# This example shows off median filtering with adaptive thresholding.
-# When median(threshold=True) the median() method adaptive thresholds the image
-# by comparing the median of the pixels around a pixel, minus an offset, with that pixel.
-
-import sensor, image, time
-
-sensor.reset() # Initialize the camera sensor.
-sensor.set_pixformat(sensor.RGB565) # or sensor.GRAYSCALE
-sensor.set_framesize(sensor.QQVGA) # or sensor.QVGA (or others)
-sensor.skip_frames(time = 2000) # Let new settings take effect.
-clock = time.clock() # Tracks FPS.
-
-while(True):
-    clock.tick() # Track elapsed milliseconds between snapshots().
-    img = sensor.snapshot() # Take a picture and return the image.
-
-    # The first argument to the median filter is the kernel size; it can be
-    # either 0, 1, or 2 for a 1x1, 3x3, or 5x5 kernel respectively. The second
-    # argument "percentile" is the percentile number to choose from the NxN
-    # neighborhood. 0.5 is the median, 0.25 is the lower quartile, and 0.75
-    # would be the upper quartile.
-    img.median(1, percentile=0.5, threshold=True, offset=5, invert=True)
-
-    print(clock.fps()) # Note: Your OpenMV Cam runs about half as fast while
-    # connected to your computer. The FPS should increase once disconnected.
diff --git a/scripts/examples/OpenMV/04-Image-Filters/median_filter.py b/scripts/examples/OpenMV/04-Image-Filters/median_filter.py
deleted file mode 100644
index 441464a36..000000000
--- a/scripts/examples/OpenMV/04-Image-Filters/median_filter.py
+++ /dev/null
@@ -1,27 +0,0 @@
-# Median Filter Example
-#
-# This example shows off median filtering. Median filtering replaces every pixel
-# with the median value of its NxN neighborhood. Median filtering is good for
-# removing noise in the image while preserving edges.
-
-import sensor, image, time
-
-sensor.reset() # Initialize the camera sensor.
-sensor.set_pixformat(sensor.RGB565) # or sensor.GRAYSCALE
-sensor.set_framesize(sensor.QQVGA) # or sensor.QVGA (or others)
-sensor.skip_frames(time = 2000) # Let new settings take effect.
-clock = time.clock() # Tracks FPS.
-
-while(True):
-    clock.tick() # Track elapsed milliseconds between snapshots().
-    img = sensor.snapshot() # Take a picture and return the image.
-
-    # The first argument to the median filter is the kernel size; it can be
-    # either 0, 1, or 2 for a 1x1, 3x3, or 5x5 kernel respectively. The second
-    # argument "percentile" is the percentile number to choose from the NxN
-    # neighborhood. 0.5 is the median, 0.25 is the lower quartile, and 0.75
-    # would be the upper quartile.
-    img.median(1, percentile=0.5)
-
-    print(clock.fps()) # Note: Your OpenMV Cam runs about half as fast while
-    # connected to your computer. The FPS should increase once disconnected.
diff --git a/scripts/examples/OpenMV/04-Image-Filters/midpoint_adaptive_threshold_filter.py b/scripts/examples/OpenMV/04-Image-Filters/midpoint_adaptive_threshold_filter.py
deleted file mode 100644
index adaeaaa5d..000000000
--- a/scripts/examples/OpenMV/04-Image-Filters/midpoint_adaptive_threshold_filter.py
+++ /dev/null
@@ -1,28 +0,0 @@
-# Midpoint Adaptive Threshold Filter Example
-#
-# This example shows off midpoint filtering with adaptive thresholding.
-# When midpoint(threshold=True) the midpoint() method adaptive thresholds the image
-# by comparing the midpoint of the pixels around a pixel, minus an offset, with that pixel.
-
-import sensor, image, time
-
-sensor.reset() # Initialize the camera sensor.
-sensor.set_pixformat(sensor.RGB565) # or sensor.GRAYSCALE
-sensor.set_framesize(sensor.QQVGA) # or sensor.QVGA (or others)
-sensor.skip_frames(time = 2000) # Let new settings take effect.
-clock = time.clock() # Tracks FPS.
-
-while(True):
-    clock.tick() # Track elapsed milliseconds between snapshots().
-    img = sensor.snapshot() # Take a picture and return the image.
-
-    # The first argument is the kernel size. N corresponds to a ((N*2)+1)^2
-    # kernel size. E.g. 1 == 3x3 kernel, 2 == 5x5 kernel, etc. Note: You
-    # shouldn't ever need to use a value bigger than 2. The "bias" argument
-    # lets you select between min and max blending. 0.5 == midpoint filter,
-    # 0.0 == min filter, and 1.0 == max filter. Note that the min filter
-    # makes images darker while the max filter makes images lighter.
-    img.midpoint(1, bias=0.5, threshold=True, offset=5, invert=True)
-
-    print(clock.fps()) # Note: Your OpenMV Cam runs about half as fast while
-    # connected to your computer. The FPS should increase once disconnected.
diff --git a/scripts/examples/OpenMV/04-Image-Filters/midpoint_filter.py b/scripts/examples/OpenMV/04-Image-Filters/midpoint_filter.py
deleted file mode 100644
index ee9ab5d94..000000000
--- a/scripts/examples/OpenMV/04-Image-Filters/midpoint_filter.py
+++ /dev/null
@@ -1,27 +0,0 @@
-# Midpoint Filter Example
-#
-# This example shows off midpoint filtering. Midpoint filtering replaces each
-# pixel by the average of the min and max pixel values for an NxN neighborhood.
-
-import sensor, image, time
-
-sensor.reset() # Initialize the camera sensor.
-sensor.set_pixformat(sensor.RGB565) # or sensor.GRAYSCALE
-sensor.set_framesize(sensor.QQVGA) # or sensor.QVGA (or others)
-sensor.skip_frames(time = 2000) # Let new settings take effect.
-clock = time.clock() # Tracks FPS.
-
-while(True):
-    clock.tick() # Track elapsed milliseconds between snapshots().
-    img = sensor.snapshot() # Take a picture and return the image.
-
-    # The first argument is the kernel size. N corresponds to a ((N*2)+1)^2
-    # kernel size. E.g. 1 == 3x3 kernel, 2 == 5x5 kernel, etc. Note: You
-    # shouldn't ever need to use a value bigger than 2. The "bias" argument
-    # lets you select between min and max blending. 0.5 == midpoint filter,
-    # 0.0 == min filter, and 1.0 == max filter. Note that the min filter
-    # makes images darker while the max filter makes images lighter.
-    img.midpoint(1, bias=0.5)
-
-    print(clock.fps()) # Note: Your OpenMV Cam runs about half as fast while
-    # connected to your computer. The FPS should increase once disconnected.
diff --git a/scripts/examples/OpenMV/04-Image-Filters/mode_adaptive_threshold_filter.py b/scripts/examples/OpenMV/04-Image-Filters/mode_adaptive_threshold_filter.py
deleted file mode 100644
index 8ab9a0675..000000000
--- a/scripts/examples/OpenMV/04-Image-Filters/mode_adaptive_threshold_filter.py
+++ /dev/null
@@ -1,25 +0,0 @@
-# Mode Adaptive Threshold Filter Example
-#
-# This example shows off mode filtering with adaptive thresholding.
-# When mode(threshold=True) the mode() method adaptive thresholds the image
-# by comparing the mode of the pixels around a pixel, minus an offset, with that pixel.
-# Avoid using the mode filter on RGB565 images. It will cause artifacts on image edges...
-
-import sensor, image, time
-
-sensor.reset() # Initialize the camera sensor.
-sensor.set_pixformat(sensor.GRAYSCALE) # or sensor.RGB565
-sensor.set_framesize(sensor.QQVGA) # or sensor.QVGA (or others)
-sensor.skip_frames(time = 2000) # Let new settings take effect.
-clock = time.clock() # Tracks FPS.
-
-while(True):
-    clock.tick() # Track elapsed milliseconds between snapshots().
-    img = sensor.snapshot() # Take a picture and return the image.
-
-    # The only argument to the mode filter is the kernel size, it can be
-    # either 0, 1, or 2 for a 1x1, 3x3, or 5x5 kernel respectively.
-    img.mode(1, threshold=True, offset=5, invert=True)
-
-    print(clock.fps()) # Note: Your OpenMV Cam runs about half as fast while
-    # connected to your computer. The FPS should increase once disconnected.
diff --git a/scripts/examples/OpenMV/04-Image-Filters/mode_filter.py b/scripts/examples/OpenMV/04-Image-Filters/mode_filter.py
deleted file mode 100644
index 170937c58..000000000
--- a/scripts/examples/OpenMV/04-Image-Filters/mode_filter.py
+++ /dev/null
@@ -1,25 +0,0 @@
-# Mode Filter Example
-#
-# This example shows off mode filtering. Mode filtering is a highly non-linear
-# operation which replaces each pixel with the mode of the NxN neighborhood
-# of pixels around it. Avoid using the mode filter on RGB565 images. It will
-# cause artifacts on image edges...
-
-import sensor, image, time
-
-sensor.reset() # Initialize the camera sensor.
-sensor.set_pixformat(sensor.GRAYSCALE) # or sensor.RGB565
-sensor.set_framesize(sensor.QQVGA) # or sensor.QVGA (or others)
-sensor.skip_frames(time = 2000) # Let new settings take effect.
-clock = time.clock() # Tracks FPS.
-
-while(True):
-    clock.tick() # Track elapsed milliseconds between snapshots().
-    img = sensor.snapshot() # Take a picture and return the image.
-
-    # The only argument to the mode filter is the kernel size, it can be
-    # either 0, 1, or 2 for a 1x1, 3x3, or 5x5 kernel respectively.
-    img.mode(1)
-
-    print(clock.fps()) # Note: Your OpenMV Cam runs about half as fast while
-    # connected to your computer. The FPS should increase once disconnected.
diff --git a/scripts/examples/OpenMV/04-Image-Filters/negative.py b/scripts/examples/OpenMV/04-Image-Filters/negative.py
deleted file mode 100644
index 36186cba9..000000000
--- a/scripts/examples/OpenMV/04-Image-Filters/negative.py
+++ /dev/null
@@ -1,19 +0,0 @@
-# Negative Example
-#
-# This example shows off negating the image. This is not a particularly
-# useful method but it can come in handy once in a while.
-
-import sensor, image, time
-
-sensor.reset() # Initialize the camera sensor.
-sensor.set_pixformat(sensor.RGB565) # or sensor.GRAYSCALE
-sensor.set_framesize(sensor.QVGA) # or sensor.QQVGA (or others)
-sensor.skip_frames(time = 2000) # Let new settings take effect.
-clock = time.clock() # Tracks FPS.
-
-while(True):
-    clock.tick() # Track elapsed milliseconds between snapshots().
-    img = sensor.snapshot().negate()
-
-    print(clock.fps()) # Note: Your OpenMV Cam runs about half as fast while
-    # connected to your computer. The FPS should increase once disconnected.
diff --git a/scripts/examples/OpenMV/04-Image-Filters/perspective_and_rotation_correction.py b/scripts/examples/OpenMV/04-Image-Filters/perspective_and_rotation_correction.py
deleted file mode 100644
index b89117d40..000000000
--- a/scripts/examples/OpenMV/04-Image-Filters/perspective_and_rotation_correction.py
+++ /dev/null
@@ -1,71 +0,0 @@
-# Rotation Correction
-#
-# This example shows off how to use the rotation_corr() to both correct for
-# perspective distortion and then to rotate the new corrected image in 3D
-# space afterwards to handle movement.
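# [Editor's aside -- illustrative, not part of the original patch.] The
# corners= argument performs the same perspective warp as the standalone
# perspective_correction.py example further down; the x/y/z_rotation arguments
# then rotate that corrected view in 3D. A minimal hedged call combining the
# two, using the TARGET_POINTS defined below:
#
#     img.rotation_corr(corners = TARGET_POINTS, z_rotation = 10)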
-
-import sensor, image, time
-
-sensor.reset()
-sensor.set_pixformat(sensor.RGB565)
-sensor.set_framesize(sensor.QVGA)
-sensor.skip_frames(time = 2000)
-clock = time.clock()
-
-# The image will be warped such that the following points become the new:
-#
-#   (0, 0)
-#   (w-1, 0)
-#   (w-1, h-1)
-#   (0, h-1)
-#
-# Try setting the points below to the corners of a quadrilateral
-# (in clockwise order) in the field-of-view. You can get points
-# on the image by clicking and dragging on the frame buffer and
-# recording the values shown in the histogram widget.
-
-w = sensor.width()
-h = sensor.height()
-
-TARGET_POINTS = [(0,   0),   # (x, y) CHANGE ME!
-                 (w-1, 0),   # (x, y) CHANGE ME!
-                 (w-1, h-1), # (x, y) CHANGE ME!
-                 (0,   h-1)] # (x, y) CHANGE ME!
-
-# Degrees per frame to rotate by...
-X_ROTATION_DEGREE_RATE = 5
-Y_ROTATION_DEGREE_RATE = 0.5
-Z_ROTATION_DEGREE_RATE = 0
-X_OFFSET = 0
-Y_OFFSET = 0
-
-ZOOM_AMOUNT = 1 # Lower zooms out - Higher zooms in.
-FOV_WINDOW = 25 # Between 0 and 180. Represents the field-of-view of the scene
-                # window when rotating the image in 3D space. Values closer to
-                # zero result in lines becoming straighter as the window
-                # moves away from the image being rotated in 3D space. A large
-                # value moves the window closer to the image in 3D space which
-                # results in more perspective distortion and sometimes
-                # the image in 3D intersecting the scene window.
-
-x_rotation_counter = 0
-y_rotation_counter = 0
-z_rotation_counter = 0
-
-while(True):
-    clock.tick()
-
-    img = sensor.snapshot().rotation_corr(x_rotation = x_rotation_counter, \
-                                          y_rotation = y_rotation_counter, \
-                                          z_rotation = z_rotation_counter, \
-                                          x_translation = X_OFFSET, \
-                                          y_translation = Y_OFFSET, \
-                                          zoom = ZOOM_AMOUNT, \
-                                          fov = FOV_WINDOW, \
-                                          corners = TARGET_POINTS)
-
-    x_rotation_counter += X_ROTATION_DEGREE_RATE
-    y_rotation_counter += Y_ROTATION_DEGREE_RATE
-    z_rotation_counter += Z_ROTATION_DEGREE_RATE
-
-    print(clock.fps())
diff --git a/scripts/examples/OpenMV/04-Image-Filters/perspective_correction.py b/scripts/examples/OpenMV/04-Image-Filters/perspective_correction.py
deleted file mode 100644
index a8f2aa58d..000000000
--- a/scripts/examples/OpenMV/04-Image-Filters/perspective_correction.py
+++ /dev/null
@@ -1,39 +0,0 @@
-# Perspective Correction
-#
-# This example shows off how to use the rotation_corr() to fix perspective
-# issues related to how your OpenMV Cam is mounted.
-
-import sensor, image, time
-
-sensor.reset()
-sensor.set_pixformat(sensor.RGB565)
-sensor.set_framesize(sensor.QVGA)
-sensor.skip_frames(time = 2000)
-clock = time.clock()
-
-# The image will be warped such that the following points become the new:
-#
-#   (0, 0)
-#   (w-1, 0)
-#   (w-1, h-1)
-#   (0, h-1)
-#
-# Try setting the points below to the corners of a quadrilateral
-# (in clockwise order) in the field-of-view. You can get points
-# on the image by clicking and dragging on the frame buffer and
-# recording the values shown in the histogram widget.
-
-w = sensor.width()
-h = sensor.height()
-
-TARGET_POINTS = [(0,   0),   # (x, y) CHANGE ME!
-                 (w-1, 0),   # (x, y) CHANGE ME!
-                 (w-1, h-1), # (x, y) CHANGE ME!
-                 (0,   h-1)] # (x, y) CHANGE ME!
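# [Editor's aside -- illustrative, not part of the original patch.] A
# hypothetical filled-in version of the block above; these coordinates are
# invented for illustration and must be measured from your own scene:
#
#     TARGET_POINTS = [(51, 33), (269, 28), (290, 206), (32, 210)]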
-
-while(True):
-    clock.tick()
-
-    img = sensor.snapshot().rotation_corr(corners = TARGET_POINTS)
-
-    print(clock.fps())
diff --git a/scripts/examples/OpenMV/04-Image-Filters/rotation_correction.py b/scripts/examples/OpenMV/04-Image-Filters/rotation_correction.py
deleted file mode 100644
index b95e41d78..000000000
--- a/scripts/examples/OpenMV/04-Image-Filters/rotation_correction.py
+++ /dev/null
@@ -1,49 +0,0 @@
-# Rotation Correction
-#
-# This example shows off how to use the rotation_corr() to play with the scene
-# window your OpenMV Cam sees.
-
-import sensor, image, time
-
-# Degrees per frame to rotate by...
-X_ROTATION_DEGREE_RATE = 5
-Y_ROTATION_DEGREE_RATE = 0.5
-Z_ROTATION_DEGREE_RATE = 0
-X_OFFSET = 0
-Y_OFFSET = 0
-
-ZOOM_AMOUNT = 1 # Lower zooms out - Higher zooms in.
-FOV_WINDOW = 60 # Between 0 and 180. Represents the field-of-view of the scene
-                # window when rotating the image in 3D space. Values closer to
-                # zero result in lines becoming straighter as the window
-                # moves away from the image being rotated in 3D space. A large
-                # value moves the window closer to the image in 3D space which
-                # results in more perspective distortion and sometimes
-                # the image in 3D intersecting the scene window.
-
-sensor.reset()
-sensor.set_pixformat(sensor.RGB565)
-sensor.set_framesize(sensor.QVGA)
-sensor.skip_frames(time = 2000)
-clock = time.clock()
-
-x_rotation_counter = 0
-y_rotation_counter = 0
-z_rotation_counter = 0
-
-while(True):
-    clock.tick()
-
-    img = sensor.snapshot().rotation_corr(x_rotation = x_rotation_counter, \
-                                          y_rotation = y_rotation_counter, \
-                                          z_rotation = z_rotation_counter, \
-                                          x_translation = X_OFFSET, \
-                                          y_translation = Y_OFFSET, \
-                                          zoom = ZOOM_AMOUNT, \
-                                          fov = FOV_WINDOW)
-
-    x_rotation_counter += X_ROTATION_DEGREE_RATE
-    y_rotation_counter += Y_ROTATION_DEGREE_RATE
-    z_rotation_counter += Z_ROTATION_DEGREE_RATE
-
-    print(clock.fps())
diff --git a/scripts/examples/OpenMV/04-Image-Filters/sharpen_filter.py b/scripts/examples/OpenMV/04-Image-Filters/sharpen_filter.py
deleted file mode 100644
index 0f541e203..000000000
--- a/scripts/examples/OpenMV/04-Image-Filters/sharpen_filter.py
+++ /dev/null
@@ -1,21 +0,0 @@
-# Sharpen Filter Example
-#
-# This example shows off using the laplacian filter to sharpen images.
-
-import sensor, image, time
-
-sensor.reset() # Initialize the camera sensor.
-sensor.set_pixformat(sensor.GRAYSCALE) # or sensor.RGB565
-sensor.set_framesize(sensor.QQVGA) # or sensor.QVGA (or others)
-sensor.skip_frames(time = 2000) # Let new settings take effect.
-clock = time.clock() # Tracks FPS.
-
-while(True):
-    clock.tick() # Track elapsed milliseconds between snapshots().
-    img = sensor.snapshot() # Take a picture and return the image.
-
-    # Run the kernel on every pixel of the image.
-    img.laplacian(1, sharpen=True)
-
-    print(clock.fps()) # Note: Your OpenMV Cam runs about half as fast while
-    # connected to your computer. The FPS should increase once disconnected.
diff --git a/scripts/examples/OpenMV/04-Image-Filters/ulab.py b/scripts/examples/OpenMV/04-Image-Filters/ulab.py
deleted file mode 100644
index b7cfa4927..000000000
--- a/scripts/examples/OpenMV/04-Image-Filters/ulab.py
+++ /dev/null
@@ -1,19 +0,0 @@
-# Ulab is a numpy-like module for micropython, meant to simplify and speed up common
-# mathematical operations on arrays. This basic example shows mean/std on an image.
-#
-# NOTE: ndarrays cause the heap to be fragmented easily. If you run out of memory,
-# there's not much that can be done about it; lowering the resolution might help.
-
-import sensor, image, time
-from ulab import numpy as np
-
-sensor.reset() # Reset and initialize the sensor.
-sensor.set_pixformat(sensor.GRAYSCALE) # Set pixel format to GRAYSCALE (or RGB565)
-sensor.set_framesize(sensor.QQVGA) # Set frame size to QQVGA (160x120)
-clock = time.clock() # Create a clock object to track the FPS.
-
-while (True):
-    img = sensor.snapshot() # Take a picture and return the image.
-    a = np.array(img, dtype=np.uint8)
-    print("mean: %d std:%d"%(np.mean(a), np.std(a)))
-
diff --git a/scripts/examples/OpenMV/04-Image-Filters/unsharp_filter.py b/scripts/examples/OpenMV/04-Image-Filters/unsharp_filter.py
deleted file mode 100644
index eb8eb2270..000000000
--- a/scripts/examples/OpenMV/04-Image-Filters/unsharp_filter.py
+++ /dev/null
@@ -1,21 +0,0 @@
-# Unsharp Filter Example
-#
-# This example shows off using the gaussian filter to unsharp mask filter images.
-
-import sensor, image, time
-
-sensor.reset() # Initialize the camera sensor.
-sensor.set_pixformat(sensor.GRAYSCALE) # or sensor.RGB565
-sensor.set_framesize(sensor.QQVGA) # or sensor.QVGA (or others)
-sensor.skip_frames(time = 2000) # Let new settings take effect.
-clock = time.clock() # Tracks FPS.
-
-while(True):
-    clock.tick() # Track elapsed milliseconds between snapshots().
-    img = sensor.snapshot() # Take a picture and return the image.
-
-    # Run the kernel on every pixel of the image.
-    img.gaussian(1, unsharp=True)
-
-    print(clock.fps()) # Note: Your OpenMV Cam runs about half as fast while
-    # connected to your computer. The FPS should increase once disconnected.
diff --git a/scripts/examples/OpenMV/04-Image-Filters/vflip_hmirror_transpose.py b/scripts/examples/OpenMV/04-Image-Filters/vflip_hmirror_transpose.py
deleted file mode 100644
index bf43c4e73..000000000
--- a/scripts/examples/OpenMV/04-Image-Filters/vflip_hmirror_transpose.py
+++ /dev/null
@@ -1,33 +0,0 @@
-# Vertical Flip - Horizontal Mirror - Transpose
-#
-# This example shows off how to vertically flip, horizontally mirror, or
-# transpose an image. Note that:
-#
-# vflip=False, hmirror=False, transpose=False -> 0 degree rotation
-# vflip=True,  hmirror=False, transpose=True  -> 90 degree rotation
-# vflip=True,  hmirror=True,  transpose=False -> 180 degree rotation
-# vflip=False, hmirror=True,  transpose=True  -> 270 degree rotation
-
-import sensor, image, time, pyb
-
-sensor.reset()
-sensor.set_pixformat(sensor.RGB565)
-sensor.set_framesize(sensor.QVGA)
-sensor.skip_frames(time = 2000)
-clock = time.clock()
-
-mills = pyb.millis()
-counter = 0
-
-while(True):
-    clock.tick()
-
-    img = sensor.snapshot().replace(vflip=(counter//2)%2,
-                                    hmirror=(counter//4)%2,
-                                    transpose=(counter//8)%2)
-
-    if (pyb.millis() > (mills + 1000)):
-        mills = pyb.millis()
-        counter += 1
-
-    print(clock.fps())
diff --git a/scripts/examples/OpenMV/05-Snapshot/emboss_snapshot.py b/scripts/examples/OpenMV/05-Snapshot/emboss_snapshot.py
deleted file mode 100644
index 69bc105c6..000000000
--- a/scripts/examples/OpenMV/05-Snapshot/emboss_snapshot.py
+++ /dev/null
@@ -1,33 +0,0 @@
-# Emboss Snapshot Example
-#
-# Note: You will need an SD card to run this example.
-#
-# You can use your OpenMV Cam to save modified image files.
-
-import sensor, image, pyb
-
-RED_LED_PIN = 1
-BLUE_LED_PIN = 3
-
-sensor.reset() # Initialize the camera sensor.
-sensor.set_pixformat(sensor.RGB565) # or sensor.GRAYSCALE
-sensor.set_framesize(sensor.QVGA) # or sensor.QQVGA (or others)
-sensor.skip_frames(time = 2000) # Let new settings take effect.
-
-pyb.LED(RED_LED_PIN).on()
-sensor.skip_frames(time = 2000) # Give the user time to get ready.
-
-pyb.LED(RED_LED_PIN).off()
-pyb.LED(BLUE_LED_PIN).on()
-
-print("You're on camera!")
-img = sensor.snapshot()
-
-img.morph(1, [+2, +1, +0,\
-              +1, +1, -1,\
-              +0, -1, -2]) # Emboss the image.
-
-img.save("example.jpg") # or "example.bmp" (or others)
-
-pyb.LED(BLUE_LED_PIN).off()
-print("Done! Reset the camera to see the saved image.")
diff --git a/scripts/examples/OpenMV/05-Snapshot/snapshot.py b/scripts/examples/OpenMV/05-Snapshot/snapshot.py
deleted file mode 100644
index 8eb621e6d..000000000
--- a/scripts/examples/OpenMV/05-Snapshot/snapshot.py
+++ /dev/null
@@ -1,27 +0,0 @@
-# Snapshot Example
-#
-# Note: You will need an SD card to run this example.
-#
-# You can use your OpenMV Cam to save image files.
-
-import sensor, image, pyb
-
-RED_LED_PIN = 1
-BLUE_LED_PIN = 3
-
-sensor.reset() # Initialize the camera sensor.
-sensor.set_pixformat(sensor.RGB565) # or sensor.GRAYSCALE
-sensor.set_framesize(sensor.QVGA) # or sensor.QQVGA (or others)
-sensor.skip_frames(time = 2000) # Let new settings take effect.
-
-pyb.LED(RED_LED_PIN).on()
-sensor.skip_frames(time = 2000) # Give the user time to get ready.
-
-pyb.LED(RED_LED_PIN).off()
-pyb.LED(BLUE_LED_PIN).on()
-
-print("You're on camera!")
-sensor.snapshot().save("example.jpg") # or "example.bmp" (or others)
-
-pyb.LED(BLUE_LED_PIN).off()
-print("Done! Reset the camera to see the saved image.")
diff --git a/scripts/examples/OpenMV/05-Snapshot/snapshot_on_face_detection.py b/scripts/examples/OpenMV/05-Snapshot/snapshot_on_face_detection.py
deleted file mode 100644
index a716df263..000000000
--- a/scripts/examples/OpenMV/05-Snapshot/snapshot_on_face_detection.py
+++ /dev/null
@@ -1,51 +0,0 @@
-# Snapshot on Face Detection Example
-#
-# Note: You will need an SD card to run this example.
-#
-# This example demonstrates using face tracking on your OpenMV Cam to take a
-# picture.
-
-import sensor, image, pyb
-
-RED_LED_PIN = 1
-BLUE_LED_PIN = 3
-
-sensor.reset() # Initialize the camera sensor.
-sensor.set_pixformat(sensor.GRAYSCALE)
-sensor.set_framesize(sensor.HQVGA) # or sensor.QQVGA (or others)
-sensor.skip_frames(time = 2000) # Let new settings take effect.
-
-# Load up a face detection HaarCascade. This is an object that your OpenMV Cam
-# can use to detect faces using the find_features() method below. Your OpenMV
-# Cam has the frontalface HaarCascade built-in. By default, all the stages of the
-# HaarCascade are loaded. However, you can adjust the number of stages to speed
-# up processing at the expense of accuracy. The frontalface HaarCascade has 25
-# stages.
-face_cascade = image.HaarCascade("frontalface", stages=25)
-
-while(True):
-
-    pyb.LED(RED_LED_PIN).on()
-    print("About to start detecting faces...")
-    sensor.skip_frames(time = 2000) # Give the user time to get ready.
-
-    pyb.LED(RED_LED_PIN).off()
-    print("Now detecting faces!")
-    pyb.LED(BLUE_LED_PIN).on()
-
-    diff = 10 # We'll say we detected a face after 10 frames.
-    while(diff):
-        img = sensor.snapshot()
-        # Threshold can be between 0.0 and 1.0. A higher threshold results in a
-        # higher detection rate with more false positives. The scale value
-        # controls the matching scale allowing you to detect smaller faces.
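        # [Editor's aside -- illustrative, not part of the original patch.]
        # Hedged examples of the trade-off described above (values assumed):
        #
        #     # Stricter: fewer false positives, may miss faces.
        #     faces = img.find_features(face_cascade, threshold=0.75, scale_factor=1.35)
        #     # Looser: more detections (and false positives); a scale_factor
        #     # closer to 1.0 searches more scales and finds smaller faces.
        #     faces = img.find_features(face_cascade, threshold=0.4, scale_factor=1.15)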
-        faces = img.find_features(face_cascade, threshold=0.5, scale_factor=1.5)
-
-        if faces:
-            diff -= 1
-            for r in faces:
-                img.draw_rectangle(r)
-
-    pyb.LED(BLUE_LED_PIN).off()
-    print("Face detected! Saving image...")
-    sensor.snapshot().save("snapshot-%d.jpg" % pyb.rng()) # Save Pic.
diff --git a/scripts/examples/OpenMV/05-Snapshot/time_lapse_photos.py b/scripts/examples/OpenMV/05-Snapshot/time_lapse_photos.py
deleted file mode 100644
index be6c370dd..000000000
--- a/scripts/examples/OpenMV/05-Snapshot/time_lapse_photos.py
+++ /dev/null
@@ -1,67 +0,0 @@
-# Time Lapse Photos (Credit nedhorning)
-#
-# This example shows off how to take time lapse photos using your OpenMV
-# Cam and using the RTC module along with a timer interrupt to achieve
-# very low power operation.
-#
-# Note that if the USB is still plugged in when the camera is taking
-# pictures it will run the bootloader each time. Please power the camera
-# from something other than USB to not have the bootloader run.
-
-import pyb, machine, sensor, image, os
-
-# Create and init RTC object. This will allow us to set the current time for
-# the RTC and let us set an interrupt to wake up later on.
-rtc = pyb.RTC()
-newFile = False
-
-try:
-    os.stat('time.txt')
-except OSError: # If the log file doesn't exist then set the RTC and set newFile to True
-    # datetime format: year, month, day, weekday (Monday=1, Sunday=7),
-    # hours (24 hour clock), minutes, seconds, subseconds (counts down from 255 to 0)
-    rtc.datetime((2018, 3, 9, 5, 13, 0, 0, 0))
-    newFile = True
-
-# Extract the date and time from the RTC object.
-dateTime = rtc.datetime()
-year = str(dateTime[0])
-month = '%02d' % dateTime[1]
-day = '%02d' % dateTime[2]
-hour = '%02d' % dateTime[4]
-minute = '%02d' % dateTime[5]
-second = '%02d' % dateTime[6]
-subSecond = str(dateTime[7])
-
-newName='I'+year+month+day+hour+minute+second # Image file name based on RTC
-
-# Enable RTC interrupts every 10 seconds, camera will RESET after wakeup from deepsleep Mode.
-rtc.wakeup(10000)
-
-BLUE_LED_PIN = 3
-
-sensor.reset() # Initialize the camera sensor.
-sensor.set_pixformat(sensor.GRAYSCALE)
-sensor.set_framesize(sensor.VGA)
-sensor.skip_frames(time = 1000) # Let new settings take effect.
-
-# Let folks know we are about to take a picture.
-pyb.LED(BLUE_LED_PIN).on()
-
-if(newFile): # If log file does not exist then create it.
-    with open('time.txt', 'a') as timeFile: # Write text file to keep track of date, time and image number.
-        timeFile.write('Date and time format: year, month, day, hours, minutes, seconds, subseconds' + '\n')
-        timeFile.write(newName + ',' + year + ',' + month + ',' + day + ',' + hour + ',' + minute + ',' + second + ',' + subSecond + '\n')
-else:
-    with open('time.txt', 'a') as timeFile: # Append date, time and image number to text file.
-        timeFile.write(newName + ',' + year + ',' + month + ',' + day + ',' + hour + ',' + minute + ',' + second + ',' + subSecond + '\n')
-
-if not "images" in os.listdir(): os.mkdir("images") # Make a temp directory
-
-# Take photo and save to SD card
-img = sensor.snapshot()
-img.save('images/' + newName, quality=90)
-pyb.LED(BLUE_LED_PIN).off()
-
-# Enter Deepsleep Mode (i.e. the OpenMV Cam effectively turns itself off except for the RTC).
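# [Editor's aside -- not part of the original patch.] Waking from deepsleep
# behaves like a reset, so this script re-runs from the top. On MicroPython
# ports that expose it, the wakeup can be distinguished from a cold boot:
#
#     if machine.reset_cause() == machine.DEEPSLEEP_RESET:
#         pass  # woke from deepsleep (e.g. skip re-seeding the RTC)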
-machine.deepsleep()
diff --git a/scripts/examples/OpenMV/06-Video-Recording/gif.py b/scripts/examples/OpenMV/06-Video-Recording/gif.py
deleted file mode 100644
index 35f0933d7..000000000
--- a/scripts/examples/OpenMV/06-Video-Recording/gif.py
+++ /dev/null
@@ -1,37 +0,0 @@
-# GIF Video Recording Example
-#
-# Note: You will need an SD card to run this example.
-#
-# You can use your OpenMV Cam to record gif files. You can either feed the
-# recorder object RGB565 frames or Grayscale frames. Use photo editing software
-# like GIMP to compress and optimize the Gif before uploading it to the web.
-
-import sensor, image, time, gif, pyb
-
-RED_LED_PIN = 1
-BLUE_LED_PIN = 3
-
-sensor.reset() # Initialize the camera sensor.
-sensor.set_pixformat(sensor.RGB565) # or sensor.GRAYSCALE
-sensor.set_framesize(sensor.QQVGA) # or sensor.QVGA (or others)
-sensor.skip_frames(time = 2000) # Let new settings take effect.
-clock = time.clock() # Tracks FPS.
-
-pyb.LED(RED_LED_PIN).on()
-sensor.skip_frames(time = 2000) # Give the user time to get ready.
-
-pyb.LED(RED_LED_PIN).off()
-pyb.LED(BLUE_LED_PIN).on()
-
-g = gif.Gif("example.gif", loop=True)
-
-print("You're on camera!")
-for i in range(100):
-    clock.tick()
-    # clock.avg() returns the milliseconds between frames - gif delay is in
-    g.add_frame(sensor.snapshot(), delay=int(clock.avg()/10)) # centiseconds.
-    print(clock.fps())
-
-g.close()
-pyb.LED(BLUE_LED_PIN).off()
-print("Done! Reset the camera to see the saved recording.")
diff --git a/scripts/examples/OpenMV/06-Video-Recording/gif_on_face_detection.py b/scripts/examples/OpenMV/06-Video-Recording/gif_on_face_detection.py
deleted file mode 100644
index 0732ca1c8..000000000
--- a/scripts/examples/OpenMV/06-Video-Recording/gif_on_face_detection.py
+++ /dev/null
@@ -1,65 +0,0 @@
-# GIF Video Recording on Face Detection Example
-#
-# Note: You will need an SD card to run this example.
-#
-# You can use your OpenMV Cam to record gif files. You can either feed the
-# recorder object RGB565 frames or Grayscale frames. Use photo editing software
-# like GIMP to compress and optimize the Gif before uploading it to the web.
-#
-# This example demonstrates using face tracking on your OpenMV Cam to take a
-# gif.
-
-import sensor, image, time, gif, pyb
-
-RED_LED_PIN = 1
-BLUE_LED_PIN = 3
-
-sensor.reset() # Initialize the camera sensor.
-sensor.set_pixformat(sensor.GRAYSCALE) # or sensor.RGB565
-sensor.set_framesize(sensor.QQVGA) # or sensor.HQVGA (or others)
-sensor.skip_frames(time = 2000) # Let new settings take effect.
-
-# Load up a face detection HaarCascade. This is an object that your OpenMV Cam
-# can use to detect faces using the find_features() method below. Your OpenMV
-# Cam has the frontalface HaarCascade built-in. By default, all the stages of the
-# HaarCascade are loaded. However, you can adjust the number of stages to speed
-# up processing at the expense of accuracy. The frontalface HaarCascade has 25
-# stages.
-face_cascade = image.HaarCascade("frontalface", stages=25)
-
-while(True):
-
-    pyb.LED(RED_LED_PIN).on()
-    print("About to start detecting faces...")
-    sensor.skip_frames(time = 2000) # Give the user time to get ready.
-
-    pyb.LED(RED_LED_PIN).off()
-    print("Now detecting faces!")
-    pyb.LED(BLUE_LED_PIN).on()
-
-    diff = 10 # We'll say we detected a face after 10 frames.
-    while(diff):
-        img = sensor.snapshot()
-        # Threshold can be between 0.0 and 1.0. A higher threshold results in a
-        # higher detection rate with more false positives. The scale value
-        # controls the matching scale allowing you to detect smaller faces.
-        faces = img.find_features(face_cascade, threshold=0.5, scale_factor=1.5)
-
-        if faces:
-            diff -= 1
-            for r in faces:
-                img.draw_rectangle(r)
-
-    g = gif.Gif("example-%d.gif" % pyb.rng(), loop=True)
-
-    clock = time.clock() # Tracks FPS.
-    print("You're on camera!")
-    for i in range(100):
-        clock.tick()
-        # clock.avg() returns the milliseconds between frames - gif delay is in
-        g.add_frame(sensor.snapshot(), delay=int(clock.avg()/10)) # centiseconds.
-        print(clock.fps())
-
-    g.close()
-    pyb.LED(BLUE_LED_PIN).off()
-    print("Restarting...")
diff --git a/scripts/examples/OpenMV/06-Video-Recording/imageio_memory.py b/scripts/examples/OpenMV/06-Video-Recording/imageio_memory.py
deleted file mode 100644
index d64341be4..000000000
--- a/scripts/examples/OpenMV/06-Video-Recording/imageio_memory.py
+++ /dev/null
@@ -1,33 +0,0 @@
-# Image Memory Stream I/O Example
-#
-# This example shows how to use the ImageIO stream to record frames in memory and play them back.
-# Note: While this should work on any board, the board should have an SDRAM to be of any use.
-import sensor, image, time
-
-# Number of frames to pre-allocate and record
-N_FRAMES = 500
-
-sensor.reset()
-sensor.set_pixformat(sensor.RGB565)
-sensor.set_framesize(sensor.QVGA)
-
-# This frame size must match the image size passed to ImageIO
-sensor.set_windowing((120, 120))
-sensor.skip_frames(time = 2000)
-
-clock = time.clock()
-
-# Write to memory stream
-stream = image.ImageIO((120, 120, sensor.RGB565), N_FRAMES)
-
-for i in range(0, N_FRAMES):
-    clock.tick()
-    stream.write(sensor.snapshot())
-    print(clock.fps())
-
-while (True):
-    # Rewind stream and play back
-    stream.seek(0)
-    for i in range(0, N_FRAMES):
-        img = stream.read(copy_to_fb=True, pause=True)
-        # Do machine vision algorithms on the image here.
diff --git a/scripts/examples/OpenMV/06-Video-Recording/imageio_read.py b/scripts/examples/OpenMV/06-Video-Recording/imageio_read.py
deleted file mode 100644
index 430c2fe30..000000000
--- a/scripts/examples/OpenMV/06-Video-Recording/imageio_read.py
+++ /dev/null
@@ -1,32 +0,0 @@
-# Image Reader Example
-#
-# USE THIS EXAMPLE WITH AN SD CARD!
-#
-# This example shows how to use the Image Reader object to replay snapshots of what your
-# OpenMV Cam saw saved by the Image Writer object for testing machine vision algorithms.
-
-# Altered to allow full speed reading from SD card for extraction of sequences to the network etc.
-# Set the new pause parameter to False.
-
-import sensor, image, time
-
-snapshot_source = False # Set to true once finished to pull data from sensor.
-
-sensor.reset()
-sensor.set_pixformat(sensor.RGB565)
-sensor.set_framesize(sensor.QQVGA)
-sensor.skip_frames(time = 2000)
-clock = time.clock()
-
-stream = None
-if snapshot_source == False:
-    stream = image.ImageIO("/stream.bin", "r")
-
-while(True):
-    clock.tick()
-    if snapshot_source:
-        img = sensor.snapshot()
-    else:
-        img = stream.read(copy_to_fb=True, loop=True, pause=True)
-    # Do machine vision algorithms on the image here.
-    print(clock.fps())
diff --git a/scripts/examples/OpenMV/06-Video-Recording/imageio_write.py b/scripts/examples/OpenMV/06-Video-Recording/imageio_write.py
deleted file mode 100644
index aa3f5bff0..000000000
--- a/scripts/examples/OpenMV/06-Video-Recording/imageio_write.py
+++ /dev/null
@@ -1,36 +0,0 @@
-# Image Writer Example
-#
-# USE THIS EXAMPLE WITH AN SD CARD! Reset the camera after recording to see the file.
-#
-# This example shows how to use the Image Writer object to record snapshots of what your
-# OpenMV Cam sees for later analysis using the Image Reader object. Images written to disk
-# by the Image Writer object are stored in a simple file format readable by your OpenMV Cam.
-
-import sensor, image, pyb, time
-
-record_time = 10000 # 10 seconds in milliseconds
-
-sensor.reset()
-sensor.set_pixformat(sensor.RGB565)
-sensor.set_framesize(sensor.QQVGA)
-sensor.skip_frames(time = 2000)
-clock = time.clock()
-
-stream = image.ImageIO("/stream.bin", "w")
-
-# Red LED on means we are capturing frames.
-pyb.LED(1).on()
-
-start = pyb.millis()
-while pyb.elapsed_millis(start) < record_time:
-    clock.tick()
-    img = sensor.snapshot()
-    # Modify the image if you feel like here...
-    stream.write(img)
-    print(clock.fps())
-
-stream.close()
-
-# Blue LED on means we are done.
-pyb.LED(1).off()
-pyb.LED(3).on()
diff --git a/scripts/examples/OpenMV/06-Video-Recording/mjpeg.py b/scripts/examples/OpenMV/06-Video-Recording/mjpeg.py
deleted file mode 100644
index 1e3f732e4..000000000
--- a/scripts/examples/OpenMV/06-Video-Recording/mjpeg.py
+++ /dev/null
@@ -1,37 +0,0 @@
-# MJPEG Video Recording Example
-#
-# Note: You will need an SD card to run this demo.
-#
-# You can use your OpenMV Cam to record mjpeg files. You can either feed the
-# recorder object JPEG frames or RGB565/Grayscale frames. Once you've finished
-# recording a Mjpeg file you can use VLC to play it. If you are on Ubuntu then
-# the built-in video player will work too.
-
-import sensor, image, time, mjpeg, pyb
-
-RED_LED_PIN = 1
-BLUE_LED_PIN = 3
-
-sensor.reset() # Initialize the camera sensor.
-sensor.set_pixformat(sensor.RGB565) # or sensor.GRAYSCALE
-sensor.set_framesize(sensor.QVGA) # or sensor.QQVGA (or others)
-sensor.skip_frames(time = 2000) # Let new settings take effect.
-clock = time.clock() # Tracks FPS.
-
-pyb.LED(RED_LED_PIN).on()
-sensor.skip_frames(time = 2000) # Give the user time to get ready.
-
-pyb.LED(RED_LED_PIN).off()
-pyb.LED(BLUE_LED_PIN).on()
-
-m = mjpeg.Mjpeg("example.mjpeg")
-
-print("You're on camera!")
-for i in range(200):
-    clock.tick()
-    m.add_frame(sensor.snapshot())
-    print(clock.fps())
-
-m.close(clock.fps())
-pyb.LED(BLUE_LED_PIN).off()
-print("Done! Reset the camera to see the saved recording.")
diff --git a/scripts/examples/OpenMV/06-Video-Recording/mjpeg_on_face_detection.py b/scripts/examples/OpenMV/06-Video-Recording/mjpeg_on_face_detection.py
deleted file mode 100644
index fd567a4f6..000000000
--- a/scripts/examples/OpenMV/06-Video-Recording/mjpeg_on_face_detection.py
+++ /dev/null
@@ -1,65 +0,0 @@
-# MJPEG Video Recording on Face Detection Example
-#
-# Note: You will need an SD card to run this example.
-#
-# You can use your OpenMV Cam to record mjpeg files. You can either feed the
-# recorder object JPEG frames or RGB565/Grayscale frames. Once you've finished
-# recording a Mjpeg file you can use VLC to play it. If you are on Ubuntu then
-# the built-in video player will work too.
-#
-# This example demonstrates using face tracking on your OpenMV Cam to take a
-# mjpeg.
-
-import sensor, image, time, mjpeg, pyb
-
-RED_LED_PIN = 1
-BLUE_LED_PIN = 3
-
-sensor.reset() # Initialize the camera sensor.
-sensor.set_pixformat(sensor.GRAYSCALE) # or sensor.RGB565
-sensor.set_framesize(sensor.QQVGA) # or sensor.HQVGA (or others)
-sensor.skip_frames(time = 2000) # Let new settings take effect.
-
-# Load up a face detection HaarCascade. This is an object that your OpenMV Cam
This is an object that your OpenMV Cam
-# can use to detect faces using the find_features() method below. Your OpenMV
-# Cam has the frontalface HaarCascade built-in. By default, all the stages of the
-# HaarCascade are loaded. However, you can adjust the number of stages to speed
-# up processing at the expense of accuracy. The frontalface HaarCascade has 25
-# stages.
-face_cascade = image.HaarCascade("frontalface", stages=25)
-
-while(True):
-
-    pyb.LED(RED_LED_PIN).on()
-    print("About to start detecting faces...")
-    sensor.skip_frames(time = 2000) # Give the user time to get ready.
-
-    pyb.LED(RED_LED_PIN).off()
-    print("Now detecting faces!")
-    pyb.LED(BLUE_LED_PIN).on()
-
-    diff = 10 # We'll say we detected a face after 10 frames.
-    while(diff):
-        img = sensor.snapshot()
-        # Threshold can be between 0.0 and 1.0. A higher threshold results in a
-        # higher detection rate with more false positives. The scale value
-        # controls the matching scale allowing you to detect smaller faces.
-        faces = img.find_features(face_cascade, threshold=0.5, scale_factor=1.5)
-
-        if faces:
-            diff -= 1
-            for r in faces:
-                img.draw_rectangle(r)
-
-    m = mjpeg.Mjpeg("example-%d.mjpeg" % pyb.rng())
-
-    clock = time.clock() # Tracks FPS.
-    print("You're on camera!")
-    for i in range(200):
-        clock.tick()
-        m.add_frame(sensor.snapshot())
-        print(clock.fps())
-
-    m.close(clock.fps())
-    pyb.LED(BLUE_LED_PIN).off()
-    print("Restarting...")
diff --git a/scripts/examples/OpenMV/07-Face-Detection/face_detection.py b/scripts/examples/OpenMV/07-Face-Detection/face_detection.py
deleted file mode 100644
index aca735d33..000000000
--- a/scripts/examples/OpenMV/07-Face-Detection/face_detection.py
+++ /dev/null
@@ -1,51 +0,0 @@
-# Face Detection Example
-#
-# This example shows off the built-in face detection feature of the OpenMV Cam.
-#
-# Face detection works by using the Haar Cascade feature detector on an image. A
-# Haar Cascade is a series of simple area contrast checks. For the built-in
-# frontalface detector there are 25 stages of checks with each stage having
-# hundreds of checks apiece. Haar Cascades run fast because later stages are
-# only evaluated if previous stages pass. Additionally, your OpenMV Cam uses
-# a data structure called the integral image to quickly execute each area
-# contrast check in constant time (the reason for feature detection being
-# grayscale only is because of the space requirement for the integral image).
-
-import sensor, time, image
-
-# Reset sensor
-sensor.reset()
-
-# Sensor settings
-sensor.set_contrast(3)
-sensor.set_gainceiling(16)
-# HQVGA and GRAYSCALE are the best for face tracking.
-sensor.set_framesize(sensor.HQVGA)
-sensor.set_pixformat(sensor.GRAYSCALE)
-
-# Load Haar Cascade
-# By default this will use all stages; fewer stages is faster but less accurate.
-face_cascade = image.HaarCascade("frontalface", stages=25)
-print(face_cascade)
-
-# FPS clock
-clock = time.clock()
-
-while (True):
-    clock.tick()
-
-    # Capture snapshot
-    img = sensor.snapshot()
-
-    # Find objects.
-    # Note: A lower scale factor scales down the image more and detects smaller objects.
-    # A higher threshold results in a higher detection rate, with more false positives.
-    objects = img.find_features(face_cascade, threshold=0.75, scale_factor=1.25)
-
-    # Draw objects
-    for r in objects:
-        img.draw_rectangle(r)
-
-    # Print FPS.
-    # Note: Actual FPS is higher; streaming the FB makes it slower.
-    print(clock.fps())
diff --git a/scripts/examples/OpenMV/07-Face-Detection/face_recognition.py b/scripts/examples/OpenMV/07-Face-Detection/face_recognition.py
deleted file mode 100644
index 8a514664a..000000000
--- a/scripts/examples/OpenMV/07-Face-Detection/face_recognition.py
+++ /dev/null
@@ -1,27 +0,0 @@
-# Face recognition with LBP descriptors.
-# See Timo Ahonen's "Face Recognition with Local Binary Patterns".
-#
-# Before running the example:
-# 1) Download the AT&T faces database http://www.cl.cam.ac.uk/Research/DTG/attarchive/pub/data/att_faces.zip
-# 2) Extract and copy the orl_faces directory to the SD card root.
-#
-# NOTE: This is just a PoC implementation of the paper mentioned above; it does not work well in real-life conditions.
-
-import sensor, time, image
-
-SUB = "s2"
-NUM_SUBJECTS = 5
-NUM_SUBJECTS_IMGS = 10
-
-img = image.Image("orl_faces/%s/1.pgm"%(SUB)).mask_ellipse()
-d0 = img.find_lbp((0, 0, img.width(), img.height()))
-img = None
-
-print("")
-for s in range(1, NUM_SUBJECTS+1):
-    dist = 0
-    for i in range(2, NUM_SUBJECTS_IMGS+1):
-        img = image.Image("orl_faces/s%d/%d.pgm"%(s, i)).mask_ellipse()
-        d1 = img.find_lbp((0, 0, img.width(), img.height()))
-        dist += image.match_descriptor(d0, d1)
-    print("Average dist for subject %d: %d"%(s, dist/NUM_SUBJECTS_IMGS))
diff --git a/scripts/examples/OpenMV/07-Face-Detection/face_tracking.py b/scripts/examples/OpenMV/07-Face-Detection/face_tracking.py
deleted file mode 100644
index f4571e872..000000000
--- a/scripts/examples/OpenMV/07-Face-Detection/face_tracking.py
+++ /dev/null
@@ -1,68 +0,0 @@
-# Face Tracking Example
-#
-# This example shows off using the keypoints feature of your OpenMV Cam to track
-# a face after it has been detected by a Haar Cascade. The first part of this
-# script finds a face in the image using the frontalface Haar Cascade.
-# After which the script uses the keypoints feature to automatically learn your
-# face and track it. Keypoints can be used to automatically track anything.
-import sensor, time, image
-
-# Reset sensor
-sensor.reset()
-sensor.set_contrast(3)
-sensor.set_gainceiling(16)
-sensor.set_framesize(sensor.VGA)
-sensor.set_windowing((320, 240))
-sensor.set_pixformat(sensor.GRAYSCALE)
-
-# Skip a few frames to allow the sensor to settle down
-sensor.skip_frames(time = 2000)
-
-# Load Haar Cascade
-# By default this will use all stages; fewer stages is faster but less accurate.
-face_cascade = image.HaarCascade("frontalface", stages=25)
-print(face_cascade)
-
-# First set of keypoints
-kpts1 = None
-
-# Find a face!
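# A small helper sketch related to the loop below, which grows the detected
# face rectangle by 31 pixels per side: clamping the expanded ROI to the
# image bounds avoids handing find_keypoints() an out-of-range ROI when the
# face sits near the edge of the frame.
def expand_roi(img, r, pad=31):
    x = max(r[0] - pad, 0)
    y = max(r[1] - pad, 0)
    w = min(r[2] + (2 * pad), img.width() - x)
    h = min(r[3] + (2 * pad), img.height() - y)
    return (x, y, w, h)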
-while (kpts1 == None): - img = sensor.snapshot() - img.draw_string(0, 0, "Looking for a face...") - # Find faces - objects = img.find_features(face_cascade, threshold=0.5, scale=1.25) - if objects: - # Expand the ROI by 31 pixels in every direction - face = (objects[0][0]-31, objects[0][1]-31,objects[0][2]+31*2, objects[0][3]+31*2) - # Extract keypoints using the detect face size as the ROI - kpts1 = img.find_keypoints(threshold=10, scale_factor=1.1, max_keypoints=100, roi=face) - # Draw a rectangle around the first face - img.draw_rectangle(objects[0]) - -# Draw keypoints -print(kpts1) -img.draw_keypoints(kpts1, size=24) -img = sensor.snapshot() -time.sleep_ms(2000) - -# FPS clock -clock = time.clock() - -while (True): - clock.tick() - img = sensor.snapshot() - # Extract keypoints from the whole frame - kpts2 = img.find_keypoints(threshold=10, scale_factor=1.1, max_keypoints=100, normalized=True) - - if (kpts2): - # Match the first set of keypoints with the second one - c=image.match_descriptor(kpts1, kpts2, threshold=85) - match = c[6] # C[6] contains the number of matches. - if (match>5): - img.draw_rectangle(c[2:6]) - img.draw_cross(c[0], c[1], size=10) - print(kpts2, "matched:%d dt:%d"%(match, c[7])) - - # Draw FPS - img.draw_string(0, 0, "FPS:%.2f"%(clock.fps())) diff --git a/scripts/examples/OpenMV/08-Eye-Tracking/face_eye_detection.py b/scripts/examples/OpenMV/08-Eye-Tracking/face_eye_detection.py deleted file mode 100644 index 13e5ab454..000000000 --- a/scripts/examples/OpenMV/08-Eye-Tracking/face_eye_detection.py +++ /dev/null @@ -1,49 +0,0 @@ -# Face Eye Detection Example -# -# This script uses the built-in frontalface detector to find a face and then -# the eyes within the face. If you want to determine the eye gaze please see the -# iris_detection script for an example on how to do that. - -import sensor, time, image - -# Reset sensor -sensor.reset() - -# Sensor settings -sensor.set_contrast(1) -sensor.set_gainceiling(16) -sensor.set_framesize(sensor.HQVGA) -sensor.set_pixformat(sensor.GRAYSCALE) - -# Load Haar Cascade -# By default this will use all stages, lower satges is faster but less accurate. -face_cascade = image.HaarCascade("frontalface", stages=25) -eyes_cascade = image.HaarCascade("eye", stages=24) -print(face_cascade, eyes_cascade) - -# FPS clock -clock = time.clock() - -while (True): - clock.tick() - - # Capture snapshot - img = sensor.snapshot() - - # Find a face ! - # Note: Lower scale factor scales-down the image more and detects smaller objects. - # Higher threshold results in a higher detection rate, with more false positives. - objects = img.find_features(face_cascade, threshold=0.5, scale_factor=1.5) - - # Draw faces - for face in objects: - img.draw_rectangle(face) - # Now find eyes within each face. - # Note: Use a higher threshold here (more detections) and lower scale (to find small objects) - eyes = img.find_features(eyes_cascade, threshold=0.5, scale_factor=1.2, roi=face) - for e in eyes: - img.draw_rectangle(e) - - # Print FPS. - # Note: Actual FPS is higher, streaming the FB makes it slower. - print(clock.fps()) diff --git a/scripts/examples/OpenMV/08-Eye-Tracking/iris_detection.py b/scripts/examples/OpenMV/08-Eye-Tracking/iris_detection.py deleted file mode 100644 index 9d51498c3..000000000 --- a/scripts/examples/OpenMV/08-Eye-Tracking/iris_detection.py +++ /dev/null @@ -1,52 +0,0 @@ -# Iris Detection 2 Example -# -# This example shows how to find the eye gaze (pupil detection) after finding -# the eyes in an image. 
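# A small helper sketch of the idea described here, assuming an eye ROI
# from find_features(): find_eye() returns the pupil center, and its offset
# from the ROI center gives a rough gaze direction.
def gaze_offset(img, eye_roi):
    x, y, w, h = eye_roi
    iris = img.find_eye(eye_roi) # pupil center: (iris[0], iris[1])
    return (iris[0] - (x + (w // 2)), iris[1] - (y + (h // 2)))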
This script uses the find_eyes function which determines -# the center point of roi that should contain a pupil. It does this by basically -# finding the center of the darkest area in the eye roi which is the pupil center. -# -# Note: This script does not detect a face first, use it with the telephoto lens. - -import sensor, time, image - -# Reset sensor -sensor.reset() - -# Sensor settings -sensor.set_contrast(3) -sensor.set_gainceiling(16) - -# Set resolution to VGA. -sensor.set_framesize(sensor.VGA) - -# Bin/Crop image to 200x100, which gives more details with less data to process -sensor.set_windowing((220, 190, 200, 100)) - -sensor.set_pixformat(sensor.GRAYSCALE) - -# Load Haar Cascade -# By default this will use all stages, lower stages is faster but less accurate. -eyes_cascade = image.HaarCascade("eye", stages=24) -print(eyes_cascade) - -# FPS clock -clock = time.clock() - -while (True): - clock.tick() - # Capture snapshot - img = sensor.snapshot() - # Find eyes ! - # Note: Lower scale factor scales-down the image more and detects smaller objects. - # Higher threshold results in a higher detection rate, with more false positives. - eyes = img.find_features(eyes_cascade, threshold=0.5, scale_factor=1.5) - - # Find iris - for e in eyes: - iris = img.find_eye(e) - img.draw_rectangle(e) - img.draw_cross(iris[0], iris[1]) - - # Print FPS. - # Note: Actual FPS is higher, streaming the FB makes it slower. - print(clock.fps()) diff --git a/scripts/examples/OpenMV/09-Feature-Detection/edges.py b/scripts/examples/OpenMV/09-Feature-Detection/edges.py deleted file mode 100644 index 166820b72..000000000 --- a/scripts/examples/OpenMV/09-Feature-Detection/edges.py +++ /dev/null @@ -1,20 +0,0 @@ -# Edge detection with Canny: -# -# This example demonstrates the Canny edge detector. -import sensor, image, time - -sensor.reset() # Initialize the camera sensor. -sensor.set_pixformat(sensor.GRAYSCALE) # or sensor.RGB565 -sensor.set_framesize(sensor.QQVGA) # or sensor.QVGA (or others) -sensor.skip_frames(time = 2000) # Let new settings take affect. -sensor.set_gainceiling(8) - -clock = time.clock() # Tracks FPS. -while(True): - clock.tick() # Track elapsed milliseconds between snapshots(). - img = sensor.snapshot() # Take a picture and return the image. - # Use Canny edge detector - img.find_edges(image.EDGE_CANNY, threshold=(50, 80)) - # Faster simpler edge detection - #img.find_edges(image.EDGE_SIMPLE, threshold=(100, 255)) - print(clock.fps()) # Note: Your OpenMV Cam runs about half as fast while diff --git a/scripts/examples/OpenMV/09-Feature-Detection/find_circles.py b/scripts/examples/OpenMV/09-Feature-Detection/find_circles.py deleted file mode 100644 index a208b4a5b..000000000 --- a/scripts/examples/OpenMV/09-Feature-Detection/find_circles.py +++ /dev/null @@ -1,39 +0,0 @@ -# Find Circles Example -# -# This example shows off how to find circles in the image using the Hough -# Transform. https://en.wikipedia.org/wiki/Circle_Hough_Transform -# -# Note that the find_circles() method will only find circles which are completely -# inside of the image. Circles which go outside of the image/roi are ignored... - -import sensor, image, time - -sensor.reset() -sensor.set_pixformat(sensor.RGB565) # grayscale is faster -sensor.set_framesize(sensor.QQVGA) -sensor.skip_frames(time = 2000) -clock = time.clock() - -while(True): - clock.tick() - img = sensor.snapshot().lens_corr(1.8) - - # Circle objects have four values: x, y, r (radius), and magnitude. 
The - # magnitude is the strength of the detection of the circle. Higher is - # better... - - # `threshold` controls how many circles are found. Increase its value - # to decrease the number of circles detected... - - # `x_margin`, `y_margin`, and `r_margin` control the merging of similar - # circles in the x, y, and r (radius) directions. - - # r_min, r_max, and r_step control what radiuses of circles are tested. - # Shrinking the number of tested circle radiuses yields a big performance boost. - - for c in img.find_circles(threshold = 2000, x_margin = 10, y_margin = 10, r_margin = 10, - r_min = 2, r_max = 100, r_step = 2): - img.draw_circle(c.x(), c.y(), c.r(), color = (255, 0, 0)) - print(c) - - print("FPS %f" % clock.fps()) diff --git a/scripts/examples/OpenMV/09-Feature-Detection/find_line_segments.py b/scripts/examples/OpenMV/09-Feature-Detection/find_line_segments.py deleted file mode 100644 index 4aa42cf17..000000000 --- a/scripts/examples/OpenMV/09-Feature-Detection/find_line_segments.py +++ /dev/null @@ -1,39 +0,0 @@ -# Find Line Segments Example -# -# This example shows off how to find line segments in the image. For each line object -# found in the image a line object is returned which includes the line's rotation. - -# find_line_segments() finds finite length lines (but is slow). -# Use find_line_segments() to find non-infinite lines (and is fast). - -enable_lens_corr = False # turn on for straighter lines... - -import sensor, image, time - -sensor.reset() -sensor.set_pixformat(sensor.RGB565) # grayscale is faster -sensor.set_framesize(sensor.QQVGA) -sensor.skip_frames(time = 2000) -clock = time.clock() - -# All lines also have `x1()`, `y1()`, `x2()`, and `y2()` methods to get their end-points -# and a `line()` method to get all the above as one 4 value tuple for `draw_line()`. - -while(True): - clock.tick() - img = sensor.snapshot() - if enable_lens_corr: img.lens_corr(1.8) # for 2.8mm lens... - - # `merge_distance` controls the merging of nearby lines. At 0 (the default), no - # merging is done. At 1, any line 1 pixel away from another is merged... and so - # on as you increase this value. You may wish to merge lines as line segment - # detection produces a lot of line segment results. - - # `max_theta_diff` controls the maximum amount of rotation difference between - # any two lines about to be merged. The default setting allows for 15 degrees. - - for l in img.find_line_segments(merge_distance = 0, max_theta_diff = 5): - img.draw_line(l.line(), color = (255, 0, 0)) - # print(l) - - print("FPS %f" % clock.fps()) diff --git a/scripts/examples/OpenMV/09-Feature-Detection/find_lines.py b/scripts/examples/OpenMV/09-Feature-Detection/find_lines.py deleted file mode 100644 index 6c45fcfbc..000000000 --- a/scripts/examples/OpenMV/09-Feature-Detection/find_lines.py +++ /dev/null @@ -1,57 +0,0 @@ -# Find Lines Example -# -# This example shows off how to find lines in the image. For each line object -# found in the image a line object is returned which includes the line's rotation. - -# Note: Line detection is done by using the Hough Transform: -# http://en.wikipedia.org/wiki/Hough_transform -# Please read about it above for more information on what `theta` and `rho` are. - -# find_lines() finds infinite length lines. Use find_line_segments() to find non-infinite lines. - -enable_lens_corr = False # turn on for straighter lines... 
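# A tiny helper sketch for the negative-rho note that closes this script:
# normalize a (theta, rho) pair so that rho is always non-negative, using
# the identity that [theta:-rho] names the same line as [theta+180:+rho].
def normalize_polar(theta, rho):
    if rho < 0:
        return ((theta + 180) % 360, -rho)
    return (theta, rho)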
- -import sensor, image, time - -sensor.reset() -sensor.set_pixformat(sensor.RGB565) # grayscale is faster -sensor.set_framesize(sensor.QQVGA) -sensor.skip_frames(time = 2000) -clock = time.clock() - -# All line objects have a `theta()` method to get their rotation angle in degrees. -# You can filter lines based on their rotation angle. - -min_degree = 0 -max_degree = 179 - -# All lines also have `x1()`, `y1()`, `x2()`, and `y2()` methods to get their end-points -# and a `line()` method to get all the above as one 4 value tuple for `draw_line()`. - -while(True): - clock.tick() - img = sensor.snapshot() - if enable_lens_corr: img.lens_corr(1.8) # for 2.8mm lens... - - # `threshold` controls how many lines in the image are found. Only lines with - # edge difference magnitude sums greater than `threshold` are detected... - - # More about `threshold` - each pixel in the image contributes a magnitude value - # to a line. The sum of all contributions is the magintude for that line. Then - # when lines are merged their magnitudes are added togheter. Note that `threshold` - # filters out lines with low magnitudes before merging. To see the magnitude of - # un-merged lines set `theta_margin` and `rho_margin` to 0... - - # `theta_margin` and `rho_margin` control merging similar lines. If two lines - # theta and rho value differences are less than the margins then they are merged. - - for l in img.find_lines(threshold = 1000, theta_margin = 25, rho_margin = 25): - if (min_degree <= l.theta()) and (l.theta() <= max_degree): - img.draw_line(l.line(), color = (255, 0, 0)) - # print(l) - - print("FPS %f" % clock.fps()) - -# About negative rho values: -# -# A [theta+0:-rho] tuple is the same as [theta+180:+rho]. diff --git a/scripts/examples/OpenMV/09-Feature-Detection/find_rects.py b/scripts/examples/OpenMV/09-Feature-Detection/find_rects.py deleted file mode 100644 index 5fafba626..000000000 --- a/scripts/examples/OpenMV/09-Feature-Detection/find_rects.py +++ /dev/null @@ -1,31 +0,0 @@ -# Find Rects Example -# -# This example shows off how to find rectangles in the image using the quad threshold -# detection code from our April Tags code. The quad threshold detection algorithm -# detects rectangles in an extremely robust way and is much better than Hough -# Transform based methods. For example, it can still detect rectangles even when lens -# distortion causes those rectangles to look bent. Rounded rectangles are no problem! -# (But, given this the code will also detect small radius circles too)... - -import sensor, image, time - -sensor.reset() -sensor.set_pixformat(sensor.RGB565) # grayscale is faster (160x120 max on OpenMV-M7) -sensor.set_framesize(sensor.QQVGA) -sensor.skip_frames(time = 2000) -clock = time.clock() - -while(True): - clock.tick() - img = sensor.snapshot() - - # `threshold` below should be set to a high enough value to filter out noise - # rectangles detected in the image which have low edge magnitudes. Rectangles - # have larger edge magnitudes the larger and more contrasty they are... 
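# A small filtering sketch for the magnitude discussion above: among the
# rectangles that pass `threshold`, keep only the one with the strongest
# magnitude() edge score.
def strongest_rect(rects):
    best = None
    for r in rects:
        if (best == None) or (r.magnitude() > best.magnitude()):
            best = r
    return best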
- - for r in img.find_rects(threshold = 10000): - img.draw_rectangle(r.rect(), color = (255, 0, 0)) - for p in r.corners(): img.draw_circle(p[0], p[1], 5, color = (0, 255, 0)) - print(r) - - print("FPS %f" % clock.fps()) diff --git a/scripts/examples/OpenMV/09-Feature-Detection/hog.py b/scripts/examples/OpenMV/09-Feature-Detection/hog.py deleted file mode 100644 index 7bc17446b..000000000 --- a/scripts/examples/OpenMV/09-Feature-Detection/hog.py +++ /dev/null @@ -1,28 +0,0 @@ -# Histogram of Oriented Gradients (HoG) Example -# -# This example demonstrates HoG visualization. -# -# Note: Due to JPEG artifacts, the HoG visualization looks blurry. To see the -# image without JPEG artifacts, uncomment the lines that save the image to uSD. - -import sensor, image, time - -sensor.reset() -# Set sensor settings -sensor.set_contrast(1) -sensor.set_gainceiling(8) -sensor.set_framesize(sensor.QVGA) -sensor.set_pixformat(sensor.GRAYSCALE) -sensor.skip_frames(time = 2000) - -clock = time.clock() # Tracks FPS. -while (True): - clock.tick() - img = sensor.snapshot() - img.find_hog() - - # Uncomment to save raw FB to file and exit the loop - #img.save("/hog.pgm") - #break - - print(clock.fps()) diff --git a/scripts/examples/OpenMV/09-Feature-Detection/keypoints.py b/scripts/examples/OpenMV/09-Feature-Detection/keypoints.py deleted file mode 100644 index 0c4316a4d..000000000 --- a/scripts/examples/OpenMV/09-Feature-Detection/keypoints.py +++ /dev/null @@ -1,58 +0,0 @@ -# Object tracking with keypoints example. -# Show the camera an object and then run the script. A set of keypoints will be extracted -# once and then tracked in the following frames. If you want a new set of keypoints re-run -# the script. NOTE: see the docs for arguments to tune find_keypoints and match_keypoints. -import sensor, time, image - -# Reset sensor -sensor.reset() - -# Sensor settings -sensor.set_contrast(3) -sensor.set_gainceiling(16) -sensor.set_framesize(sensor.VGA) -sensor.set_windowing((320, 240)) -sensor.set_pixformat(sensor.GRAYSCALE) - -sensor.skip_frames(time = 2000) -sensor.set_auto_gain(False, value=100) - -def draw_keypoints(img, kpts): - if kpts: - print(kpts) - img.draw_keypoints(kpts) - img = sensor.snapshot() - time.sleep_ms(1000) - -kpts1 = None -# NOTE: uncomment to load a keypoints descriptor from file -#kpts1 = image.load_descriptor("/desc.orb") -#img = sensor.snapshot() -#draw_keypoints(img, kpts1) - -clock = time.clock() -while (True): - clock.tick() - img = sensor.snapshot() - if (kpts1 == None): - # NOTE: By default find_keypoints returns multi-scale keypoints extracted from an image pyramid. - kpts1 = img.find_keypoints(max_keypoints=150, threshold=10, scale_factor=1.2) - draw_keypoints(img, kpts1) - else: - # NOTE: When extracting keypoints to match the first descriptor, we use normalized=True to extract - # keypoints from the first scale only, which will match one of the scales in the first descriptor. - kpts2 = img.find_keypoints(max_keypoints=150, threshold=10, normalized=True) - if (kpts2): - match = image.match_descriptor(kpts1, kpts2, threshold=85) - if (match.count()>10): - # If we have at least n "good matches" - # Draw bounding rectangle and cross. 
-                img.draw_rectangle(match.rect())
-                img.draw_cross(match.cx(), match.cy(), size=10)
-
-            print(kpts2, "matched:%d dt:%d"%(match.count(), match.theta()))
-            # NOTE: uncomment if you want to draw the keypoints
-            #img.draw_keypoints(kpts2, size=KEYPOINTS_SIZE, matched=True)
-
-    # Draw FPS
-    img.draw_string(0, 0, "FPS:%.2f"%(clock.fps()))
diff --git a/scripts/examples/OpenMV/09-Feature-Detection/keypoints_save.py b/scripts/examples/OpenMV/09-Feature-Detection/keypoints_save.py
deleted file mode 100644
index bcd1a2edf..000000000
--- a/scripts/examples/OpenMV/09-Feature-Detection/keypoints_save.py
+++ /dev/null
@@ -1,37 +0,0 @@
-# Keypoints descriptor example.
-# This example shows how to save a keypoints descriptor to file. Show the camera an object
-# and then run the script. The script will extract and save a keypoints descriptor and the image.
-# You can use the keypoints_editor.py util to remove unwanted keypoints.
-#
-# NOTE: Please reset the camera after running this script to see the new file.
-import sensor, time, image
-
-# Reset sensor
-sensor.reset()
-
-# Sensor settings
-sensor.set_contrast(3)
-sensor.set_gainceiling(16)
-sensor.set_framesize(sensor.VGA)
-sensor.set_windowing((320, 240))
-sensor.set_pixformat(sensor.GRAYSCALE)
-
-sensor.skip_frames(time = 2000)
-sensor.set_auto_gain(False, value=100)
-
-FILE_NAME = "desc"
-img = sensor.snapshot()
-# NOTE: See the docs for other arguments
-# NOTE: By default find_keypoints returns multi-scale keypoints extracted from an image pyramid.
-kpts = img.find_keypoints(max_keypoints=150, threshold=10, scale_factor=1.2)
-
-if (kpts == None):
-    raise(Exception("Couldn't find any keypoints!"))
-
-image.save_descriptor(kpts, "/%s.orb"%(FILE_NAME))
-img.save("/%s.pgm"%(FILE_NAME))
-
-img.draw_keypoints(kpts)
-sensor.snapshot()
-time.sleep_ms(1000)
-raise(Exception("Done! Please reset the camera"))
diff --git a/scripts/examples/OpenMV/09-Feature-Detection/lbp.py b/scripts/examples/OpenMV/09-Feature-Detection/lbp.py
deleted file mode 100644
index b54838313..000000000
--- a/scripts/examples/OpenMV/09-Feature-Detection/lbp.py
+++ /dev/null
@@ -1,53 +0,0 @@
-# Local Binary Patterns (LBP) Example
-#
-# This example shows off how to use the local binary pattern feature descriptor
-# on your OpenMV Cam. LBP descriptors work like Freak feature descriptors.
-#
-# WARNING: LBP support needs to be reworked! As of right now this feature needs
-# a lot of work to be made into something useful. This script will remain to show
-# that the functionality exists, but, in its current state, is inadequate.
-
-import sensor, time, image
-sensor.reset()
-
-# Reset sensor
-sensor.reset()
-
-# Sensor settings
-sensor.set_contrast(1)
-sensor.set_gainceiling(16)
-sensor.set_framesize(sensor.HQVGA)
-sensor.set_pixformat(sensor.GRAYSCALE)
-
-# Load Haar Cascade
-# By default this will use all stages; fewer stages is faster but less accurate.
-face_cascade = image.HaarCascade("frontalface", stages=25)
-print(face_cascade)
-
-# Skip a few frames to allow the sensor to settle down
-# Note: This takes more time when executed from the IDE.
-for i in range(0, 30): - img = sensor.snapshot() - img.draw_string(0, 0, "Please wait...") - -d0 = None -#d0 = image.load_descriptor("/desc.lbp") -clock = time.clock() - -while (True): - clock.tick() - img = sensor.snapshot() - - objects = img.find_features(face_cascade, threshold=0.5, scale_factor=1.25) - if objects: - face = objects[0] - d1 = img.find_lbp(face) - if (d0 == None): - d0 = d1 - else: - dist = image.match_descriptor(d0, d1) - img.draw_string(0, 10, "Match %d%%"%(dist)) - - img.draw_rectangle(face) - # Draw FPS - img.draw_string(0, 0, "FPS:%.2f"%(clock.fps())) diff --git a/scripts/examples/OpenMV/09-Feature-Detection/linear_regression_fast.py b/scripts/examples/OpenMV/09-Feature-Detection/linear_regression_fast.py deleted file mode 100644 index f200e4ace..000000000 --- a/scripts/examples/OpenMV/09-Feature-Detection/linear_regression_fast.py +++ /dev/null @@ -1,43 +0,0 @@ -# Fast Linear Regression Example -# -# This example shows off how to use the get_regression() method on your OpenMV Cam -# to get the linear regression of a ROI. Using this method you can easily build -# a robot which can track lines which all point in the same general direction -# but are not actually connected. Use find_blobs() on lines that are nicely -# connected for better filtering options and control. -# -# This is called the fast linear regression because we use the least-squares -# method to fit the line. However, this method is NOT GOOD FOR ANY images that -# have a lot (or really any) outlier points which corrupt the line fit... - -THRESHOLD = (0, 100) # Grayscale threshold for dark things... -BINARY_VISIBLE = True # Does binary first so you can see what the linear regression - # is being run on... might lower FPS though. - -import sensor, image, time - -sensor.reset() -sensor.set_pixformat(sensor.GRAYSCALE) -sensor.set_framesize(sensor.QQVGA) -sensor.skip_frames(time = 2000) -clock = time.clock() - -while(True): - clock.tick() - img = sensor.snapshot().binary([THRESHOLD]) if BINARY_VISIBLE else sensor.snapshot() - - # Returns a line object similar to line objects returned by find_lines() and - # find_line_segments(). You have x1(), y1(), x2(), y2(), length(), - # theta() (rotation in degrees), rho(), and magnitude(). - # - # magnitude() represents how well the linear regression worked. It goes from - # (0, INF] where 0 is returned for a circle. The more linear the - # scene is the higher the magnitude. - line = img.get_regression([(255,255) if BINARY_VISIBLE else THRESHOLD]) - - if (line): img.draw_line(line.line(), color = 127) - print("FPS %f, mag = %s" % (clock.fps(), str(line.magnitude()) if (line) else "N/A")) - -# About negative rho values: -# -# A [theta+0:-rho] tuple is the same as [theta+180:+rho]. diff --git a/scripts/examples/OpenMV/09-Feature-Detection/linear_regression_robust.py b/scripts/examples/OpenMV/09-Feature-Detection/linear_regression_robust.py deleted file mode 100644 index 9f24c618d..000000000 --- a/scripts/examples/OpenMV/09-Feature-Detection/linear_regression_robust.py +++ /dev/null @@ -1,45 +0,0 @@ -# Robust Linear Regression Example -# -# This example shows off how to use the get_regression() method on your OpenMV Cam -# to get the linear regression of a ROI. Using this method you can easily build -# a robot which can track lines which all point in the same general direction -# but are not actually connected. Use find_blobs() on lines that are nicely -# connected for better filtering options and control. 
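# A minimal sketch of turning a get_regression() line into a steering error
# for the line-following robot use case mentioned above, assuming theta()
# runs from 0-179 degrees with 0 meaning the line points straight ahead:
def steering_error(line):
    t = line.theta()
    return (t - 180) if (t > 90) else t # signed error in degrees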
-#
-# We're using the robust=True argument for get_regression() in this script, which
-# computes the linear regression using a much more robust algorithm... but potentially
-# much slower. The robust algorithm runs in O(N^2) time on the image. So, YOU NEED
-# TO LIMIT THE NUMBER OF PIXELS the robust algorithm works on or it can actually
-# take seconds for the algorithm to give you a result... THRESHOLD VERY CAREFULLY!

-THRESHOLD = (0, 100) # Grayscale threshold for dark things...
-BINARY_VISIBLE = True # Does binary first so you can see what the linear regression
-                      # is being run on... might lower FPS though.
-
-import sensor, image, time
-
-sensor.reset()
-sensor.set_pixformat(sensor.GRAYSCALE)
-sensor.set_framesize(sensor.QQQVGA) # 80x60 (4,800 pixels) - O(N^2) max = 23,040,000.
-sensor.skip_frames(time = 2000)     # WARNING: If you use QQVGA it may take seconds
-clock = time.clock()                # to process a frame sometimes.
-
-while(True):
-    clock.tick()
-    img = sensor.snapshot().binary([THRESHOLD]) if BINARY_VISIBLE else sensor.snapshot()
-
-    # Returns a line object similar to line objects returned by find_lines() and
-    # find_line_segments(). You have x1(), y1(), x2(), y2(), length(),
-    # theta() (rotation in degrees), rho(), and magnitude().
-    #
-    # magnitude() represents how well the linear regression worked. It means something
-    # different for the robust linear regression. In general, the larger the value the
-    # better...
-    line = img.get_regression([(255,255) if BINARY_VISIBLE else THRESHOLD], robust = True)
-
-    if (line): img.draw_line(line.line(), color = 127)
-    print("FPS %f, mag = %s" % (clock.fps(), str(line.magnitude()) if (line) else "N/A"))
-
-# About negative rho values:
-#
-# A [theta+0:-rho] tuple is the same as [theta+180:+rho].
diff --git a/scripts/examples/OpenMV/09-Feature-Detection/template_matching.py b/scripts/examples/OpenMV/09-Feature-Detection/template_matching.py
deleted file mode 100644
index 79d9c66a7..000000000
--- a/scripts/examples/OpenMV/09-Feature-Detection/template_matching.py
+++ /dev/null
@@ -1,48 +0,0 @@
-# Template Matching Example - Normalized Cross Correlation (NCC)
-#
-# This example shows off how to use the NCC feature of your OpenMV Cam to match
-# image patches to parts of an image... except in extremely controlled environments
-# NCC is not all that useful.
-#
-# WARNING: NCC support needs to be reworked! As of right now this feature needs
-# a lot of work to be made into something useful. This script will remain to show
-# that the functionality exists, but, in its current state, is inadequate.
-
-import time, sensor, image
-from image import SEARCH_EX, SEARCH_DS
-
-# Reset sensor
-sensor.reset()
-
-# Set sensor settings
-sensor.set_contrast(1)
-sensor.set_gainceiling(16)
-# Max resolution for template matching with SEARCH_EX is QQVGA
-sensor.set_framesize(sensor.QQVGA)
-# You can set windowing to reduce the search image.
-#sensor.set_windowing(((640-80)//2, (480-60)//2, 80, 60))
-sensor.set_pixformat(sensor.GRAYSCALE)
-
-# Load template.
-# Template should be a small (e.g. 32x32 pixels) grayscale image.
-template = image.Image("/template.pgm")
-
-clock = time.clock()
-
-# Run template matching
-while (True):
-    clock.tick()
-    img = sensor.snapshot()
-
-    # find_template(template, threshold, [roi, step, search])
-    # ROI: The region of interest tuple (x, y, w, h).
-    # Step: The loop step used (y+=step, x+=step); use a bigger step to make it faster.
- # Search is either image.SEARCH_EX for exhaustive search or image.SEARCH_DS for diamond search - # - # Note1: ROI has to be smaller than the image and bigger than the template. - # Note2: In diamond search, step and ROI are both ignored. - r = img.find_template(template, 0.70, step=4, search=SEARCH_EX) #, roi=(10, 0, 60, 60)) - if r: - img.draw_rectangle(r) - - print(clock.fps()) diff --git a/scripts/examples/OpenMV/16-Codes/qrcodes_with_lens_zoom.py b/scripts/examples/OpenMV/16-Codes/qrcodes_with_lens_zoom.py deleted file mode 100644 index c9798a4bf..000000000 --- a/scripts/examples/OpenMV/16-Codes/qrcodes_with_lens_zoom.py +++ /dev/null @@ -1,22 +0,0 @@ -# QRCode Example -# -# This example shows the power of the OpenMV Cam to detect QR Codes -# without needing lens correction. - -import sensor, image, time - -sensor.reset() -sensor.set_pixformat(sensor.GRAYSCALE) -sensor.set_framesize(sensor.VGA) -sensor.set_windowing((240, 240)) # look at center 240x240 pixels of the VGA resolution. -sensor.skip_frames(time = 2000) -sensor.set_auto_gain(False) # must turn this off to prevent image washout... -clock = time.clock() - -while(True): - clock.tick() - img = sensor.snapshot() - for code in img.find_qrcodes(): - img.draw_rectangle(code.rect(), color = 127) - print(code) - print(clock.fps()) diff --git a/scripts/examples/OpenMV/17-Pixy-Emulation/apriltags_pixy_i2c_emulation.py b/scripts/examples/OpenMV/17-Pixy-Emulation/apriltags_pixy_i2c_emulation.py deleted file mode 100644 index 18e394fb5..000000000 --- a/scripts/examples/OpenMV/17-Pixy-Emulation/apriltags_pixy_i2c_emulation.py +++ /dev/null @@ -1,245 +0,0 @@ -# AprilTags Pixy I2C Emulation Script -# -# This script allows your OpenMV Cam to transmit AprilTag detection data like -# a Pixy (CMUcam5) tracking colors in I2C mode. This script allows you to -# easily replace a Pixy (CMUcam5) color tracking sensor with an OpenMV Cam -# AprilTag tracking sensor. Note that this only runs on the OpenMV Cam M7. -# -# P4 = SCL -# P5 = SDA -# -# P7 = Servo 1 -# P8 = Servo 2 - -# Note: The tag family is TAG36H11. Additionally, in order to for the -# signature value of a tag detection to be compatible with pixy -# interface libraries all tag ids have 8 added to them in order -# to move them in the color code signature range. Finally, tags -# are all reported as color code blocks... - -# Pixy Parameters ############################################################ - -max_blocks = 1000 -max_blocks_per_id = 1000 - -i2c_address = 0x54 - -# Pan Servo -s0_lower_limit = 1000 # Servo pulse width lower limit in microseconds. -s0_upper_limit = 2000 # Servo pulse width upper limit in microseconds. - -# Tilt Servo -s1_lower_limit = 1000 # Servo pulse width lower limit in microseconds. -s1_upper_limit = 2000 # Servo pulse width upper limit in microseconds. - -analog_out_enable = False # P6 -> Analog Out (0v - 3.3v). 
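# A minimal sketch of the analog-out mapping selected by analog_out_mode
# below, assuming a QQVGA (160x120) frame: scale the largest tag's x or y
# position into the 0-255 range accepted by pyb.DAC.write().
def tag_to_dac_value(tag, mode):
    if mode == 0:
        return max(min((tag.cx() * 255) // 160, 255), 0) # x position
    return max(min((tag.cy() * 255) // 120, 255), 0) # y position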
-analog_out_mode = 0 # 0 == x position of largest tag - 1 == y position of largest tag - -############################################################################## - -import image, math, pyb, sensor, struct, time - -# Camera Setup - -sensor.reset() -sensor.set_pixformat(sensor.GRAYSCALE) -sensor.set_framesize(sensor.QQVGA) -sensor.skip_frames(time = 2000) - -# LED Setup - -red_led = pyb.LED(1) -green_led = pyb.LED(2) -blue_led = pyb.LED(3) - -red_led.off() -green_led.off() -blue_led.off() - -# DAC Setup - -dac = pyb.DAC("P6") if analog_out_enable else None - -if dac: - dac.write(0) - -# Servo Setup - -min_s0_limit = min(s0_lower_limit, s0_upper_limit) -max_s0_limit = max(s0_lower_limit, s0_upper_limit) -min_s1_limit = min(s1_lower_limit, s1_upper_limit) -max_s1_limit = max(s1_lower_limit, s1_upper_limit) - -s0_pan = pyb.Servo(1) # P7 -s1_tilt = pyb.Servo(2) # P8 - -s0_pan.pulse_width(int((max_s0_limit - min_s0_limit) // 2)) # center -s1_tilt.pulse_width(int((max_s1_limit - min_s1_limit) // 2)) # center - -s0_pan_conversion_factor = (max_s0_limit - min_s0_limit) / 1000 -s1_tilt_conversion_factor = (max_s1_limit - min_s1_limit) / 1000 - -def s0_pan_position(value): - s0_pan.pulse_width(round(s0_lower_limit + (max(min(value, 1000), 0) * s0_pan_conversion_factor))) - -def s1_tilt_position(value): - s1_tilt.pulse_width(round(s1_lower_limit + (max(min(value, 1000), 0) * s1_tilt_conversion_factor))) - -# Link Setup - -bus = pyb.I2C(2, pyb.I2C.SLAVE, addr = i2c_address) - -def write(data): - # Prepare the data to transmit first so we can do it quickly. - out_data = [] - for i in range(0, len(data), 2): - out_data.append(data[i:i+2]) - # Disable interrupts so we can send all packets without gaps. - state = pyb.disable_irq() - for i in range(len(out_data)): - max_exceptions = 10 - loop = True - while(loop): - try: - bus.send(out_data[i], timeout = 1) - loop = False - except OSError as error: - if(max_exceptions <= 0): - pyb.enable_irq(state) - return - max_exceptions -= 1 - pyb.enable_irq(state) - -def available(): - return 0 # Not implemented as there is no way for the us to be ready to receive the data. - -def read_byte(): - return 0 # Not implemented as there is no way for the us to be ready to receive the data. - -# Helper Stuff - -def checksum(data): - checksum = 0 - for i in range(0, len(data), 2): - checksum += ((data[i+1] & 0xFF) << 8) | ((data[i+0] & 0xFF) << 0) - return checksum & 0xFFFF - -def to_object_block_format(tag): - angle = int((tag.rotation() * 180) // math.pi) - temp = struct.pack(" 0) and (max_blocks_per_id > 0): # new frame - dat_buf = struct.pack(" Analog Out (0v - 3.3v). 
-analog_out_mode = 0 # 0 == x position of largest tag - 1 == y position of largest tag - -############################################################################## - -import image, math, pyb, sensor, struct, time - -# Camera Setup - -sensor.reset() -sensor.set_pixformat(sensor.GRAYSCALE) -sensor.set_framesize(sensor.QQVGA) -sensor.skip_frames(time = 2000) - -# LED Setup - -red_led = pyb.LED(1) -green_led = pyb.LED(2) -blue_led = pyb.LED(3) - -red_led.off() -green_led.off() -blue_led.off() - -# DAC Setup - -dac = pyb.DAC("P6") if analog_out_enable else None - -if dac: - dac.write(0) - -# Servo Setup - -min_s0_limit = min(s0_lower_limit, s0_upper_limit) -max_s0_limit = max(s0_lower_limit, s0_upper_limit) -min_s1_limit = min(s1_lower_limit, s1_upper_limit) -max_s1_limit = max(s1_lower_limit, s1_upper_limit) - -s0_pan = pyb.Servo(1) # P7 -s1_tilt = pyb.Servo(2) # P8 - -s0_pan.pulse_width(int((max_s0_limit - min_s0_limit) // 2)) # center -s1_tilt.pulse_width(int((max_s1_limit - min_s1_limit) // 2)) # center - -s0_pan_conversion_factor = (max_s0_limit - min_s0_limit) / 1000 -s1_tilt_conversion_factor = (max_s1_limit - min_s1_limit) / 1000 - -def s0_pan_position(value): - s0_pan.pulse_width(round(s0_lower_limit + (max(min(value, 1000), 0) * s0_pan_conversion_factor))) - -def s1_tilt_position(value): - s1_tilt.pulse_width(round(s1_lower_limit + (max(min(value, 1000), 0) * s1_tilt_conversion_factor))) - -# Link Setup - -bus = pyb.SPI(2, pyb.SPI.SLAVE, polarity = 0, phase = 0, bits = 16) -while(True): - try: - sync_bytes = bus.recv(2, timeout = 10) - if((sync_bytes[0] == 0x00) and (sync_bytes[1] == 0x5A)): - break - except OSError as error: - pass - - bus.deinit() - bus.init(pyb.SPI.SLAVE, polarity = 0, phase = 0, bits = 16) - -def write(data): - - max_exceptions = 10 - loop = True - while(loop): - try: - bus.send(data, timeout = 10) - loop = False - except OSError as error: - if(max_exceptions <= 0): - return - max_exceptions -= 1 - -def available(): - return 0 # Not implemented as there is no way for the us to be ready to receive the data. - -def read_byte(): - return 0 # Not implemented as there is no way for the us to be ready to receive the data. - -# Helper Stuff - -def checksum(data): - checksum = 0 - for i in range(0, len(data), 2): - checksum += ((data[i+1] & 0xFF) << 8) | ((data[i+0] & 0xFF) << 0) - return checksum & 0xFFFF - -def to_object_block_format(tag): - angle = int((tag.rotation() * 180) // math.pi) - temp = struct.pack(" 0) and (max_blocks_per_id > 0): # new frame - dat_buf = struct.pack(" Analog Out (0v - 3.3v). 
-analog_out_mode = 0 # 0 == x position of largest tag - 1 == y position of largest tag - -############################################################################## - -import image, math, pyb, sensor, struct, time - -# Camera Setup - -sensor.reset() -sensor.set_pixformat(sensor.GRAYSCALE) -sensor.set_framesize(sensor.QQVGA) -sensor.skip_frames(time = 2000) - -# LED Setup - -red_led = pyb.LED(1) -green_led = pyb.LED(2) -blue_led = pyb.LED(3) - -red_led.off() -green_led.off() -blue_led.off() - -# DAC Setup - -dac = pyb.DAC("P6") if analog_out_enable else None - -if dac: - dac.write(0) - -# Servo Setup - -min_s0_limit = min(s0_lower_limit, s0_upper_limit) -max_s0_limit = max(s0_lower_limit, s0_upper_limit) -min_s1_limit = min(s1_lower_limit, s1_upper_limit) -max_s1_limit = max(s1_lower_limit, s1_upper_limit) - -s0_pan = pyb.Servo(1) # P7 -s1_tilt = pyb.Servo(2) # P8 - -s0_pan.pulse_width(int((max_s0_limit - min_s0_limit) // 2)) # center -s1_tilt.pulse_width(int((max_s1_limit - min_s1_limit) // 2)) # center - -s0_pan_conversion_factor = (max_s0_limit - min_s0_limit) / 1000 -s1_tilt_conversion_factor = (max_s1_limit - min_s1_limit) / 1000 - -def s0_pan_position(value): - s0_pan.pulse_width(round(s0_lower_limit + (max(min(value, 1000), 0) * s0_pan_conversion_factor))) - -def s1_tilt_position(value): - s1_tilt.pulse_width(round(s1_lower_limit + (max(min(value, 1000), 0) * s1_tilt_conversion_factor))) - -# Link Setup - -uart = pyb.UART(3, uart_baudrate, timeout_char = 1000) - -def write(data): - uart.write(data) - -def available(): - return uart.any() - -def read_byte(): - return uart.readchar() - -# Helper Stuff - -def checksum(data): - checksum = 0 - for i in range(0, len(data), 2): - checksum += ((data[i+1] & 0xFF) << 8) | ((data[i+0] & 0xFF) << 0) - return checksum & 0xFFFF - -def to_object_block_format(tag): - angle = int((tag.rotation() * 180) // math.pi) - temp = struct.pack(" 0) and (max_blocks_per_id > 0): # new frame - dat_buf = struct.pack("> 8) ^ (tmp << 8) ^ (tmp << 3) ^ (tmp >> 4)) & 0xFFFF - tmp = extra ^ (output & 0xFF) - tmp = (tmp ^ (tmp << 4)) & 0xFF - output = ((output >> 8) ^ (tmp << 8) ^ (tmp << 3) ^ (tmp >> 4)) & 0xFFFF - return output - -MAV_DISTANCE_SENSOR_message_id = 132 -MAV_DISTANCE_SENSOR_min_distance = 1 # in cm -MAV_DISTANCE_SENSOR_max_distance = 10000 # in cm -MAV_DISTANCE_SENSOR_type = 0 # MAV_DISTANCE_SENSOR_LASER -MAV_DISTANCE_SENSOR_id = 0 # unused -MAV_DISTANCE_SENSOR_orientation = 25 # MAV_SENSOR_ROTATION_PITCH_270 -MAV_DISTANCE_SENSOR_covariance = 0 # unused -MAV_DISTANCE_SENSOR_extra_crc = 85 - -# http://mavlink.org/messages/common#DISTANCE_SENSOR -# https://github.com/mavlink/c_library_v1/blob/master/common/mavlink_msg_distance_sensor.h -def send_distance_sensor_packet(tag, tag_size): - global packet_sequence - temp = struct.pack(" 0.1): # Below 0.1 or so (YMMV) and the results are just noise. - print("{0:+f}r {1:+f}z {2} {3} FPS".format(rotation_change, zoom_amount, \ - displacement.response(), - clock.fps())) - else: - print(clock.fps()) diff --git a/scripts/examples/OpenMV/22-Optical-Flow/absolute-translation.py b/scripts/examples/OpenMV/22-Optical-Flow/absolute-translation.py deleted file mode 100644 index f4dd2e49f..000000000 --- a/scripts/examples/OpenMV/22-Optical-Flow/absolute-translation.py +++ /dev/null @@ -1,55 +0,0 @@ -# Absolute Optical Flow Translation -# -# This example shows off using your OpenMV Cam to measure translation -# in the X and Y direction by comparing the current and a previous -# image against each other. 
Note that only X and Y translation is -# handled - not rotation/scale in this mode. - -# To run this demo effectively please mount your OpenMV Cam on a steady -# base and SLOWLY translate it to the left, right, up, and down and -# watch the numbers change. Note that you can see displacement numbers -# up +- half of the hoizontal and vertical resolution. - -import sensor, image, time - -# NOTE!!! You have to use a small power of 2 resolution when using -# find_displacement(). This is because the algorithm is powered by -# something called phase correlation which does the image comparison -# using FFTs. A non-power of 2 resolution requires padding to a power -# of 2 which reduces the usefulness of the algorithm results. Please -# use a resolution like B64X64 or B64X32 (2x faster). - -# Your OpenMV Cam supports power of 2 resolutions of 64x32, 64x64, -# 128x64, and 128x128. If you want a resolution of 32x32 you can create -# it by doing "img.pool(2, 2)" on a 64x64 image. - -sensor.reset() # Reset and initialize the sensor. -sensor.set_pixformat(sensor.RGB565) # Set pixel format to RGB565 (or GRAYSCALE) -sensor.set_framesize(sensor.B64X64) # Set frame size to 64x64... (or 64x32)... -sensor.skip_frames(time = 2000) # Wait for settings take effect. -clock = time.clock() # Create a clock object to track the FPS. - -# Take from the main frame buffer's RAM to allocate a second frame buffer. -# There's a lot more RAM in the frame buffer than in the MicroPython heap. -# However, after doing this you have a lot less RAM for some algorithms... -# So, be aware that it's a lot easier to get out of RAM issues now. -extra_fb = sensor.alloc_extra_fb(sensor.width(), sensor.height(), sensor.RGB565) -extra_fb.replace(sensor.snapshot()) - -while(True): - clock.tick() # Track elapsed milliseconds between snapshots(). - img = sensor.snapshot() # Take a picture and return the image. - - # For this example we never update the old image to measure absolute change. - displacement = extra_fb.find_displacement(img) - - # Offset results are noisy without filtering so we drop some accuracy. - sub_pixel_x = int(displacement.x_translation() * 5) / 5.0 - sub_pixel_y = int(displacement.y_translation() * 5) / 5.0 - - if(displacement.response() > 0.1): # Below 0.1 or so (YMMV) and the results are just noise. - print("{0:+f}x {1:+f}y {2} {3} FPS".format(sub_pixel_x, sub_pixel_y, - displacement.response(), - clock.fps())) - else: - print(clock.fps()) diff --git a/scripts/examples/OpenMV/22-Optical-Flow/differential-rotation-scale.py b/scripts/examples/OpenMV/22-Optical-Flow/differential-rotation-scale.py deleted file mode 100644 index 8e1b54c64..000000000 --- a/scripts/examples/OpenMV/22-Optical-Flow/differential-rotation-scale.py +++ /dev/null @@ -1,67 +0,0 @@ -# Differential Optical Flow Rotation/Scale -# -# This example shows off using your OpenMV Cam to measure -# rotation/scale by comparing the current and the previous -# image against each other. Note that only rotation/scale is -# handled - not X and Y translation in this mode. - -# To run this demo effectively please mount your OpenMV Cam on a steady -# base and SLOWLY rotate the camera around the lens and move the camera -# forward/backwards to see the numbers change. -# I.e. Z direction changes only. - -import sensor, image, time, math - -# NOTE!!! You have to use a small power of 2 resolution when using -# find_displacement(). This is because the algorithm is powered by -# something called phase correlation which does the image comparison -# using FFTs. 
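# A two-line sketch of the pool() trick mentioned at the end of this note,
# assuming the sensor is configured for 64x64 as in the setup code below:
# average 2x2 blocks in place to get a 32x32 image for find_displacement().
img = sensor.snapshot()
small = img.pool(2, 2) # 64x64 -> 32x32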
A non-power of 2 resolution requires padding to a power -# of 2 which reduces the usefulness of the algorithm results. Please -# use a resolution like B64X64 or B64X32 (2x faster). - -# Your OpenMV Cam supports power of 2 resolutions of 64x32, 64x64, -# 128x64, and 128x128. If you want a resolution of 32x32 you can create -# it by doing "img.pool(2, 2)" on a 64x64 image. - -sensor.reset() # Reset and initialize the sensor. -sensor.set_pixformat(sensor.RGB565) # Set pixel format to RGB565 (or GRAYSCALE) -sensor.set_framesize(sensor.B64X64) # Set frame size to 64x64... (or 64x32)... -sensor.skip_frames(time = 2000) # Wait for settings take effect. -clock = time.clock() # Create a clock object to track the FPS. - -# Take from the main frame buffer's RAM to allocate a second frame buffer. -# There's a lot more RAM in the frame buffer than in the MicroPython heap. -# However, after doing this you have a lot less RAM for some algorithms... -# So, be aware that it's a lot easier to get out of RAM issues now. -extra_fb = sensor.alloc_extra_fb(sensor.width(), sensor.height(), sensor.RGB565) -extra_fb.replace(sensor.snapshot()) - -while(True): - clock.tick() # Track elapsed milliseconds between snapshots(). - img = sensor.snapshot() # Take a picture and return the image. - - # This algorithm is hard to test without a perfect jig... So, here's a cheat to see it works. - # Put in a z_rotation value below and you should see the r output be equal to that. - if(0): - expected_rotation = 20.0 - extra_fb.rotation_corr(z_rotation=(-expected_rotation)) - - # This algorithm is hard to test without a perfect jig... So, here's a cheat to see it works. - # Put in a zoom value below and you should see the z output be equal to that. - if(0): - expected_zoom = 0.8 - extra_fb.rotation_corr(zoom=(2.00-expected_zoom)) - - displacement = extra_fb.find_displacement(img, logpolar=True) - extra_fb.replace(img) - - # Offset results are noisy without filtering so we drop some accuracy. - rotation_change = int(math.degrees(displacement.rotation()) * 5) / 5.0 - zoom_amount = displacement.scale() - - if(displacement.response() > 0.1): # Below 0.1 or so (YMMV) and the results are just noise. - print("{0:+f}r {1:+f}z {2} {3} FPS".format(rotation_change, zoom_amount, \ - displacement.response(), - clock.fps())) - else: - print(clock.fps()) diff --git a/scripts/examples/OpenMV/22-Optical-Flow/differential-translation.py b/scripts/examples/OpenMV/22-Optical-Flow/differential-translation.py deleted file mode 100644 index 04416cf09..000000000 --- a/scripts/examples/OpenMV/22-Optical-Flow/differential-translation.py +++ /dev/null @@ -1,55 +0,0 @@ -# Differential Optical Flow Translation -# -# This example shows off using your OpenMV Cam to measure translation -# in the X and Y direction by comparing the current and the previous -# image against each other. Note that only X and Y translation is -# handled - not rotation/scale in this mode. - -# To run this demo effectively please mount your OpenMV Cam on a steady -# base and QUICKLY translate it to the left, right, up, and down and -# watch the numbers change. Note that you can see displacement numbers -# up +- half of the hoizontal and vertical resolution. - -import sensor, image, time - -# NOTE!!! You have to use a small power of 2 resolution when using -# find_displacement(). This is because the algorithm is powered by -# something called phase correlation which does the image comparison -# using FFTs. 
A non-power of 2 resolution requires padding to a power -# of 2 which reduces the usefulness of the algorithm results. Please -# use a resolution like B64X64 or B64X32 (2x faster). - -# Your OpenMV Cam supports power of 2 resolutions of 64x32, 64x64, -# 128x64, and 128x128. If you want a resolution of 32x32 you can create -# it by doing "img.pool(2, 2)" on a 64x64 image. - -sensor.reset() # Reset and initialize the sensor. -sensor.set_pixformat(sensor.RGB565) # Set pixel format to RGB565 (or GRAYSCALE) -sensor.set_framesize(sensor.B64X64) # Set frame size to 64x64... (or 64x32)... -sensor.skip_frames(time = 2000) # Wait for settings take effect. -clock = time.clock() # Create a clock object to track the FPS. - -# Take from the main frame buffer's RAM to allocate a second frame buffer. -# There's a lot more RAM in the frame buffer than in the MicroPython heap. -# However, after doing this you have a lot less RAM for some algorithms... -# So, be aware that it's a lot easier to get out of RAM issues now. -extra_fb = sensor.alloc_extra_fb(sensor.width(), sensor.height(), sensor.RGB565) -extra_fb.replace(sensor.snapshot()) - -while(True): - clock.tick() # Track elapsed milliseconds between snapshots(). - img = sensor.snapshot() # Take a picture and return the image. - - displacement = extra_fb.find_displacement(img) - extra_fb.replace(img) - - # Offset results are noisy without filtering so we drop some accuracy. - sub_pixel_x = int(displacement.x_translation() * 5) / 5.0 - sub_pixel_y = int(displacement.y_translation() * 5) / 5.0 - - if(displacement.response() > 0.1): # Below 0.1 or so (YMMV) and the results are just noise. - print("{0:+f}x {1:+f}y {2} {3} FPS".format(sub_pixel_x, sub_pixel_y, - displacement.response(), - clock.fps())) - else: - print(clock.fps()) diff --git a/scripts/examples/OpenMV/22-Optical-Flow/image-patches-absolute-rotation-scale.py b/scripts/examples/OpenMV/22-Optical-Flow/image-patches-absolute-rotation-scale.py deleted file mode 100644 index 414a105e6..000000000 --- a/scripts/examples/OpenMV/22-Optical-Flow/image-patches-absolute-rotation-scale.py +++ /dev/null @@ -1,73 +0,0 @@ -# Image Patches Absolute Optical Flow Rotation/Scale -# -# This example shows off using your OpenMV Cam to measure -# rotation/scale by comparing the current and a previous -# image against each other. Note that only rotation/scale is -# handled - not X and Y translation in this mode. -# -# However, this examples goes beyond doing optical flow on the whole -# image at once. Instead it breaks up the process by working on groups -# of pixels in the image. This gives you a "new" image of results. -# -# NOTE that surfaces need to have some type of "edge" on them for the -# algorithm to work. A featureless surface produces crazy results. - -# NOTE: Unless you have a very nice test rig this example is hard to see usefulness of... - -BLOCK_W = 16 # pow2 -BLOCK_H = 16 # pow2 - -# To run this demo effectively please mount your OpenMV Cam on a steady -# base and SLOWLY rotate the camera around the lens and move the camera -# forward/backwards to see the numbers change. -# I.e. Z direction changes only. - -import sensor, image, time, math - -# NOTE!!! You have to use a small power of 2 resolution when using -# find_displacement(). This is because the algorithm is powered by -# something called phase correlation which does the image comparison -# using FFTs. A non-power of 2 resolution requires padding to a power -# of 2 which reduces the usefulness of the algorithm results. 
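# A small helper sketch for the response() gating these optical flow
# examples share: treat a phase correlation result as valid only when its
# response() rises above the noise floor (about 0.1 here, but YMMV).
def confident(displacement, min_response = 0.1):
    return displacement if (displacement.response() > min_response) else None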
Please -# use a resolution like B128X128 or B128X64 (2x faster). - -# Your OpenMV Cam supports power of 2 resolutions of 64x32, 64x64, -# 128x64, and 128x128. If you want a resolution of 32x32 you can create -# it by doing "img.pool(2, 2)" on a 64x64 image. - -sensor.reset() # Reset and initialize the sensor. -sensor.set_pixformat(sensor.GRAYSCALE) # Set pixel format to GRAYSCALE (or RGB565) -sensor.set_framesize(sensor.B128X128) # Set frame size to 128x128... (or 128x64)... -sensor.skip_frames(time = 2000) # Wait for settings take effect. -clock = time.clock() # Create a clock object to track the FPS. - -# Take from the main frame buffer's RAM to allocate a second frame buffer. -# There's a lot more RAM in the frame buffer than in the MicroPython heap. -# However, after doing this you have a lot less RAM for some algorithms... -# So, be aware that it's a lot easier to get out of RAM issues now. -extra_fb = sensor.alloc_extra_fb(sensor.width(), sensor.height(), sensor.GRAYSCALE) -extra_fb.replace(sensor.snapshot()) - -while(True): - clock.tick() # Track elapsed milliseconds between snapshots(). - img = sensor.snapshot() # Take a picture and return the image. - - for y in range(0, sensor.height(), BLOCK_H): - for x in range(0, sensor.width(), BLOCK_W): - # For this example we never update the old image to measure absolute change. - displacement = extra_fb.find_displacement(img, logpolar=True, \ - roi = (x, y, BLOCK_W, BLOCK_H), template_roi = (x, y, BLOCK_W, BLOCK_H)) - - # Below 0.1 or so (YMMV) and the results are just noise. - if(displacement.response() > 0.1): - rotation_change = displacement.rotation() - zoom_amount = displacement.scale() - pixel_x = x + (BLOCK_W//2) + int(math.sin(rotation_change) * zoom_amount * (BLOCK_W//4)) - pixel_y = y + (BLOCK_H//2) + int(math.cos(rotation_change) * zoom_amount * (BLOCK_H//4)) - img.draw_line((x + BLOCK_W//2, y + BLOCK_H//2, pixel_x, pixel_y), \ - color = 255) - else: - img.draw_line((x + BLOCK_W//2, y + BLOCK_H//2, x + BLOCK_W//2, y + BLOCK_H//2), \ - color = 0) - - print(clock.fps()) diff --git a/scripts/examples/OpenMV/22-Optical-Flow/image-patches-absolute-translation.py b/scripts/examples/OpenMV/22-Optical-Flow/image-patches-absolute-translation.py deleted file mode 100644 index 0bfae8ca6..000000000 --- a/scripts/examples/OpenMV/22-Optical-Flow/image-patches-absolute-translation.py +++ /dev/null @@ -1,69 +0,0 @@ -# Image Patches Absolute Optical Flow Translation -# -# This example shows off using your OpenMV Cam to measure translation -# in the X and Y direction by comparing the current and a previous -# image against each other. Note that only X and Y translation is -# handled - not rotation/scale in this mode. -# -# However, this examples goes beyond doing optical flow on the whole -# image at once. Instead it breaks up the process by working on groups -# of pixels in the image. This gives you a "new" image of results. -# -# NOTE that surfaces need to have some type of "edge" on them for the -# algorithm to work. A featureless surface produces crazy results. - -BLOCK_W = 16 # pow2 -BLOCK_H = 16 # pow2 - -# To run this demo effectively please mount your OpenMV Cam on a steady -# base and SLOWLY translate it to the left, right, up, and down and -# watch the numbers change. Note that you can see displacement numbers -# up +- half of the hoizontal and vertical resolution. - -import sensor, image, time - -# NOTE!!! You have to use a small power of 2 resolution when using -# find_displacement(). 
diff --git a/scripts/examples/OpenMV/22-Optical-Flow/image-patches-absolute-translation.py b/scripts/examples/OpenMV/22-Optical-Flow/image-patches-absolute-translation.py
deleted file mode 100644
index 0bfae8ca6..000000000
--- a/scripts/examples/OpenMV/22-Optical-Flow/image-patches-absolute-translation.py
+++ /dev/null
@@ -1,69 +0,0 @@
-# Image Patches Absolute Optical Flow Translation
-#
-# This example shows off using your OpenMV Cam to measure translation
-# in the X and Y direction by comparing the current and a previous
-# image against each other. Note that only X and Y translation is
-# handled - not rotation/scale in this mode.
-#
-# However, this example goes beyond doing optical flow on the whole
-# image at once. Instead it breaks up the process by working on groups
-# of pixels in the image. This gives you a "new" image of results.
-#
-# NOTE that surfaces need to have some type of "edge" on them for the
-# algorithm to work. A featureless surface produces crazy results.
-
-BLOCK_W = 16 # pow2
-BLOCK_H = 16 # pow2
-
-# To run this demo effectively please mount your OpenMV Cam on a steady
-# base and SLOWLY translate it to the left, right, up, and down and
-# watch the numbers change. Note that you can see displacement numbers
-# of up to +/- half of the horizontal and vertical resolution.
-
-import sensor, image, time
-
-# NOTE!!! You have to use a small power of 2 resolution when using
-# find_displacement(). This is because the algorithm is powered by
-# something called phase correlation which does the image comparison
-# using FFTs. A non-power of 2 resolution requires padding to a power
-# of 2 which reduces the usefulness of the algorithm results. Please
-# use a resolution like B128X128 or B128X64 (2x faster).
-
-# Your OpenMV Cam supports power of 2 resolutions of 64x32, 64x64,
-# 128x64, and 128x128. If you want a resolution of 32x32 you can create
-# it by doing "img.pool(2, 2)" on a 64x64 image.
-
-sensor.reset()                         # Reset and initialize the sensor.
-sensor.set_pixformat(sensor.GRAYSCALE) # Set pixel format to GRAYSCALE (or RGB565)
-sensor.set_framesize(sensor.B128X128)  # Set frame size to 128x128... (or 128x64)...
-sensor.skip_frames(time = 2000)        # Wait for settings to take effect.
-clock = time.clock()                   # Create a clock object to track the FPS.
-
-# Take RAM from the main frame buffer to allocate a second frame buffer.
-# There's a lot more RAM in the frame buffer than in the MicroPython heap.
-# However, after doing this you have a lot less RAM for some algorithms...
-# So, be aware that it's now a lot easier to run out of RAM.
-extra_fb = sensor.alloc_extra_fb(sensor.width(), sensor.height(), sensor.GRAYSCALE)
-extra_fb.replace(sensor.snapshot())
-
-while(True):
-    clock.tick() # Track elapsed milliseconds between snapshots().
-    img = sensor.snapshot() # Take a picture and return the image.
-
-    for y in range(0, sensor.height(), BLOCK_H):
-        for x in range(0, sensor.width(), BLOCK_W):
-            # For this example we never update the old image to measure absolute change.
-            displacement = extra_fb.find_displacement(img, \
-                roi = (x, y, BLOCK_W, BLOCK_H), template_roi = (x, y, BLOCK_W, BLOCK_H))
-
-            # Below 0.1 or so (YMMV) the results are just noise.
-            if(displacement.response() > 0.1):
-                pixel_x = x + (BLOCK_W//2) + int(displacement.x_translation())
-                pixel_y = y + (BLOCK_H//2) + int(displacement.y_translation())
-                img.draw_line((x + BLOCK_W//2, y + BLOCK_H//2, pixel_x, pixel_y), \
-                              color = 255)
-            else:
-                img.draw_line((x + BLOCK_W//2, y + BLOCK_H//2, x + BLOCK_W//2, y + BLOCK_H//2), \
-                              color = 0)
-
-    print(clock.fps())
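The per-block loop above produces a grid of translation vectors. If what you actually want is one global motion estimate that is more robust than a single whole-image call, the block results can be averaged, weighted by their response. A hypothetical helper (mean_flow is not part of the OpenMV API) under the same find_displacement() assumptions:

def mean_flow(old_fb, img, block_w=16, block_h=16, min_response=0.1):
    # Average the per-block translation vectors, weighting each block by its
    # phase-correlation response, to get a single global flow estimate.
    sum_x = sum_y = sum_r = 0.0
    for y in range(0, img.height(), block_h):
        for x in range(0, img.width(), block_w):
            d = old_fb.find_displacement(img, roi=(x, y, block_w, block_h),
                                         template_roi=(x, y, block_w, block_h))
            r = d.response()
            if r > min_response: # skip blocks that are just noise
                sum_x += d.x_translation() * r
                sum_y += d.y_translation() * r
                sum_r += r
    if not sum_r:
        return (0.0, 0.0) # nothing trustworthy in this frame
    return (sum_x / sum_r, sum_y / sum_r)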
diff --git a/scripts/examples/OpenMV/22-Optical-Flow/image-patches-differential-rotation-scale.py b/scripts/examples/OpenMV/22-Optical-Flow/image-patches-differential-rotation-scale.py
deleted file mode 100644
index bb1bc2eea..000000000
--- a/scripts/examples/OpenMV/22-Optical-Flow/image-patches-differential-rotation-scale.py
+++ /dev/null
@@ -1,73 +0,0 @@
-# Image Patches Differential Optical Flow Rotation/Scale
-#
-# This example shows off using your OpenMV Cam to measure
-# rotation/scale by comparing the current and the previous
-# image against each other. Note that only rotation/scale is
-# handled - not X and Y translation in this mode.
-#
-# However, this example goes beyond doing optical flow on the whole
-# image at once. Instead it breaks up the process by working on groups
-# of pixels in the image. This gives you a "new" image of results.
-#
-# NOTE that surfaces need to have some type of "edge" on them for the
-# algorithm to work. A featureless surface produces crazy results.
-
-# NOTE: Unless you have a very nice test rig it is hard to see the usefulness of this example...
-
-BLOCK_W = 16 # pow2
-BLOCK_H = 16 # pow2
-
-# To run this demo effectively please mount your OpenMV Cam on a steady
-# base and SLOWLY rotate the camera around the lens and move the camera
-# forward/backwards to see the numbers change.
-# I.e. Z direction changes only.
-
-import sensor, image, time, math
-
-# NOTE!!! You have to use a small power of 2 resolution when using
-# find_displacement(). This is because the algorithm is powered by
-# something called phase correlation which does the image comparison
-# using FFTs. A non-power of 2 resolution requires padding to a power
-# of 2 which reduces the usefulness of the algorithm results. Please
-# use a resolution like B128X128 or B128X64 (2x faster).
-
-# Your OpenMV Cam supports power of 2 resolutions of 64x32, 64x64,
-# 128x64, and 128x128. If you want a resolution of 32x32 you can create
-# it by doing "img.pool(2, 2)" on a 64x64 image.
-
-sensor.reset()                         # Reset and initialize the sensor.
-sensor.set_pixformat(sensor.GRAYSCALE) # Set pixel format to GRAYSCALE (or RGB565)
-sensor.set_framesize(sensor.B128X128)  # Set frame size to 128x128... (or 128x64)...
-sensor.skip_frames(time = 2000)        # Wait for settings to take effect.
-clock = time.clock()                   # Create a clock object to track the FPS.
-
-# Take RAM from the main frame buffer to allocate a second frame buffer.
-# There's a lot more RAM in the frame buffer than in the MicroPython heap.
-# However, after doing this you have a lot less RAM for some algorithms...
-# So, be aware that it's now a lot easier to run out of RAM.
-extra_fb = sensor.alloc_extra_fb(sensor.width(), sensor.height(), sensor.GRAYSCALE)
-extra_fb.replace(sensor.snapshot())
-
-while(True):
-    clock.tick() # Track elapsed milliseconds between snapshots().
-    img = sensor.snapshot() # Take a picture and return the image.
-
-    for y in range(0, sensor.height(), BLOCK_H):
-        for x in range(0, sensor.width(), BLOCK_W):
-            displacement = extra_fb.find_displacement(img, logpolar=True, \
-                roi = (x, y, BLOCK_W, BLOCK_H), template_roi = (x, y, BLOCK_W, BLOCK_H))
-
-            # Below 0.1 or so (YMMV) the results are just noise.
-            if(displacement.response() > 0.1):
-                rotation_change = displacement.rotation()
-                zoom_amount = 1.0 + displacement.scale()
-                pixel_x = x + (BLOCK_W//2) + int(math.sin(rotation_change) * zoom_amount * (BLOCK_W//4))
-                pixel_y = y + (BLOCK_H//2) + int(math.cos(rotation_change) * zoom_amount * (BLOCK_H//4))
-                img.draw_line((x + BLOCK_W//2, y + BLOCK_H//2, pixel_x, pixel_y), \
-                              color = 255)
-            else:
-                img.draw_line((x + BLOCK_W//2, y + BLOCK_H//2, x + BLOCK_W//2, y + BLOCK_H//2), \
-                              color = 0)
-    extra_fb.replace(img)
-
-    print(clock.fps())
diff --git a/scripts/examples/OpenMV/22-Optical-Flow/image-patches-differential-translation.py b/scripts/examples/OpenMV/22-Optical-Flow/image-patches-differential-translation.py
deleted file mode 100644
index 0a87c42f3..000000000
--- a/scripts/examples/OpenMV/22-Optical-Flow/image-patches-differential-translation.py
+++ /dev/null
@@ -1,69 +0,0 @@
-# Image Patches Differential Optical Flow Translation
-#
-# This example shows off using your OpenMV Cam to measure translation
-# in the X and Y direction by comparing the current and the previous
-# image against each other. Note that only X and Y translation is
-# handled - not rotation/scale in this mode.
-#
-# However, this example goes beyond doing optical flow on the whole
-# image at once. Instead it breaks up the process by working on groups
-# of pixels in the image. This gives you a "new" image of results.
-#
-# NOTE that surfaces need to have some type of "edge" on them for the
-# algorithm to work. A featureless surface produces crazy results.
-
-BLOCK_W = 16 # pow2
-BLOCK_H = 16 # pow2
-
-# To run this demo effectively please mount your OpenMV Cam on a steady
-# base and SLOWLY translate it to the left, right, up, and down and
-# watch the numbers change. Note that you can see displacement numbers
-# of up to +/- half of the horizontal and vertical resolution.
-
-import sensor, image, time
-
-# NOTE!!! You have to use a small power of 2 resolution when using
-# find_displacement(). This is because the algorithm is powered by
-# something called phase correlation which does the image comparison
-# using FFTs. A non-power of 2 resolution requires padding to a power
-# of 2 which reduces the usefulness of the algorithm results. Please
-# use a resolution like B128X128 or B128X64 (2x faster).
-
-# Your OpenMV Cam supports power of 2 resolutions of 64x32, 64x64,
-# 128x64, and 128x128. If you want a resolution of 32x32 you can create
-# it by doing "img.pool(2, 2)" on a 64x64 image.
-
-sensor.reset()                         # Reset and initialize the sensor.
-sensor.set_pixformat(sensor.GRAYSCALE) # Set pixel format to GRAYSCALE (or RGB565)
-sensor.set_framesize(sensor.B128X128)  # Set frame size to 128x128... (or 128x64)...
-sensor.skip_frames(time = 2000)        # Wait for settings to take effect.
-clock = time.clock()                   # Create a clock object to track the FPS.
-
-# Take RAM from the main frame buffer to allocate a second frame buffer.
-# There's a lot more RAM in the frame buffer than in the MicroPython heap.
-# However, after doing this you have a lot less RAM for some algorithms...
-# So, be aware that it's now a lot easier to run out of RAM.
-extra_fb = sensor.alloc_extra_fb(sensor.width(), sensor.height(), sensor.GRAYSCALE)
-extra_fb.replace(sensor.snapshot())
-
-while(True):
-    clock.tick() # Track elapsed milliseconds between snapshots().
-    img = sensor.snapshot() # Take a picture and return the image.
-
-    for y in range(0, sensor.height(), BLOCK_H):
-        for x in range(0, sensor.width(), BLOCK_W):
-            displacement = extra_fb.find_displacement(img, \
-                roi = (x, y, BLOCK_W, BLOCK_H), template_roi = (x, y, BLOCK_W, BLOCK_H))
-
-            # Below 0.1 or so (YMMV) the results are just noise.
-            if(displacement.response() > 0.1):
-                pixel_x = x + (BLOCK_W//2) + int(displacement.x_translation())
-                pixel_y = y + (BLOCK_H//2) + int(displacement.y_translation())
-                img.draw_line((x + BLOCK_W//2, y + BLOCK_H//2, pixel_x, pixel_y), \
-                              color = 255)
-            else:
-                img.draw_line((x + BLOCK_W//2, y + BLOCK_H//2, x + BLOCK_W//2, y + BLOCK_H//2), \
-                              color = 0)
-    extra_fb.replace(img)
-
-    print(clock.fps())
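Differential mode only reports frame-to-frame motion. To get an absolute position estimate you can integrate the per-frame translations, at the cost of drift, since every small error accumulates. A minimal whole-image odometry sketch under the same find_displacement() assumptions as the examples above:

import sensor, time

sensor.reset()
sensor.set_pixformat(sensor.GRAYSCALE)
sensor.set_framesize(sensor.B128X128)
sensor.skip_frames(time = 2000)
clock = time.clock()

extra_fb = sensor.alloc_extra_fb(sensor.width(), sensor.height(), sensor.GRAYSCALE)
extra_fb.replace(sensor.snapshot())

pos_x = 0.0 # accumulated translation in pixels since start
pos_y = 0.0

while(True):
    clock.tick()
    img = sensor.snapshot()

    d = extra_fb.find_displacement(img) # differential: compare against the last frame
    extra_fb.replace(img)

    if d.response() > 0.1:
        # Integrate frame-to-frame motion. This is odometry, not ground
        # truth - small errors accumulate, so expect the estimate to drift.
        pos_x += d.x_translation()
        pos_y += d.y_translation()

    print("position %+.1f, %+.1f px, %.1f fps" % (pos_x, pos_y, clock.fps()))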
diff --git a/scripts/examples/OpenMV/24-External-Sensors/I2C_Lidar_Lite_V3_example_code.py b/scripts/examples/OpenMV/24-External-Sensors/I2C_Lidar_Lite_V3_example_code.py
deleted file mode 100644
index ac9cee166..000000000
--- a/scripts/examples/OpenMV/24-External-Sensors/I2C_Lidar_Lite_V3_example_code.py
+++ /dev/null
@@ -1,63 +0,0 @@
-# OpenMV M7 I2C interface with Garmin Lidar Lite V3 - By: Grant Phillips - Sun Apr 8 2018
-
-
-# Returns a basic distance reading from the lidar in cm for the target point and prints it to the console.
-# Uses default lidar settings. For more advanced settings, see the I2C commands in the manual:
-# https://static.garmin.com/pumac/LIDAR_Lite_v3_Operation_Manual_and_Technical_Specifications.pdf
-
-# I2C Control of LIDAR Lite V3:
-# 1. Write 0x04 to register 0x00.
-# 2. Read register 0x01. Repeat until bit 0 (LSB) goes low.
-# 3. Read two bytes from 0x8f (high byte 0x0f then low byte 0x10) to obtain the 16-bit measurement in cm.
-
-# HARDWARE CONNECTIONS:
-# Connect the lidar SCL line (green) to I2C 2 SCL on the OpenMV Cam (pin 4).
-# Connect the lidar SDA line (blue) to I2C 2 SDA on the OpenMV Cam (pin 5).
-# 680uF filter capacitor in parallel with the lidar.
-# 10k pullup resistors on the SCL and SDA lines to +5Vdc.
-
-
-import pyb
-from pyb import I2C
-
-
-lidarReady = bytearray([0xff]) # holds the returned data for the ready check
-lidarReadyCheck = bytes([1])   # to compare bit 0 of lidarReady
-
-startBuf = bytearray([0x00,0x04]) # step 1 address and data
-readyBuf = bytearray([0x01])      # step 2 address for the readiness check
-distBuf = bytearray([0x8f])       # step 3 address for the distance reading
-distance = -1                     # variable for the distance reading
-
-# I2C setup
-Lidar = I2C(2, I2C.MASTER) # initialise I2C 2 bus in master mode
-
-
-while(True):
-    distance = -1 # reset to -1 so we know when we get a real reading
-
-    try: # handles errors thrown up if we have an I2C error
-        # Step 1: Write 0x04 to register 0x00.
-        Lidar.send(startBuf, 0x62) # this is making it read (laser visible)
-
-        # Step 2: Read register 0x01 and wait for bit 0 to go low.
-        while (lidarReady[0] & readyBuf[0]):
-            Lidar.send(readyBuf, 0x62)
-            lidarReady = Lidar.recv(1, 0x62)
-            pyb.delay(50) # This seems to help reduce errors on the I2C bus.
-        lidarReady = bytearray([0xff]) # reset the ready check data for the next reading
-
-        # Step 3: Read the distance measurement from 0x8f (0x0f and 0x10).
-        Lidar.send(distBuf, 0x62)
-        dist = Lidar.recv(2, 0x62)
-        distance = dist[0]
-        distance <<= 8 # shift the high byte into the top of a 16-bit int
-        distance |= dist[1]
-        pyb.delay(100) # allow time between readings; you can go faster but get more errors
-
-    except OSError: # reinitialise the I2C bus on error
-        Lidar.init(I2C.MASTER)
-        print("error, reinitialising")
-
-    if distance > -1:
-        print("Distance:", distance, "cm")
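The shift-and-OR byte handling above can also be written with struct, which makes the byte order explicit. ">H" decodes a big-endian (high byte first) unsigned 16-bit value, matching the 0x0f-then-0x10 register order described in the comments; the reply bytes below are made up for illustration:

import struct

dist = b'\x01\x2c' # example reply from Lidar.recv(2, 0x62): high byte 0x01, low byte 0x2C

# ">H" = big-endian unsigned 16-bit: (dist[0] << 8) | dist[1], same as the loop above.
distance = struct.unpack(">H", dist)[0]
print(distance, "cm") # 300 cm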
diff --git a/scripts/examples/OpenMV/25-Machine-Learning/nn_stm32cubeai.py b/scripts/examples/OpenMV/25-Machine-Learning/nn_stm32cubeai.py
deleted file mode 100644
index 542aaf458..000000000
--- a/scripts/examples/OpenMV/25-Machine-Learning/nn_stm32cubeai.py
+++ /dev/null
@@ -1,38 +0,0 @@
-# STM32 CUBE.AI on OpenMV MNIST Example
-# See https://github.com/openmv/openmv/blob/master/src/stm32cubeai/README.MD
-
-import sensor, image, time, nn_st
-
-sensor.reset() # Reset and initialize the sensor.
-sensor.set_contrast(3)
-sensor.set_brightness(0)
-sensor.set_auto_gain(True)
-sensor.set_auto_exposure(True)
-sensor.set_pixformat(sensor.GRAYSCALE) # Set pixel format to Grayscale
-sensor.set_framesize(sensor.QQQVGA)    # Set frame size to 80x60
-sensor.skip_frames(time = 2000)        # Wait for settings to take effect.
-clock = time.clock()                   # Create a clock object to track the FPS.
-
-# [CUBE.AI] Initialize the network
-net = nn_st.loadnnst('network')
-
-nn_input_sz = 28 # The NN input is 28x28
-
-while(True):
-    clock.tick() # Update the FPS clock.
-    img = sensor.snapshot() # Take a picture and return the image.
-
-    # Crop in the middle (avoids vignetting)
-    img.crop((img.width()//2-nn_input_sz//2,
-              img.height()//2-nn_input_sz//2,
-              nn_input_sz,
-              nn_input_sz))
-
-    # Binarize the image
-    img.midpoint(2, bias=0.5, threshold=True, offset=5, invert=True)
-
-    # [CUBE.AI] Run the inference
-    out = net.predict(img)
-    print('Network argmax output: {}'.format( out.index(max(out)) ))
-    img.draw_string(0, 0, str(out.index(max(out))))
-    print('FPS {}'.format(clock.fps())) # Note: the OpenMV Cam runs about half as fast when connected to the IDE.
diff --git a/scripts/examples/OpenMV/25-Machine-Learning/tf_object_detection.py b/scripts/examples/OpenMV/25-Machine-Learning/tf_object_detection.py
deleted file mode 100644
index 48a2254ba..000000000
--- a/scripts/examples/OpenMV/25-Machine-Learning/tf_object_detection.py
+++ /dev/null
@@ -1,51 +0,0 @@
-# TensorFlow Lite Object Detection Example
-#
-# This example shows off object detection. Object detection is much more powerful than
-# object classification. It can locate multiple objects in the image.
-
-import sensor, image, time, os, tf
-
-sensor.reset()                      # Reset and initialize the sensor.
-sensor.set_pixformat(sensor.RGB565) # Set pixel format to RGB565 (or GRAYSCALE)
-sensor.set_framesize(sensor.QVGA)   # Set frame size to QVGA (320x240)
-sensor.set_windowing((240, 240))    # Set 240x240 window.
-sensor.skip_frames(time=2000)       # Let the camera adjust.
-
-net = tf.load('', load_to_fb=True)
-labels = []
-
-try: # Load labels if they exist
-    labels = [line.rstrip('\n') for line in open("labels.txt")]
-except:
-    pass
-
-colors = [ # Add more colors if you are detecting more than 7 types of classes at once.
-    (255,   0,   0),
-    (  0, 255,   0),
-    (255, 255,   0),
-    (  0,   0, 255),
-    (255,   0, 255),
-    (  0, 255, 255),
-    (255, 255, 255),
-]
-
-clock = time.clock()
-while(True):
-    clock.tick()
-
-    img = sensor.snapshot()
-
-    # detect() segments an object using the provided segmentation model. This produces multiple
-    # grayscale images per object class that we are trying to detect. detect() then runs
-    # find_blobs() internally on the segmented images to find all blob locations and then returns
-    # the bounding boxes of all blobs found per object class. So, detect() returns a list of lists of
-    # classification objects and the respective confidence level.
-
-    for i, detection_list in enumerate(net.detect(img, thresholds=[(128, 255)])):
-        if (i < len(labels)):
-            print("********** %s **********" % labels[i])
-        for d in detection_list:
-            print(d)
-            img.draw_rectangle(d.rect(), color=colors[i])
-
-    print(clock.fps(), "fps", end="\n\n")
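Since detect() only hands back bounding boxes per class, any point-of-interest math is up to the caller. A small sketch of one way to post-process the loop above, computing each detection's center from d.rect() and marking it; it assumes the img/net/labels/colors setup from the example:

for i, detection_list in enumerate(net.detect(img, thresholds=[(128, 255)])):
    for d in detection_list:
        x, y, w, h = d.rect()
        cx = x + (w // 2) # center of the bounding box
        cy = y + (h // 2)
        img.draw_cross(cx, cy, color=colors[i])
        if i < len(labels):
            img.draw_string(x, y - 8, labels[i], color=colors[i], mono_space=False)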
diff --git a/scripts/examples/OpenMV/25-Machine-Learning/tf_person_detection_search_just_center.py b/scripts/examples/OpenMV/25-Machine-Learning/tf_person_detection_search_just_center.py
deleted file mode 100644
index b1531f58b..000000000
--- a/scripts/examples/OpenMV/25-Machine-Learning/tf_person_detection_search_just_center.py
+++ /dev/null
@@ -1,48 +0,0 @@
-# TensorFlow Lite Person Detection Example
-#
-# Google's Person Detection Model detects if a person is in view.
-#
-# In this example we slide the detector window over the image and get a list
-# of activations. Note that using a CNN with a sliding window is extremely
-# computationally expensive, so for an exhaustive search do not expect the CNN to be real-time.
-
-import sensor, image, time, os, tf
-
-sensor.reset()                         # Reset and initialize the sensor.
-sensor.set_pixformat(sensor.GRAYSCALE) # Set pixel format to GRAYSCALE (or RGB565)
-sensor.set_framesize(sensor.QVGA)      # Set frame size to QVGA (320x240)
-sensor.set_windowing((240, 240))       # Set 240x240 window.
-sensor.skip_frames(time=2000)          # Let the camera adjust.
-
-# Load the built-in person detection network (the network is in your OpenMV Cam's firmware).
-labels, net = tf.load_builtin_model('person_detection')
-
-clock = time.clock()
-while(True):
-    clock.tick()
-
-    img = sensor.snapshot()
-
-    # net.classify() will run the network on an roi in the image (or on the whole image if the roi is not
-    # specified). A classification score output vector will be generated for each location. At each scale the
-    # detection window is moved around in the ROI using x_overlap (0-1) and y_overlap (0-1) as a guide.
-    # If you set the overlap to 0.5 then each detection window will overlap the previous one by 50%. Note
-    # that the computational workload goes WAY up with more overlap. Finally, for multi-scale matching, after
-    # sliding the network around in the x/y dimensions the detection window will shrink by scale_mul (0-1)
-    # down to min_scale (0-1). For example, if scale_mul is 0.5 the detection window will shrink by 50%.
-    # Note that at a lower scale there's even more area to search if x_overlap and y_overlap are small...
-
-    # Setting x_overlap=-1 forces the window to stay centered in the ROI in the x direction always. If
-    # y_overlap is not -1 the method will search in all vertical positions.
-
-    # Setting y_overlap=-1 forces the window to stay centered in the ROI in the y direction always. If
-    # x_overlap is not -1 the method will search in all horizontal positions.
-
-    # The default settings just do one detection... change them to search the image...
-    for obj in net.classify(img, min_scale=0.5, scale_mul=0.5, x_overlap=-1, y_overlap=-1):
-        print("**********\nDetections at [x=%d,y=%d,w=%d,h=%d]" % obj.rect())
-        for i in range(len(obj.output())):
-            print("%s = %f" % (labels[i], obj.output()[i]))
-        img.draw_rectangle(obj.rect())
-        img.draw_string(obj.x()+3, obj.y()-1, labels[obj.output().index(max(obj.output()))], mono_space = False)
-    print(clock.fps(), "fps")
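The labels[obj.output().index(max(obj.output()))] idiom above is worth pulling into a helper if you use it more than once. A hypothetical best_label() function (not part of the tf module) built only on the obj.output() API the example already uses:

def best_label(obj, labels):
    scores = obj.output()         # one score per label
    i = scores.index(max(scores)) # argmax over the score vector
    return labels[i], scores[i]

# Inside the classify() loop above this replaces the inline expression:
#   name, score = best_label(obj, labels)
#   img.draw_string(obj.x() + 3, obj.y() - 1, name, mono_space=False)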
diff --git a/scripts/examples/OpenMV/25-Machine-Learning/tf_person_detection_search_whole_window.py b/scripts/examples/OpenMV/25-Machine-Learning/tf_person_detection_search_whole_window.py
deleted file mode 100644
index ff05e03d6..000000000
--- a/scripts/examples/OpenMV/25-Machine-Learning/tf_person_detection_search_whole_window.py
+++ /dev/null
@@ -1,42 +0,0 @@
-# TensorFlow Lite Person Detection Example
-#
-# Google's Person Detection Model detects if a person is in view.
-#
-# In this example we slide the detector window over the image and get a list
-# of activations. Note that using a CNN with a sliding window is extremely
-# computationally expensive, so for an exhaustive search do not expect the CNN to be real-time.
-
-import sensor, image, time, os, tf
-
-sensor.reset()                         # Reset and initialize the sensor.
-sensor.set_pixformat(sensor.GRAYSCALE) # Set pixel format to GRAYSCALE (or RGB565)
-sensor.set_framesize(sensor.QVGA)      # Set frame size to QVGA (320x240)
-sensor.set_windowing((240, 240))       # Set 240x240 window.
-sensor.skip_frames(time=2000)          # Let the camera adjust.
-
-# Load the built-in person detection network (the network is in your OpenMV Cam's firmware).
-labels, net = tf.load_builtin_model('person_detection')
-
-clock = time.clock()
-while(True):
-    clock.tick()
-
-    img = sensor.snapshot()
-
-    # net.classify() will run the network on an roi in the image (or on the whole image if the roi is not
-    # specified). A classification score output vector will be generated for each location. At each scale the
-    # detection window is moved around in the ROI using x_overlap (0-1) and y_overlap (0-1) as a guide.
-    # If you set the overlap to 0.5 then each detection window will overlap the previous one by 50%. Note
-    # that the computational workload goes WAY up with more overlap. Finally, for multi-scale matching, after
-    # sliding the network around in the x/y dimensions the detection window will shrink by scale_mul (0-1)
-    # down to min_scale (0-1). For example, if scale_mul is 0.5 the detection window will shrink by 50%.
-    # Note that at a lower scale there's even more area to search if x_overlap and y_overlap are small...
-
-    # The default settings just do one detection... change them to search the image...
-    for obj in net.classify(img, min_scale=1.0, scale_mul=0.5, x_overlap=0.0, y_overlap=0.0):
-        print("**********\nDetections at [x=%d,y=%d,w=%d,h=%d]" % obj.rect())
-        for i in range(len(obj.output())):
-            print("%s = %f" % (labels[i], obj.output()[i]))
-        img.draw_rectangle(obj.rect())
-        img.draw_string(obj.x()+3, obj.y()-1, labels[obj.output().index(max(obj.output()))], mono_space = False)
-    print(clock.fps(), "fps")
diff --git a/scripts/examples/OpenMV/28-Global-Shutter/high_fps.py b/scripts/examples/OpenMV/28-Global-Shutter/high_fps.py
deleted file mode 100644
index d2cf12ded..000000000
--- a/scripts/examples/OpenMV/28-Global-Shutter/high_fps.py
+++ /dev/null
@@ -1,29 +0,0 @@
-# High FPS Example
-#
-# This example shows off how to make the frame rate of the global shutter camera extremely
-# high. To do so you need to set the resolution to a low value such that pixel binning is
-# activated on the camera and then reduce the maximum exposure time.
-#
-# When the resolution is 320x240 or less the camera reads out pixels 2x faster. When the
-# resolution is 160x120 or less the camera reads out pixels 4x faster. This happens due
-# to pixel binning which is automatically activated for you to increase the readout speed.
-#
-# While the readout speed may increase, the camera must still expose the image for the requested
-# time, so you will not get the maximum readout speed unless you reduce the exposure time too.
-# This results in a dark image, however, so YOU NEED A LOT of lighting for high FPS.
-
-import sensor, image, time
-
-sensor.reset()                         # Reset and initialize the sensor.
-sensor.set_pixformat(sensor.GRAYSCALE) # Set pixel format to GRAYSCALE
-sensor.set_framesize(sensor.QQVGA)     # Set frame size to QQVGA (160x120) - make smaller to go faster
-sensor.skip_frames(time = 2000)        # Wait for settings to take effect.
-clock = time.clock()                   # Create a clock object to track the FPS.
-
-sensor.set_auto_exposure(True, exposure_us=5000) # make smaller to go faster
-
-while(True):
-    clock.tick() # Update the FPS clock.
-    img = sensor.snapshot() # Take a picture and return the image.
-    print(clock.fps()) # Note: OpenMV Cam runs about half as fast when connected
-                       # to the IDE. The FPS should increase once disconnected.
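To see why the exposure cap matters, note that the sensor cannot finish frames faster than it exposes them, so the exposure time alone puts a hard ceiling on the frame rate; readout time then lowers that ceiling further. Illustrative arithmetic only:

EXPOSURE_US = 5000 # from the example above

# Upper bound from exposure alone: at most one frame per exposure period.
max_fps_from_exposure = 1000000 / EXPOSURE_US
print(max_fps_from_exposure) # 200.0 FPS ceiling, before readout overhead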
diff --git a/scripts/examples/OpenMV/28-Global-Shutter/triggered_mode.py b/scripts/examples/OpenMV/28-Global-Shutter/triggered_mode.py
deleted file mode 100644
index 08e78c916..000000000
--- a/scripts/examples/OpenMV/28-Global-Shutter/triggered_mode.py
+++ /dev/null
@@ -1,29 +0,0 @@
-# Global Shutter Triggered Mode Example
-#
-# This example shows off setting the global shutter camera into triggered mode. In triggered mode
-# snapshot() controls EXACTLY when integration of the camera pixels starts, so that you can sync
-# taking pictures to some external movement. Since the camera captures all pixels at the same time
-# (as it is a global shutter camera versus a rolling shutter camera) movement in the image will
-# only be captured for the integration time and not the integration time multiplied by the number
-# of rows in the image. Additionally, sensor noise is reduced in triggered mode as the camera will
-# not read out rows until after exposing, which results in a higher quality image.
-#
-# That said, your maximum frame rate will be reduced by 2 to 3x as frames are no longer generated
-# continuously by the camera and because you have to wait for the integration to finish before
-# readout of the frame.
-
-import sensor, image, time
-
-sensor.reset()                         # Reset and initialize the sensor.
-sensor.set_pixformat(sensor.GRAYSCALE) # Set pixel format to GRAYSCALE
-sensor.set_framesize(sensor.VGA)       # Set frame size to VGA (640x480)
-sensor.skip_frames(time = 2000)        # Wait for settings to take effect.
-clock = time.clock()                   # Create a clock object to track the FPS.
-
-sensor.ioctl(sensor.IOCTL_SET_TRIGGERED_MODE, True)
-
-while(True):
-    clock.tick() # Update the FPS clock.
-    img = sensor.snapshot() # Take a picture and return the image.
-    print(clock.fps()) # Note: OpenMV Cam runs about half as fast when connected
-                       # to the IDE. The FPS should increase once disconnected.
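Because snapshot() starts integration in triggered mode, it can be loosely synchronized with external events such as an illumination strobe. A sketch under the same triggered-mode setup as above; pin "P0" and the strobe wiring are hypothetical, and the exact on/off timing relative to integration is an assumption to verify on hardware:

import sensor
from pyb import Pin

sensor.reset()
sensor.set_pixformat(sensor.GRAYSCALE)
sensor.set_framesize(sensor.VGA)
sensor.skip_frames(time = 2000)
sensor.ioctl(sensor.IOCTL_SET_TRIGGERED_MODE, True)

strobe = Pin("P0", Pin.OUT_PP) # hypothetical pin wired to your light source

while(True):
    strobe.high()           # light on just before integration begins
    img = sensor.snapshot() # starts the exposure and returns the frame
    strobe.low()            # light off once the frame has been captured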
diff --git a/scripts/examples/OpenMV/32-modbus/modbus_apriltag.py b/scripts/examples/OpenMV/32-modbus/modbus_apriltag.py
deleted file mode 100644
index c0e51b501..000000000
--- a/scripts/examples/OpenMV/32-modbus/modbus_apriltag.py
+++ /dev/null
@@ -1,39 +0,0 @@
-import sensor, image
-import time
-from pyb import UART
-from modbus import ModbusRTU
-
-sensor.reset()
-sensor.set_pixformat(sensor.GRAYSCALE)
-sensor.set_framesize(sensor.QQVGA) # we run out of memory if the resolution is much bigger...
-
-uart = UART(3, 115200, parity=None, stop=2, timeout=1, timeout_char=4)
-modbus = ModbusRTU(uart, register_num=9999)
-
-sensor.skip_frames(time = 2000)
-clock = time.clock()
-
-while(True):
-    if modbus.any():
-        modbus.handle(debug=True)
-    else:
-        clock.tick()
-        img = sensor.snapshot()
-        tags = img.find_apriltags() # defaults to TAG36H11 without "families".
-        modbus.clear()
-        modbus.REGISTER[0] = len(tags)
-        if tags:
-            print(tags)
-            i = 1
-            for tag in tags:
-                img.draw_rectangle(tag.rect(), color = 127)
-                modbus.REGISTER[i] = tag.family()
-                i += 1
-                modbus.REGISTER[i] = tag.id()
-                i += 1
-                modbus.REGISTER[i] = tag.cx()
-                i += 1
-                modbus.REGISTER[i] = tag.cy()
-                i += 1
-            #print(modbus.REGISTER[0:15])
-            #print(clock.fps())
diff --git a/scripts/examples/OpenMV/32-modbus/modbus_rtu_slave.py b/scripts/examples/OpenMV/32-modbus/modbus_rtu_slave.py
deleted file mode 100644
index b3ab0937e..000000000
--- a/scripts/examples/OpenMV/32-modbus/modbus_rtu_slave.py
+++ /dev/null
@@ -1,17 +0,0 @@
-import time
-from pyb import UART
-from modbus import ModbusRTU
-uart = UART(3, 115200, parity=None, stop=2, timeout=1, timeout_char=4)
-modbus = ModbusRTU(uart, register_num=9999)
-
-while(True):
-    if modbus.any():
-        modbus.handle(debug=True)
-    else:
-        time.sleep_ms(100)
-        modbus.REGISTER[0] = 1000
-        modbus.REGISTER[1] += 1
-        modbus.REGISTER[3] += 3
-        #print(modbus.REGISTER[10:15])
-        # image processing goes here
-
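On the PC side, the registers published by these scripts can be read with any Modbus RTU master. A hedged sketch using pymodbus: the 3.x-style import path, keyword names, serial device path, and the slave id of 1 are all assumptions here, so check your pymodbus version and the OpenMV modbus module for the real values. It follows the register layout written by modbus_apriltag.py above, where REGISTER[0] is the tag count followed by four registers (family, id, cx, cy) per tag:

from pymodbus.client import ModbusSerialClient # pymodbus 3.x-style import

client = ModbusSerialClient(port="/dev/ttyUSB0", baudrate=115200,
                            parity="N", stopbits=2, timeout=1)
client.connect()

# Read the count register plus room for up to 3 tags (4 registers each).
rr = client.read_holding_registers(address=0, count=1 + (4 * 3), slave=1)
count = rr.registers[0]
for i in range(min(count, 3)):
    family, tag_id, cx, cy = rr.registers[1 + (i * 4):5 + (i * 4)]
    print("tag %d: family=%d cx=%d cy=%d" % (tag_id, family, cx, cy))

client.close()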
diff --git a/scripts/examples/OpenMV/34-Remote-Control/image_transfer_jpg_as_the_remote_device_for_your_computer.py b/scripts/examples/OpenMV/34-Remote-Control/image_transfer_jpg_as_the_remote_device_for_your_computer.py
deleted file mode 100644
index 6dd962dd7..000000000
--- a/scripts/examples/OpenMV/34-Remote-Control/image_transfer_jpg_as_the_remote_device_for_your_computer.py
+++ /dev/null
@@ -1,87 +0,0 @@
-# Image Transfer - As The Remote Device
-#
-# This script is meant to talk to the "image_transfer_jpg_as_the_controller_device.py" script on your computer.
-#
-# This script shows off how to transfer the frame buffer to your computer as a jpeg image.
-
-import image, network, omv, rpc, sensor, struct
-
-sensor.reset()
-sensor.set_pixformat(sensor.RGB565)
-sensor.set_framesize(sensor.QVGA)
-sensor.skip_frames(time = 2000)
-
-# Turn off the frame buffer connection to the IDE from the OpenMV Cam side.
-#
-# This needs to be done when manually compressing jpeg images at higher quality
-# so that the OpenMV Cam does not try to stream them to the IDE using a fallback
-# mechanism if the JPEG image is too large to fit in the IDE JPEG frame buffer on the OpenMV Cam.
-
-omv.disable_fb(True)
-
-# The RPC library above is installed on your OpenMV Cam and provides multiple classes for
-# allowing your OpenMV Cam to be controlled over USB or LAN/WLAN.
-
-################################################################
-# Choose the interface you wish to control your OpenMV Cam over.
-################################################################
-
-# Uncomment the below line to setup your OpenMV Cam for control over a USB VCP.
-#
-interface = rpc.rpc_usb_vcp_slave()
-
-# Uncomment the below lines to setup your OpenMV Cam for control over the lan.
-#
-# network_if = network.LAN()
-# network_if.active(True)
-# network_if.ifconfig('dhcp')
-#
-# interface = rpc.rpc_network_slave(network_if)
-
-# Uncomment the below lines to setup your OpenMV Cam for control over the wlan.
-#
-# network_if = network.WLAN(network.STA_IF)
-# network_if.active(True)
-# network_if.connect('your-ssid', 'your-password')
-#
-# interface = rpc.rpc_network_slave(network_if)
-
-################################################################
-# Call Backs
-################################################################
-
-# When called sets the pixformat and framesize, takes a snapshot
-# and then returns the frame buffer jpg size to store the image in.
-#
-# data is a pixformat string and framesize string.
-def jpeg_image_snapshot(data):
-    pixformat, framesize = bytes(data).decode().split(",")
-    sensor.set_pixformat(eval(pixformat))
-    sensor.set_framesize(eval(framesize))
-    img = sensor.snapshot().compress(quality=90)
-    return struct.pack("<I", img.size())
-        if x_error < 0: print("-X Limit Reached ", end="")
-        if x_error > 0: print("+X Limit Reached ", end="")
-        if y_error < 0: print("-Y Limit Reached ", end="")
-        if y_error > 0: print("+Y Limit Reached ", end="")
-
-    center_on_blob(most_dense_blob, TRACKING_RESOLUTION)
-
-    # This loop will track the blob at a much higher readout speed and lower resolution.
-    while(True):
-        clock.tick()
-        img = sensor.snapshot()
-
-        # Find the blob in the lower resolution image.
-        blobs = img.find_blobs(TRACKING_THRESHOLDS,
-                               area_threshold=TRACKING_AREA_THRESHOLD,
-                               pixels_threshold=TRACKING_PIXEL_THRESHOLD)
-
-        # If we lose the blob then we need to find a new one.
-        if not len(blobs):
-            # Reset the resolution.
-            sensor.set_framesize(SEARCHING_RESOLUTION)
-            sensor.ioctl(sensor.IOCTL_SET_READOUT_WINDOW, (sensor_w, sensor_h))
-            break
-
-        # Narrow down the blob list and highlight the best blob.
-        most_dense_blob = max(blobs, key = lambda x: x.density())
-        img.draw_rectangle(most_dense_blob.rect())
-
-        print(clock.fps(), "BLOB cx:%d, cy:%d" % get_mapped_centroid(most_dense_blob))
-
-        x_diff = most_dense_blob.cx() - (sensor.width() / 2.0)
-        y_diff = most_dense_blob.cy() - (sensor.height() / 2.0)
-
-        w_threshold = (sensor.width() / 2.0) * TRACKING_EDGE_TOLERANCE
-        h_threshold = (sensor.height() / 2.0) * TRACKING_EDGE_TOLERANCE
-
-        # Re-center on the blob if it starts going out of view (costs FPS).
-        if abs(x_diff) > w_threshold or abs(y_diff) > h_threshold:
-            center_on_blob(most_dense_blob, TRACKING_RESOLUTION)
-
-        print(clock.fps())
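The centroid-to-sensor-array mapping used by these readout-control scripts (shown in full as get_mapped_centroid() in the apriltag example below) is easier to follow with concrete numbers. The values here are made up but plausible for an OV5640 with a QQVGA viewport:

sensor_w, sensor_h = 2592, 1944 # raw OV5640 pixel array
x, y, w, h = 0, 0, 1296, 972    # current readout window (x/y are offsets from center)
width, height = 160, 120        # sensor.width()/sensor.height() at QQVGA
cx = 100                        # blob/tag centroid in viewport coordinates

ratio = min(w / float(width), h / float(height)) # 8.1: viewport-to-readout scale

mapped_cx = (cx - (width / 2.0)) * ratio # +162.0, relative to the viewport center
mapped_cx += (w - (width * ratio)) / 2.0 # +0.0, aspect-ratio letterbox offset
mapped_cx += x + (sensor_w / 2.0)        # 1458.0, absolute sensor-array coordinate

print(mapped_cx) # 1458.0 on the 0..2592 sensor axis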
diff --git a/scripts/examples/OpenMV/35-Readout-Control/apriltag_tracking.py b/scripts/examples/OpenMV/35-Readout-Control/apriltag_tracking.py
deleted file mode 100644
index c465d1d92..000000000
--- a/scripts/examples/OpenMV/35-Readout-Control/apriltag_tracking.py
+++ /dev/null
@@ -1,151 +0,0 @@
-# This example shows off how to use readout window control to read out a small part of a camera
-# sensor pixel array at a very high speed and move that readout window around.

-# This example was designed and tested on the OpenMV Cam H7 Plus using the OV5640 sensor.
-
-import sensor, image, time
-
-# This example script forces the exposure to a constant value for the whole time. However, you may
-# wish to dynamically adjust the exposure when the readout window shrinks to a small size.
-EXPOSURE_MICROSECONDS = 20000
-
-SEARCHING_RESOLUTION = sensor.QVGA
-TRACKING_RESOLUTION = sensor.QQVGA # or sensor.QQQVGA
-
-TRACKING_LOW_RATIO_THRESHOLD = 0.2  # Go to a smaller readout window when the tag side vs the res is smaller.
-TRACKING_HIGH_RATIO_THRESHOLD = 0.8 # Go to a larger readout window when the tag side vs the res is larger.
-
-sensor.reset()                         # Reset and initialize the sensor.
-sensor.set_pixformat(sensor.GRAYSCALE) # Set pixel format to GRAYSCALE
-sensor.set_framesize(SEARCHING_RESOLUTION)
-sensor.skip_frames(time = 1000)        # Wait for settings to take effect.
-clock = time.clock()                   # Create a clock object to track the FPS.
-
-sensor.set_auto_gain(False) # Turn off as it will oscillate.
-sensor.set_auto_exposure(False, exposure_us=EXPOSURE_MICROSECONDS)
-sensor.skip_frames(time = 1000)
-
-# sensor_w and sensor_h are the image sensor raw pixels w/h (x/y are 0 initially).
-x, y, sensor_w, sensor_h = sensor.ioctl(sensor.IOCTL_GET_READOUT_WINDOW)
-
-while(True):
-    clock.tick()
-    img = sensor.snapshot()
-
-    # Tracks TAG36H11 by default.
-    tags = img.find_apriltags()
-
-    if len(tags):
-        best_tag = max(tags, key = lambda x: x.decision_margin())
-        img.draw_rectangle(best_tag.rect())
-
-        # This needs to be less than the sensor output at default so we can move it around.
-        readout_window_w = ((sensor_w // sensor.width()) * sensor.width()) / 2
-        readout_window_h = ((sensor_h // sensor.height()) * sensor.height()) / 2
-
-        def get_mapped_centroid(t):
-            # By default the readout window is set to the whole sensor pixel array with x/y==0.
-            # The resolution you see is produced by taking pixels from the readout window on
-            # the camera. The x/y location is relative to the sensor center.
-            x, y, w, h = sensor.ioctl(sensor.IOCTL_GET_READOUT_WINDOW)
-
-            # The camera driver will scale whatever resolution you pass to the maximum
-            # width/height that fits on the sensor while keeping the aspect ratio.
-            ratio = min(w / float(sensor.width()), h / float(sensor.height()))
-
-            # Reference cx() to the center of the viewport and then scale to the readout.
-            mapped_cx = (t.cx() - (sensor.width() / 2.0)) * ratio
-            # Since we are keeping the aspect ratio there might be an offset in x.
-            mapped_cx += (w - (sensor.width() * ratio)) / 2.0
-            # Add in our displacement from the sensor center.
-            mapped_cx += x + (sensor_w / 2.0)
-
-            # Reference cy() to the center of the viewport and then scale to the readout.
-            mapped_cy = (t.cy() - (sensor.height() / 2.0)) * ratio
-            # Since we are keeping the aspect ratio there might be an offset in y.
-            mapped_cy += (h - (sensor.height() * ratio)) / 2.0
-            # Add in our displacement from the sensor center.
-            mapped_cy += y + (sensor_h / 2.0)
-
-            return (mapped_cx, mapped_cy) # X/Y location on the sensor array.
-
-        def center_on_tag(t, res):
-            global readout_window_w
-            global readout_window_h
-            mapped_cx, mapped_cy = get_mapped_centroid(t)
-
-            # Switch to the res (if res was unchanged this does nothing).
-            sensor.set_framesize(res)
-
-            # Construct the readout window. x/y are offsets from the center.
-            x = int(mapped_cx - (sensor_w / 2.0))
-            y = int(mapped_cy - (sensor_h / 2.0))
-            w = int(readout_window_w)
-            h = int(readout_window_h)
-
-            # Focus on the centroid.
-            sensor.ioctl(sensor.IOCTL_SET_READOUT_WINDOW, (x, y, w, h))
-
-            # See if we are hitting the edge.
-            new_x, new_y, w, h = sensor.ioctl(sensor.IOCTL_GET_READOUT_WINDOW)
-
-            # You can use these error values to drive servos to move the camera if you want.
-            x_error = x - new_x
-            y_error = y - new_y
-
-            if x_error < 0: print("-X Limit Reached ", end="")
-            if x_error > 0: print("+X Limit Reached ", end="")
-            if y_error < 0: print("-Y Limit Reached ", end="")
-            if y_error > 0: print("+Y Limit Reached ", end="")
-
-        center_on_tag(best_tag, TRACKING_RESOLUTION)
-
-        loss_count = 0
-
-        # This loop will track the tag at a much higher readout speed and lower resolution.
-        while(True):
-            clock.tick()
-            img = sensor.snapshot()
-
-            # Tracks TAG36H11 by default.
-            tags = img.find_apriltags()
-
-            # If we lose the tag then we need to find a new one.
-            if not len(tags):
-                # Handle a few bad frames due to tag flicker.
-                if (loss_count < 2):
-                    loss_count += 1
-                    continue
-                # Reset the resolution.
-                sensor.set_framesize(SEARCHING_RESOLUTION)
-                sensor.ioctl(sensor.IOCTL_SET_READOUT_WINDOW, (sensor_w, sensor_h))
-                break
-
-            loss_count = 0
-
-            # Narrow the tag list down to the best tag and highlight it.
-            best_tag = max(tags, key = lambda x: x.decision_margin())
-            img.draw_rectangle(best_tag.rect())
-
-            print(clock.fps(), "TAG cx:%d, cy:%d" % get_mapped_centroid(best_tag))
-
-            w_ratio = best_tag.w() / sensor.width()
-            h_ratio = best_tag.h() / sensor.height()
-
-            # Shrink the readout window until the tag fills enough of the frame.
-            while (w_ratio < TRACKING_LOW_RATIO_THRESHOLD) or (h_ratio < TRACKING_LOW_RATIO_THRESHOLD):
-                readout_window_w /= 2
-                readout_window_h /= 2
-                w_ratio *= 2
-                h_ratio *= 2
-
-            # Enlarge the readout window until the tag no longer overflows the frame.
-            while (TRACKING_HIGH_RATIO_THRESHOLD < w_ratio) or (TRACKING_HIGH_RATIO_THRESHOLD < h_ratio):
-                readout_window_w *= 2
-                readout_window_h *= 2
-                w_ratio /= 2
-                h_ratio /= 2
-
-            center_on_tag(best_tag, TRACKING_RESOLUTION)
-
-            print(clock.fps())
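The two while loops above implement a simple hysteresis on the readout window size: halve it while the tag spans too little of the frame, double it while it spans too much, with the 0.2/0.8 thresholds keeping the window from oscillating. The same logic as a standalone function (a hypothetical refactor, not part of the original script):

def adjust_window(win_w, win_h, w_ratio, h_ratio, low=0.2, high=0.8):
    # Halve the window while the tag spans too little of the frame...
    while (w_ratio < low) or (h_ratio < low):
        win_w /= 2
        win_h /= 2
        w_ratio *= 2
        h_ratio *= 2
    # ...and double it while the tag spans too much of it.
    while (w_ratio > high) or (h_ratio > high):
        win_w *= 2
        win_h *= 2
        w_ratio /= 2
        h_ratio /= 2
    return win_w, win_h

# e.g. a tag spanning 10% of a 1296x972 window: adjust_window(1296, 972, 0.1, 0.1)
# halves the window once and returns (648.0, 486.0) with the ratios now at 0.2.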