From 53fa4430becb004426c73c1799e08f3d9e3d31f5 Mon Sep 17 00:00:00 2001 From: iabdalkader Date: Wed, 5 Jul 2023 16:27:06 +0200 Subject: [PATCH 1/3] examples: Fix imports. --- scripts/examples/00-HelloWorld/helloworld.py | 3 ++- scripts/examples/01-Camera/00-Snapshot/emboss_snapshot.py | 3 ++- scripts/examples/01-Camera/00-Snapshot/snapshot.py | 3 ++- .../01-Camera/00-Snapshot/snapshot_on_face_detection.py | 4 +++- .../01-Camera/00-Snapshot/snapshot_on_movement.py | 4 +++- .../examples/01-Camera/00-Snapshot/time_lapse_photos.py | 5 ++++- scripts/examples/01-Camera/01-Video-Recording/gif.py | 5 ++++- .../01-Camera/01-Video-Recording/gif_on_face_detection.py | 6 +++++- .../01-Camera/01-Video-Recording/gif_on_movement.py | 6 +++++- .../01-Camera/01-Video-Recording/imageio_memory.py | 4 +++- .../examples/01-Camera/01-Video-Recording/imageio_read.py | 4 +++- .../01-Camera/01-Video-Recording/imageio_write.py | 5 ++++- scripts/examples/01-Camera/01-Video-Recording/mjpeg.py | 5 ++++- .../01-Video-Recording/mjpeg_on_face_detection.py | 6 +++++- .../01-Camera/01-Video-Recording/mjpeg_on_movement.py | 6 +++++- .../01-Camera/02-Optical-Flow/absolute-rotation-scale.py | 4 +++- .../01-Camera/02-Optical-Flow/absolute-translation.py | 3 ++- .../02-Optical-Flow/differential-rotation-scale.py | 4 +++- .../01-Camera/02-Optical-Flow/differential-translation.py | 3 ++- .../image-patches-absolute-rotation-scale.py | 4 +++- .../02-Optical-Flow/image-patches-absolute-translation.py | 3 ++- .../image-patches-differential-rotation-scale.py | 4 +++- .../image-patches-differential-translation.py | 3 ++- .../examples/01-Camera/03-Event-Cameras/frogeye2020.py | 4 +++- .../03-Event-Cameras/frogeye2020_with_tracking.py | 4 +++- scripts/examples/01-Camera/04-Global-Shutter/high_fps.py | 3 ++- .../01-Camera/04-Global-Shutter/triggered_mode.py | 3 ++- .../05-FLIR-Lepton/lepton_get_object_high_temp.py | 3 ++- .../01-Camera/05-FLIR-Lepton/lepton_get_object_temp.py | 3 ++- .../05-FLIR-Lepton/lepton_get_object_temp_color.py | 3 ++- .../05-FLIR-Lepton/lepton_get_object_temp_color_lcd.py | 4 +++- .../05-FLIR-Lepton/lepton_get_object_temp_lcd.py | 4 +++- .../lepton_hotspot_grayscale_color_tracking.py | 3 ++- .../lepton_hotspot_grayscale_color_tracking_lcd.py | 4 +++- .../lepton_hotspot_rgb565_color_tracking.py | 3 ++- .../lepton_hotspot_rgb565_color_tracking_lcd.py | 4 +++- ...lepton_target_temp_hotspot_grayscale_color_tracking.py | 3 ++- .../lepton_target_temp_hotspot_rgb565_color_tracking.py | 3 ++- .../examples/01-Camera/06-Time-of-Flight/tof_camera.py | 4 +++- .../examples/01-Camera/06-Time-of-Flight/tof_overlay.py | 5 ++++- .../07-Sensor-Control/sensor_auto_gain_control.py | 3 ++- .../07-Sensor-Control/sensor_exposure_control.py | 3 ++- .../07-Sensor-Control/sensor_horizontal_mirror.py | 3 ++- .../07-Sensor-Control/sensor_manual_whitebal_control.py | 3 ++- .../01-Camera/07-Sensor-Control/sensor_vertical_flip.py | 3 ++- .../07-Sensor-Control/sesnor_manual_gain_control.py | 3 ++- .../08-Readout-Control/100_fps_ir_led_tracking.py | 3 ++- .../01-Camera/08-Readout-Control/apriltag_tracking.py | 3 ++- .../02-Image-Processing/00-Drawing/arrow_drawing.py | 4 +++- .../02-Image-Processing/00-Drawing/circle_drawing.py | 4 +++- .../examples/02-Image-Processing/00-Drawing/copy2fb.py | 4 +++- .../02-Image-Processing/00-Drawing/cross_drawing.py | 4 +++- .../02-Image-Processing/00-Drawing/ellipse_drawing.py | 4 +++- .../examples/02-Image-Processing/00-Drawing/flood_fill.py | 3 ++- .../02-Image-Processing/00-Drawing/image_drawing.py | 4 +++- 
.../00-Drawing/image_drawing_advanced.py | 5 ++++- .../00-Drawing/image_drawing_alpha_blending_test.py | 4 +++- .../image_drawing_alpha_blending_with_color_table_test.py | 4 +++- .../00-Drawing/image_drawing_alpha_table_test.py | 4 +++- .../image_drawing_alpha_table_with_color_table_test.py | 4 +++- .../00-Drawing/image_drawing_scale_down_test.py | 4 +++- .../00-Drawing/image_drawing_scale_up_test.py | 4 +++- .../00-Drawing/image_drawing_with_custom_palette.py | 5 ++++- .../02-Image-Processing/00-Drawing/keypoints_drawing.py | 4 +++- .../02-Image-Processing/00-Drawing/line_drawing.py | 4 +++- .../02-Image-Processing/00-Drawing/rectangle_drawing.py | 4 +++- .../02-Image-Processing/00-Drawing/text_drawing.py | 4 +++- .../01-Image-Filters/adaptive_histogram_equalization.py | 3 ++- .../02-Image-Processing/01-Image-Filters/blur_filter.py | 3 ++- .../01-Image-Filters/cartoon_filter.py | 3 ++- .../01-Image-Filters/color_bilateral_filter.py | 3 ++- .../01-Image-Filters/color_binary_filter.py | 3 ++- .../01-Image-Filters/color_light_removal.py | 3 ++- .../02-Image-Processing/01-Image-Filters/edge_filter.py | 3 ++- .../01-Image-Filters/erode_and_dilate.py | 3 ++- .../01-Image-Filters/gamma_correction.py | 3 ++- .../01-Image-Filters/grayscale_bilateral_filter.py | 3 ++- .../01-Image-Filters/grayscale_binary_filter.py | 3 ++- .../01-Image-Filters/grayscale_light_removal.py | 3 ++- .../01-Image-Filters/histogram_equalization.py | 3 ++- .../01-Image-Filters/kernel_filters.py | 3 ++- .../01-Image-Filters/lens_correction.py | 3 ++- .../02-Image-Processing/01-Image-Filters/linear_polar.py | 3 ++- .../02-Image-Processing/01-Image-Filters/log_polar.py | 3 ++- .../01-Image-Filters/mean_adaptive_threshold_filter.py | 3 ++- .../02-Image-Processing/01-Image-Filters/mean_filter.py | 3 ++- .../01-Image-Filters/median_adaptive_threshold_filter.py | 3 ++- .../02-Image-Processing/01-Image-Filters/median_filter.py | 3 ++- .../midpoint_adaptive_threshold_filter.py | 3 ++- .../01-Image-Filters/midpoint_filter.py | 3 ++- .../01-Image-Filters/mode_adaptive_threshold_filter.py | 3 ++- .../02-Image-Processing/01-Image-Filters/mode_filter.py | 3 ++- .../02-Image-Processing/01-Image-Filters/negative.py | 3 ++- .../perspective_and_rotation_correction.py | 3 ++- .../01-Image-Filters/perspective_correction.py | 3 ++- .../01-Image-Filters/rotation_correction.py | 3 ++- .../01-Image-Filters/sharpen_filter.py | 3 ++- .../examples/02-Image-Processing/01-Image-Filters/ulab.py | 3 ++- .../01-Image-Filters/unsharp_filter.py | 3 ++- .../01-Image-Filters/vflip_hmirror_transpose.py | 4 +++- .../automatic_grayscale_color_tracking.py | 3 ++- .../02-Color-Tracking/automatic_rgb565_color_tracking.py | 3 ++- .../02-Color-Tracking/black_grayscale_line_following.py | 4 +++- .../02-Color-Tracking/image_histogram_info.py | 3 ++- .../02-Color-Tracking/image_statistics_info.py | 3 ++- .../02-Color-Tracking/ir_beacon_grayscale_tracking.py | 3 ++- .../02-Color-Tracking/ir_beacon_rgb565_tracking.py | 3 ++- .../02-Color-Tracking/multi_color_blob_tracking.py | 4 +++- .../02-Color-Tracking/multi_color_code_tracking.py | 3 ++- .../02-Color-Tracking/single_color_code_tracking.py | 4 +++- .../single_color_grayscale_blob_tracking.py | 4 +++- .../single_color_rgb565_blob_tracking.py | 4 +++- .../in_memory_advanced_frame_differencing.py | 5 ++++- .../in_memory_basic_frame_differencing.py | 5 ++++- .../03-Frame-Differencing/in_memory_shadow_removal.py | 5 ++++- .../in_memory_structural_similarity.py | 5 ++++- .../on_disk_advanced_frame_differencing.py | 5 ++++- 
.../on_disk_basic_frame_differencing.py | 5 ++++- .../03-Frame-Differencing/on_disk_shadow_removal.py | 5 ++++- .../on_disk_structural_similarity.py | 5 ++++- .../00-TensorFlow/tf_image_classification.py | 5 ++++- .../00-TensorFlow/tf_object_detection.py | 5 ++++- .../03-Machine-Learning/01-ST-CubeAI/nn_stm32cubeai.py | 4 +++- .../03-Machine-Learning/02-Haar-Cascade/face_detection.py | 4 +++- .../02-Haar-Cascade/face_eye_detection.py | 4 +++- .../02-Haar-Cascade/face_recognition.py | 4 +++- .../03-Machine-Learning/02-Haar-Cascade/face_tracking.py | 4 +++- .../03-Machine-Learning/02-Haar-Cascade/iris_detection.py | 4 +++- scripts/examples/04-Barcodes/find_barcodes.py | 5 ++++- scripts/examples/04-Barcodes/find_datamatrices.py | 4 +++- .../examples/04-Barcodes/find_datamatrices_w_lens_zoom.py | 4 +++- scripts/examples/04-Barcodes/qrcodes_with_lens_corr.py | 3 ++- scripts/examples/04-Barcodes/qrcodes_with_lens_zoom.py | 3 ++- scripts/examples/05-Feature-Detection/edges.py | 4 +++- scripts/examples/05-Feature-Detection/find_circles.py | 3 ++- .../examples/05-Feature-Detection/find_line_segments.py | 3 ++- scripts/examples/05-Feature-Detection/find_lines.py | 3 ++- scripts/examples/05-Feature-Detection/find_rects.py | 3 ++- scripts/examples/05-Feature-Detection/hog.py | 3 ++- scripts/examples/05-Feature-Detection/keypoints.py | 4 +++- scripts/examples/05-Feature-Detection/keypoints_save.py | 4 +++- scripts/examples/05-Feature-Detection/lbp.py | 4 +++- .../05-Feature-Detection/linear_regression_fast.py | 3 ++- .../05-Feature-Detection/linear_regression_robust.py | 3 ++- scripts/examples/05-Feature-Detection/selective_search.py | 3 ++- .../examples/05-Feature-Detection/template_matching.py | 4 +++- scripts/examples/06-April-Tags/find_apriltags.py | 5 ++++- scripts/examples/06-April-Tags/find_apriltags_3d_pose.py | 4 +++- scripts/examples/06-April-Tags/find_apriltags_max_res.py | 6 +++++- .../examples/06-April-Tags/find_apriltags_w_lens_zoom.py | 4 +++- scripts/examples/06-April-Tags/find_small_apriltags.py | 5 ++++- .../07-Interface-Library/00-Arduino/arduino_i2c_slave.py | 3 ++- .../07-Interface-Library/00-Arduino/arduino_spi_slave.py | 4 +++- .../01-Pixy-Emulation/apriltags_pixy_i2c_emulation.py | 6 +++++- .../01-Pixy-Emulation/apriltags_pixy_spi_emulation.py | 6 +++++- .../01-Pixy-Emulation/apriltags_pixy_uart_emulation.py | 6 +++++- .../01-Pixy-Emulation/pixy_i2c_emulation.py | 6 +++++- .../01-Pixy-Emulation/pixy_spi_emulation.py | 6 +++++- .../01-Pixy-Emulation/pixy_uart_emulation.py | 6 +++++- .../02-MAVLink/mavlink_apriltags_landing_target.py | 6 +++++- .../02-MAVLink/mavlink_opticalflow.py | 5 ++++- .../07-Interface-Library/03-Modbus/modbus_apriltag.py | 2 +- ...transfer_jpg_as_the_remote_device_for_your_computer.py | 6 +++++- ...pg_streaming_as_the_remote_device_for_your_computer.py | 5 ++++- .../image_transfer_raw_as_the_controller_device.py | 8 +++++++- .../image_transfer_raw_as_the_remote_device.py | 5 ++++- .../popular_features_as_the_controller_device.py | 4 +++- .../popular_features_as_the_remote_device.py | 8 +++++++- .../36-Web-Servers/rtsp_video_server_lan.py | 6 +++++- .../36-Web-Servers/rtsp_video_server_wlan.py | 6 +++++- .../00-Board-Control/adc_read_int_channel.py | 3 ++- scripts/examples/09-OpenMV-Boards/00-Board-Control/can.py | 3 ++- .../09-OpenMV-Boards/00-Board-Control/cpufreq_scaling.py | 5 ++++- .../09-OpenMV-Boards/00-Board-Control/spi_control.py | 3 ++- .../examples/09-OpenMV-Boards/00-Board-Control/usb_hid.py | 3 ++- 
.../examples/09-OpenMV-Boards/00-Board-Control/usb_vcp.py | 4 +++- .../00-Board-Control/vsync_gpio_output.py | 3 ++- scripts/examples/09-OpenMV-Boards/01-WiFi-Shield/dns.py | 3 ++- .../09-OpenMV-Boards/01-WiFi-Shield/http_client.py | 3 ++- .../09-OpenMV-Boards/01-WiFi-Shield/http_client_ssl.py | 4 +++- .../examples/09-OpenMV-Boards/01-WiFi-Shield/http_post.py | 5 ++++- .../09-OpenMV-Boards/01-WiFi-Shield/mjpeg_streamer.py | 6 +++++- .../09-OpenMV-Boards/01-WiFi-Shield/mjpeg_streamer_ap.py | 6 +++++- .../09-OpenMV-Boards/01-WiFi-Shield/mjpeg_streamer_fir.py | 6 +++++- .../examples/09-OpenMV-Boards/01-WiFi-Shield/mqtt_pub.py | 3 ++- .../examples/09-OpenMV-Boards/01-WiFi-Shield/mqtt_sub.py | 3 ++- scripts/examples/09-OpenMV-Boards/01-WiFi-Shield/ntp.py | 5 ++++- scripts/examples/09-OpenMV-Boards/01-WiFi-Shield/scan.py | 3 ++- .../examples/09-OpenMV-Boards/01-WiFi-Shield/static_ip.py | 5 ++++- scripts/examples/09-OpenMV-Boards/02-LCD-Shield/lcd.py | 3 ++- .../04-Thermopile-Shield/thermal_camera.py | 4 +++- .../04-Thermopile-Shield/thermal_overlay.py | 5 ++++- .../04-Thermopile-Shield/thermal_overlay_lcd.py | 6 +++++- scripts/examples/09-OpenMV-Boards/09-TV-Shield/tv.py | 4 +++- .../examples/09-OpenMV-Boards/11-Low-Power/deep_sleep.py | 4 +++- .../09-OpenMV-Boards/11-Low-Power/extint_wakeup.py | 4 +++- .../09-OpenMV-Boards/11-Low-Power/sensor_sleep.py | 3 ++- .../examples/09-OpenMV-Boards/11-Low-Power/stop_mode.py | 4 +++- scripts/examples/09-OpenMV-Boards/99-Tests/colorbar.py | 3 ++- scripts/examples/09-OpenMV-Boards/99-Tests/fps.py | 3 ++- scripts/examples/09-OpenMV-Boards/99-Tests/selftest.py | 4 +++- scripts/examples/09-OpenMV-Boards/99-Tests/unittests.py | 4 +++- scripts/examples/09-OpenMV-Boards/main.py | 3 ++- .../Nano-33-BLE-Sense/02-Audio/audio_fft.py | 4 +++- .../Nano-33-BLE-Sense/04-Thermal/thermal_camera.py | 4 +++- .../10-Arduino-Boards/Nano-RP2040/03-Audio/audio_fft.py | 4 +++- .../10-Arduino-Boards/Nano-RP2040/03-WiFi/ap_mode.py | 6 +++++- .../10-Arduino-Boards/Nano-RP2040/03-WiFi/http_client.py | 3 ++- .../examples/10-Arduino-Boards/Nano-RP2040/03-WiFi/ntp.py | 5 ++++- .../10-Arduino-Boards/Nano-RP2040/03-WiFi/scan.py | 3 ++- .../Nano-RP2040/05-Thermal/thermal_camera.py | 4 +++- .../Nicla-Vision/00-Board-Control/adc_read_int_channel.py | 3 ++- .../Nicla-Vision/00-Board-Control/can.py | 3 ++- .../Nicla-Vision/00-Board-Control/cpufreq_scaling.py | 5 ++++- .../Nicla-Vision/00-Board-Control/spi_control.py | 3 ++- .../Nicla-Vision/00-Board-Control/usb_hid.py | 3 ++- .../Nicla-Vision/00-Board-Control/usb_vcp.py | 4 +++- .../Nicla-Vision/00-Board-Control/vsync_gpio_output.py | 3 ++- .../10-Arduino-Boards/Nicla-Vision/02-Audio/audio_fft.py | 4 +++- .../Nicla-Vision/02-Audio/micro_speech.py | 6 +++++- .../10-Arduino-Boards/Nicla-Vision/03-WiFi/dns.py | 3 ++- .../10-Arduino-Boards/Nicla-Vision/03-WiFi/http_client.py | 3 ++- .../Nicla-Vision/03-WiFi/http_client_ssl.py | 4 +++- .../Nicla-Vision/03-WiFi/mjpeg_streamer.py | 6 +++++- .../10-Arduino-Boards/Nicla-Vision/03-WiFi/mqtt_pub.py | 3 ++- .../10-Arduino-Boards/Nicla-Vision/03-WiFi/mqtt_sub.py | 3 ++- .../10-Arduino-Boards/Nicla-Vision/03-WiFi/ntp.py | 5 ++++- .../10-Arduino-Boards/Nicla-Vision/03-WiFi/scan.py | 3 ++- .../10-Arduino-Boards/Nicla-Vision/03-WiFi/static_ip.py | 5 ++++- .../Nicla-Vision/05-Low-Power/deep_sleep.py | 4 +++- .../Nicla-Vision/05-Low-Power/extint_wakeup.py | 4 +++- .../Nicla-Vision/05-Low-Power/stop_mode.py | 4 +++- .../Portenta-H7/00-Board-Control/adc_read_int_channel.py | 3 ++- 
.../10-Arduino-Boards/Portenta-H7/00-Board-Control/can.py | 3 ++- .../Portenta-H7/00-Board-Control/cpufreq_scaling.py | 5 ++++- .../Portenta-H7/00-Board-Control/spi_control.py | 3 ++- .../Portenta-H7/00-Board-Control/usb_hid.py | 3 ++- .../Portenta-H7/00-Board-Control/usb_vcp.py | 4 +++- .../Portenta-H7/00-Board-Control/vsync_gpio_output.py | 3 ++- .../10-Arduino-Boards/Portenta-H7/01-Audio/audio_fft.py | 4 +++- .../Portenta-H7/01-Audio/micro_speech.py | 6 +++++- .../examples/10-Arduino-Boards/Portenta-H7/02-WiFi/dns.py | 3 ++- .../10-Arduino-Boards/Portenta-H7/02-WiFi/http_client.py | 3 ++- .../Portenta-H7/02-WiFi/http_client_ssl.py | 4 +++- .../Portenta-H7/02-WiFi/mjpeg_streamer.py | 6 +++++- .../10-Arduino-Boards/Portenta-H7/02-WiFi/mqtt_pub.py | 3 ++- .../10-Arduino-Boards/Portenta-H7/02-WiFi/mqtt_sub.py | 3 ++- .../examples/10-Arduino-Boards/Portenta-H7/02-WiFi/ntp.py | 5 ++++- .../10-Arduino-Boards/Portenta-H7/02-WiFi/scan.py | 3 ++- .../10-Arduino-Boards/Portenta-H7/02-WiFi/static_ip.py | 5 ++++- .../Portenta-H7/05-Ethernet/eth_cable_test.py | 3 ++- .../Portenta-H7/05-Ethernet/http_client.py | 3 ++- .../Portenta-H7/05-Ethernet/http_client_ssl.py | 4 +++- .../Portenta-H7/05-Ethernet/peer_to_peer.py | 3 ++- .../Portenta-H7/06-Low-Power/deep_sleep.py | 4 +++- .../Portenta-H7/06-Low-Power/extint_wakeup.py | 4 +++- .../06-Low-Power/himax_wakeup_on_motion_detection.py | 5 ++++- .../Portenta-H7/06-Low-Power/sensor_sleep.py | 3 ++- .../Portenta-H7/06-Low-Power/stop_mode.py | 4 +++- 259 files changed, 751 insertions(+), 259 deletions(-) diff --git a/scripts/examples/00-HelloWorld/helloworld.py b/scripts/examples/00-HelloWorld/helloworld.py index a18b84b21..ba7d02c7b 100644 --- a/scripts/examples/00-HelloWorld/helloworld.py +++ b/scripts/examples/00-HelloWorld/helloworld.py @@ -2,7 +2,8 @@ # # Welcome to the OpenMV IDE! Click on the green run arrow button below to run the script! -import sensor, image, time +import sensor +import time sensor.reset() # Reset and initialize the sensor. sensor.set_pixformat(sensor.RGB565) # Set pixel format to RGB565 (or GRAYSCALE) diff --git a/scripts/examples/01-Camera/00-Snapshot/emboss_snapshot.py b/scripts/examples/01-Camera/00-Snapshot/emboss_snapshot.py index 69bc105c6..56e4f66ee 100644 --- a/scripts/examples/01-Camera/00-Snapshot/emboss_snapshot.py +++ b/scripts/examples/01-Camera/00-Snapshot/emboss_snapshot.py @@ -4,7 +4,8 @@ # # You can use your OpenMV Cam to save modified image files. -import sensor, image, pyb +import sensor +import pyb RED_LED_PIN = 1 BLUE_LED_PIN = 3 diff --git a/scripts/examples/01-Camera/00-Snapshot/snapshot.py b/scripts/examples/01-Camera/00-Snapshot/snapshot.py index 8eb621e6d..340da39d5 100644 --- a/scripts/examples/01-Camera/00-Snapshot/snapshot.py +++ b/scripts/examples/01-Camera/00-Snapshot/snapshot.py @@ -4,7 +4,8 @@ # # You can use your OpenMV Cam to save image files. -import sensor, image, pyb +import sensor +import pyb RED_LED_PIN = 1 BLUE_LED_PIN = 3 diff --git a/scripts/examples/01-Camera/00-Snapshot/snapshot_on_face_detection.py b/scripts/examples/01-Camera/00-Snapshot/snapshot_on_face_detection.py index a716df263..3611cfb99 100644 --- a/scripts/examples/01-Camera/00-Snapshot/snapshot_on_face_detection.py +++ b/scripts/examples/01-Camera/00-Snapshot/snapshot_on_face_detection.py @@ -5,7 +5,9 @@ # This example demonstrates using face tracking on your OpenMV Cam to take a # picture. 
-import sensor, image, pyb +import sensor +import image +import pyb RED_LED_PIN = 1 BLUE_LED_PIN = 3 diff --git a/scripts/examples/01-Camera/00-Snapshot/snapshot_on_movement.py b/scripts/examples/01-Camera/00-Snapshot/snapshot_on_movement.py index 4b0a625e1..e1f56936d 100644 --- a/scripts/examples/01-Camera/00-Snapshot/snapshot_on_movement.py +++ b/scripts/examples/01-Camera/00-Snapshot/snapshot_on_movement.py @@ -5,7 +5,9 @@ # This example demonstrates using frame differencing with your OpenMV Cam to do # motion detection. After motion is detected your OpenMV Cam will take a picture. -import sensor, image, pyb, os +import sensor +import pyb +import os RED_LED_PIN = 1 BLUE_LED_PIN = 3 diff --git a/scripts/examples/01-Camera/00-Snapshot/time_lapse_photos.py b/scripts/examples/01-Camera/00-Snapshot/time_lapse_photos.py index be6c370dd..332d5d316 100644 --- a/scripts/examples/01-Camera/00-Snapshot/time_lapse_photos.py +++ b/scripts/examples/01-Camera/00-Snapshot/time_lapse_photos.py @@ -8,7 +8,10 @@ # pictures it will run the bootloader each time. Please power the camera # from something other than USB to not have the bootloader run. -import pyb, machine, sensor, image, pyb, os +import pyb +import machine +import sensor +import os # Create and init RTC object. This will allow us to set the current time for # the RTC and let us set an interrupt to wake up later on. diff --git a/scripts/examples/01-Camera/01-Video-Recording/gif.py b/scripts/examples/01-Camera/01-Video-Recording/gif.py index 35f0933d7..57d0b91a5 100644 --- a/scripts/examples/01-Camera/01-Video-Recording/gif.py +++ b/scripts/examples/01-Camera/01-Video-Recording/gif.py @@ -6,7 +6,10 @@ # recorder object RGB565 frames or Grayscale frames. Use photo editing software # like GIMP to compress and optimize the Gif before uploading it to the web. -import sensor, image, time, gif, pyb +import sensor +import time +import gif +import pyb RED_LED_PIN = 1 BLUE_LED_PIN = 3 diff --git a/scripts/examples/01-Camera/01-Video-Recording/gif_on_face_detection.py b/scripts/examples/01-Camera/01-Video-Recording/gif_on_face_detection.py index 0732ca1c8..4306be49c 100644 --- a/scripts/examples/01-Camera/01-Video-Recording/gif_on_face_detection.py +++ b/scripts/examples/01-Camera/01-Video-Recording/gif_on_face_detection.py @@ -9,7 +9,11 @@ # This example demonstrates using face tracking on your OpenMV Cam to take a # gif. -import sensor, image, time, gif, pyb +import sensor +import image +import time +import gif +import pyb RED_LED_PIN = 1 BLUE_LED_PIN = 3 diff --git a/scripts/examples/01-Camera/01-Video-Recording/gif_on_movement.py b/scripts/examples/01-Camera/01-Video-Recording/gif_on_movement.py index 4cf3fbae6..f542327a5 100644 --- a/scripts/examples/01-Camera/01-Video-Recording/gif_on_movement.py +++ b/scripts/examples/01-Camera/01-Video-Recording/gif_on_movement.py @@ -9,7 +9,11 @@ # This example demonstrates using frame differencing with your OpenMV Cam to do # motion detection. After motion is detected your OpenMV Cam will take a video.
-import sensor, image, time, gif, pyb, os +import sensor +import time +import gif +import pyb +import os RED_LED_PIN = 1 BLUE_LED_PIN = 3 diff --git a/scripts/examples/01-Camera/01-Video-Recording/imageio_memory.py b/scripts/examples/01-Camera/01-Video-Recording/imageio_memory.py index d64341be4..7270d500c 100644 --- a/scripts/examples/01-Camera/01-Video-Recording/imageio_memory.py +++ b/scripts/examples/01-Camera/01-Video-Recording/imageio_memory.py @@ -2,7 +2,9 @@ # # This example shows how to use the ImageIO stream to record frames in memory and play them back. # Note: While this should work on any board, the board should have an SDRAM to be of any use. -import sensor, image, time +import sensor +import image +import time # Number of frames to pre-allocate and record N_FRAMES = 500 diff --git a/scripts/examples/01-Camera/01-Video-Recording/imageio_read.py b/scripts/examples/01-Camera/01-Video-Recording/imageio_read.py index 430c2fe30..c30d05612 100644 --- a/scripts/examples/01-Camera/01-Video-Recording/imageio_read.py +++ b/scripts/examples/01-Camera/01-Video-Recording/imageio_read.py @@ -8,7 +8,9 @@ # Altered to allow full speed reading from SD card for extraction of sequences to the network etc. # Set the new pause parameter to false -import sensor, image, time +import sensor +import image +import time snapshot_source = False # Set to true once finished to pull data from sensor. diff --git a/scripts/examples/01-Camera/01-Video-Recording/imageio_write.py b/scripts/examples/01-Camera/01-Video-Recording/imageio_write.py index aa3f5bff0..9cbd7a72f 100644 --- a/scripts/examples/01-Camera/01-Video-Recording/imageio_write.py +++ b/scripts/examples/01-Camera/01-Video-Recording/imageio_write.py @@ -6,7 +6,10 @@ # OpenMV Cam sees for later analysis using the Image Reader object. Images written to disk # by the Image Writer object are stored in a simple file format readable by your OpenMV Cam. -import sensor, image, pyb, time +import sensor +import image +import pyb +import time record_time = 10000 # 10 seconds in milliseconds diff --git a/scripts/examples/01-Camera/01-Video-Recording/mjpeg.py b/scripts/examples/01-Camera/01-Video-Recording/mjpeg.py index 1e3f732e4..bb5f5bb08 100644 --- a/scripts/examples/01-Camera/01-Video-Recording/mjpeg.py +++ b/scripts/examples/01-Camera/01-Video-Recording/mjpeg.py @@ -7,7 +7,10 @@ # recording a Mjpeg file you can use VLC to play it. If you are on Ubuntu then # the built-in video player will work too. -import sensor, image, time, mjpeg, pyb +import sensor +import time +import mjpeg +import pyb RED_LED_PIN = 1 BLUE_LED_PIN = 3 diff --git a/scripts/examples/01-Camera/01-Video-Recording/mjpeg_on_face_detection.py b/scripts/examples/01-Camera/01-Video-Recording/mjpeg_on_face_detection.py index fd567a4f6..ddbcb3122 100644 --- a/scripts/examples/01-Camera/01-Video-Recording/mjpeg_on_face_detection.py +++ b/scripts/examples/01-Camera/01-Video-Recording/mjpeg_on_face_detection.py @@ -10,7 +10,11 @@ # This example demonstrates using face tracking on your OpenMV Cam to take a # mjpeg. 
-import sensor, image, time, mjpeg, pyb +import sensor +import image +import time +import mjpeg +import pyb RED_LED_PIN = 1 BLUE_LED_PIN = 3 diff --git a/scripts/examples/01-Camera/01-Video-Recording/mjpeg_on_movement.py b/scripts/examples/01-Camera/01-Video-Recording/mjpeg_on_movement.py index 5f13363f2..5d4d0944f 100644 --- a/scripts/examples/01-Camera/01-Video-Recording/mjpeg_on_movement.py +++ b/scripts/examples/01-Camera/01-Video-Recording/mjpeg_on_movement.py @@ -10,7 +10,11 @@ # This example demonstrates using frame differencing with your OpenMV Cam to do # motion detection. After motion is detected your OpenMV Cam will take a video. -import sensor, image, time, mjpeg, pyb, os +import sensor +import time +import mjpeg +import pyb +import os RED_LED_PIN = 1 BLUE_LED_PIN = 3 diff --git a/scripts/examples/01-Camera/02-Optical-Flow/absolute-rotation-scale.py b/scripts/examples/01-Camera/02-Optical-Flow/absolute-rotation-scale.py index 1dafa06ee..a55900ff2 100644 --- a/scripts/examples/01-Camera/02-Optical-Flow/absolute-rotation-scale.py +++ b/scripts/examples/01-Camera/02-Optical-Flow/absolute-rotation-scale.py @@ -10,7 +10,9 @@ # forward/backwards to see the numbers change. # I.e. Z direction changes only. -import sensor, image, time, math +import sensor +import time +import math # NOTE!!! You have to use a small power of 2 resolution when using # find_displacement(). This is because the algorithm is powered by diff --git a/scripts/examples/01-Camera/02-Optical-Flow/absolute-translation.py b/scripts/examples/01-Camera/02-Optical-Flow/absolute-translation.py index f4dd2e49f..9071b9005 100644 --- a/scripts/examples/01-Camera/02-Optical-Flow/absolute-translation.py +++ b/scripts/examples/01-Camera/02-Optical-Flow/absolute-translation.py @@ -10,7 +10,8 @@ # watch the numbers change. Note that you can see displacement numbers # up +- half of the horizontal and vertical resolution. -import sensor, image, time +import sensor +import time # NOTE!!! You have to use a small power of 2 resolution when using # find_displacement(). This is because the algorithm is powered by diff --git a/scripts/examples/01-Camera/02-Optical-Flow/differential-rotation-scale.py b/scripts/examples/01-Camera/02-Optical-Flow/differential-rotation-scale.py index 8e1b54c64..ea00004a6 100644 --- a/scripts/examples/01-Camera/02-Optical-Flow/differential-rotation-scale.py +++ b/scripts/examples/01-Camera/02-Optical-Flow/differential-rotation-scale.py @@ -10,7 +10,9 @@ # forward/backwards to see the numbers change. # I.e. Z direction changes only. -import sensor, image, time, math +import sensor +import time +import math # NOTE!!! You have to use a small power of 2 resolution when using # find_displacement(). This is because the algorithm is powered by diff --git a/scripts/examples/01-Camera/02-Optical-Flow/differential-translation.py b/scripts/examples/01-Camera/02-Optical-Flow/differential-translation.py index 04416cf09..fbefae11a 100644 --- a/scripts/examples/01-Camera/02-Optical-Flow/differential-translation.py +++ b/scripts/examples/01-Camera/02-Optical-Flow/differential-translation.py @@ -10,7 +10,8 @@ # watch the numbers change. Note that you can see displacement numbers # up +- half of the horizontal and vertical resolution. -import sensor, image, time +import sensor +import time # NOTE!!! You have to use a small power of 2 resolution when using # find_displacement().
This is because the algorithm is powered by diff --git a/scripts/examples/01-Camera/02-Optical-Flow/image-patches-absolute-rotation-scale.py b/scripts/examples/01-Camera/02-Optical-Flow/image-patches-absolute-rotation-scale.py index 414a105e6..3eda1db97 100644 --- a/scripts/examples/01-Camera/02-Optical-Flow/image-patches-absolute-rotation-scale.py +++ b/scripts/examples/01-Camera/02-Optical-Flow/image-patches-absolute-rotation-scale.py @@ -22,7 +22,9 @@ BLOCK_H = 16 # pow2 # forward/backwards to see the numbers change. # I.e. Z direction changes only. -import sensor, image, time, math +import sensor +import time +import math # NOTE!!! You have to use a small power of 2 resolution when using # find_displacement(). This is because the algorithm is powered by diff --git a/scripts/examples/01-Camera/02-Optical-Flow/image-patches-absolute-translation.py b/scripts/examples/01-Camera/02-Optical-Flow/image-patches-absolute-translation.py index 0bfae8ca6..9525ef75b 100644 --- a/scripts/examples/01-Camera/02-Optical-Flow/image-patches-absolute-translation.py +++ b/scripts/examples/01-Camera/02-Optical-Flow/image-patches-absolute-translation.py @@ -20,7 +20,8 @@ BLOCK_H = 16 # pow2 # watch the numbers change. Note that you can see displacement numbers # up +- half of the horizontal and vertical resolution. -import sensor, image, time +import sensor +import time # NOTE!!! You have to use a small power of 2 resolution when using # find_displacement(). This is because the algorithm is powered by diff --git a/scripts/examples/01-Camera/02-Optical-Flow/image-patches-differential-rotation-scale.py b/scripts/examples/01-Camera/02-Optical-Flow/image-patches-differential-rotation-scale.py index bb1bc2eea..5b32fd8e0 100644 --- a/scripts/examples/01-Camera/02-Optical-Flow/image-patches-differential-rotation-scale.py +++ b/scripts/examples/01-Camera/02-Optical-Flow/image-patches-differential-rotation-scale.py @@ -22,7 +22,9 @@ BLOCK_H = 16 # pow2 # forward/backwards to see the numbers change. # I.e. Z direction changes only. -import sensor, image, time, math +import sensor +import time +import math # NOTE!!! You have to use a small power of 2 resolution when using # find_displacement(). This is because the algorithm is powered by diff --git a/scripts/examples/01-Camera/02-Optical-Flow/image-patches-differential-translation.py b/scripts/examples/01-Camera/02-Optical-Flow/image-patches-differential-translation.py index 0a87c42f3..106be8342 100644 --- a/scripts/examples/01-Camera/02-Optical-Flow/image-patches-differential-translation.py +++ b/scripts/examples/01-Camera/02-Optical-Flow/image-patches-differential-translation.py @@ -20,7 +20,8 @@ BLOCK_H = 16 # pow2 # watch the numbers change. Note that you can see displacement numbers # up +- half of the horizontal and vertical resolution. -import sensor, image, time +import sensor +import time # NOTE!!! You have to use a small power of 2 resolution when using # find_displacement(). This is because the algorithm is powered by diff --git a/scripts/examples/01-Camera/03-Event-Cameras/frogeye2020.py b/scripts/examples/01-Camera/03-Event-Cameras/frogeye2020.py index 445c0849a..e8e25c6db 100644 --- a/scripts/examples/01-Camera/03-Event-Cameras/frogeye2020.py +++ b/scripts/examples/01-Camera/03-Event-Cameras/frogeye2020.py @@ -3,7 +3,9 @@ # The frogeye2020 is a 320x240 event camera. There are two bits per pixel which show no motion, # motion in one direction, or motion in another direction. The sensor runs at 50 FPS.
-import sensor, image, time +import sensor +import image +import time sensor.reset() sensor.set_pixformat(sensor.GRAYSCALE) diff --git a/scripts/examples/01-Camera/03-Event-Cameras/frogeye2020_with_tracking.py b/scripts/examples/01-Camera/03-Event-Cameras/frogeye2020_with_tracking.py index c373715dc..a99205804 100644 --- a/scripts/examples/01-Camera/03-Event-Cameras/frogeye2020_with_tracking.py +++ b/scripts/examples/01-Camera/03-Event-Cameras/frogeye2020_with_tracking.py @@ -3,7 +3,9 @@ # The frogeye2020 is a 320x240 event camera. There are two bits per pixel which show no motion, # motion in one direction, or motion in another direction. The sensor runs at 50 FPS. -import sensor, image, time +import sensor +import image +import time sensor.reset() sensor.set_pixformat(sensor.GRAYSCALE) diff --git a/scripts/examples/01-Camera/04-Global-Shutter/high_fps.py b/scripts/examples/01-Camera/04-Global-Shutter/high_fps.py index d2cf12ded..bae9ea673 100644 --- a/scripts/examples/01-Camera/04-Global-Shutter/high_fps.py +++ b/scripts/examples/01-Camera/04-Global-Shutter/high_fps.py @@ -12,7 +12,8 @@ # time so you will not get the maximum readout speed unless you reduce the exposure time too. # This results in a dark image however so YOU NEED A LOT of lighting for high FPS. -import sensor, image, time +import sensor +import time sensor.reset() # Reset and initialize the sensor. sensor.set_pixformat(sensor.GRAYSCALE) # Set pixel format to GRAYSCALE diff --git a/scripts/examples/01-Camera/04-Global-Shutter/triggered_mode.py b/scripts/examples/01-Camera/04-Global-Shutter/triggered_mode.py index 08e78c916..764615e0a 100644 --- a/scripts/examples/01-Camera/04-Global-Shutter/triggered_mode.py +++ b/scripts/examples/01-Camera/04-Global-Shutter/triggered_mode.py @@ -12,7 +12,8 @@ # continuously by the camera and because you have to wait for the integration to finish before # readout of the frame. -import sensor, image, time +import sensor +import time sensor.reset() # Reset and initialize the sensor. sensor.set_pixformat(sensor.GRAYSCALE) # Set pixel format to GRAYSCALE diff --git a/scripts/examples/01-Camera/05-FLIR-Lepton/lepton_get_object_high_temp.py b/scripts/examples/01-Camera/05-FLIR-Lepton/lepton_get_object_high_temp.py index fc8c2cb56..7ddf0bfc9 100644 --- a/scripts/examples/01-Camera/05-FLIR-Lepton/lepton_get_object_high_temp.py +++ b/scripts/examples/01-Camera/05-FLIR-Lepton/lepton_get_object_high_temp.py @@ -17,7 +17,8 @@ # leptons don't have radiometry support or they don't activate their calibration process often # enough to deal with temperature changes (FLIR 2.5). -import sensor, image, time, math +import sensor +import time # Color Tracking Thresholds (Grayscale Min, Grayscale Max) threshold_list = [(100, 255)] # track very hot objects diff --git a/scripts/examples/01-Camera/05-FLIR-Lepton/lepton_get_object_temp.py b/scripts/examples/01-Camera/05-FLIR-Lepton/lepton_get_object_temp.py index 15c91ebf2..6165e728a 100644 --- a/scripts/examples/01-Camera/05-FLIR-Lepton/lepton_get_object_temp.py +++ b/scripts/examples/01-Camera/05-FLIR-Lepton/lepton_get_object_temp.py @@ -17,7 +17,8 @@ # leptons don't have radiometry support or they don't activate their calibration process often # enough to deal with temperature changes (FLIR 2.5).
-import sensor, image, time, math +import sensor +import time # Color Tracking Thresholds (Grayscale Min, Grayscale Max) threshold_list = [(200, 255)] diff --git a/scripts/examples/01-Camera/05-FLIR-Lepton/lepton_get_object_temp_color.py b/scripts/examples/01-Camera/05-FLIR-Lepton/lepton_get_object_temp_color.py index 2cd2c5406..a9c0cf554 100644 --- a/scripts/examples/01-Camera/05-FLIR-Lepton/lepton_get_object_temp_color.py +++ b/scripts/examples/01-Camera/05-FLIR-Lepton/lepton_get_object_temp_color.py @@ -17,7 +17,8 @@ # leptons don't have radiometry support or they don't activate their calibration process often # enough to deal with temperature changes (FLIR 2.5). -import sensor, image, time, math +import sensor +import time # Color Tracking Thresholds (Grayscale Min, Grayscale Max) threshold_list = [(200, 255)] diff --git a/scripts/examples/01-Camera/05-FLIR-Lepton/lepton_get_object_temp_color_lcd.py b/scripts/examples/01-Camera/05-FLIR-Lepton/lepton_get_object_temp_color_lcd.py index f7d389b57..4e69da4c7 100644 --- a/scripts/examples/01-Camera/05-FLIR-Lepton/lepton_get_object_temp_color_lcd.py +++ b/scripts/examples/01-Camera/05-FLIR-Lepton/lepton_get_object_temp_color_lcd.py @@ -17,7 +17,9 @@ # leptons don't have radiometry support or they don't activate their calibration process often # enough to deal with temperature changes (FLIR 2.5). -import sensor, image, time, math, lcd +import sensor +import time +import lcd # Color Tracking Thresholds (Grayscale Min, Grayscale Max) threshold_list = [(200, 255)] diff --git a/scripts/examples/01-Camera/05-FLIR-Lepton/lepton_get_object_temp_lcd.py b/scripts/examples/01-Camera/05-FLIR-Lepton/lepton_get_object_temp_lcd.py index 6594395b1..7064bfee2 100644 --- a/scripts/examples/01-Camera/05-FLIR-Lepton/lepton_get_object_temp_lcd.py +++ b/scripts/examples/01-Camera/05-FLIR-Lepton/lepton_get_object_temp_lcd.py @@ -17,7 +17,9 @@ # leptons don't have radiometry support or they don't activate their calibration process often # enough to deal with temperature changes (FLIR 2.5). -import sensor, image, time, math, lcd +import sensor +import time +import lcd # Color Tracking Thresholds (Grayscale Min, Grayscale Max) threshold_list = [(200, 255)] diff --git a/scripts/examples/01-Camera/05-FLIR-Lepton/lepton_hotspot_grayscale_color_tracking.py b/scripts/examples/01-Camera/05-FLIR-Lepton/lepton_hotspot_grayscale_color_tracking.py index 36127724a..c007bc7e5 100644 --- a/scripts/examples/01-Camera/05-FLIR-Lepton/lepton_hotspot_grayscale_color_tracking.py +++ b/scripts/examples/01-Camera/05-FLIR-Lepton/lepton_hotspot_grayscale_color_tracking.py @@ -7,7 +7,8 @@ # stabilizes. You can force the re-calibration to not happen if you need to via the lepton API. # However, it is not recommended because the image will degrade over time. -import sensor, image, time, math +import sensor +import time # Color Tracking Thresholds (Grayscale Min, Grayscale Max) threshold_list = [(220, 255)] diff --git a/scripts/examples/01-Camera/05-FLIR-Lepton/lepton_hotspot_grayscale_color_tracking_lcd.py b/scripts/examples/01-Camera/05-FLIR-Lepton/lepton_hotspot_grayscale_color_tracking_lcd.py index c642da053..b3ed117fd 100644 --- a/scripts/examples/01-Camera/05-FLIR-Lepton/lepton_hotspot_grayscale_color_tracking_lcd.py +++ b/scripts/examples/01-Camera/05-FLIR-Lepton/lepton_hotspot_grayscale_color_tracking_lcd.py @@ -7,7 +7,9 @@ # stabilizes. You can force the re-calibration to not happen if you need to via the lepton API.
# However, it is not recommended because the image will degrade over time. -import sensor, image, time, math, lcd +import sensor +import time +import lcd # Color Tracking Thresholds (Grayscale Min, Grayscale Max) threshold_list = [(220, 255)] diff --git a/scripts/examples/01-Camera/05-FLIR-Lepton/lepton_hotspot_rgb565_color_tracking.py b/scripts/examples/01-Camera/05-FLIR-Lepton/lepton_hotspot_rgb565_color_tracking.py index 3bab9b536..438acdee8 100644 --- a/scripts/examples/01-Camera/05-FLIR-Lepton/lepton_hotspot_rgb565_color_tracking.py +++ b/scripts/examples/01-Camera/05-FLIR-Lepton/lepton_hotspot_rgb565_color_tracking.py @@ -7,7 +7,8 @@ # stabilizes. You can force the re-calibration to not happen if you need to via the lepton API. # However, it is not recommended because the image will degrade over time. -import sensor, image, time, math +import sensor +import time # Color Tracking Thresholds (L Min, L Max, A Min, A Max, B Min, B Max) threshold_list = [( 70, 100, -30, 40, 20, 100)] diff --git a/scripts/examples/01-Camera/05-FLIR-Lepton/lepton_hotspot_rgb565_color_tracking_lcd.py b/scripts/examples/01-Camera/05-FLIR-Lepton/lepton_hotspot_rgb565_color_tracking_lcd.py index 2ca9d9043..8a9cb8116 100644 --- a/scripts/examples/01-Camera/05-FLIR-Lepton/lepton_hotspot_rgb565_color_tracking_lcd.py +++ b/scripts/examples/01-Camera/05-FLIR-Lepton/lepton_hotspot_rgb565_color_tracking_lcd.py @@ -7,7 +7,9 @@ # stabilizes. You can force the re-calibration to not happen if you need to via the lepton API. # However, it is not recommended because the image will degrade over time. -import sensor, image, time, math, lcd +import sensor +import time +import lcd # Color Tracking Thresholds (L Min, L Max, A Min, A Max, B Min, B Max) threshold_list = [( 70, 100, -30, 40, 20, 100)] diff --git a/scripts/examples/01-Camera/05-FLIR-Lepton/lepton_target_temp_hotspot_grayscale_color_tracking.py b/scripts/examples/01-Camera/05-FLIR-Lepton/lepton_target_temp_hotspot_grayscale_color_tracking.py index c5ffa1a51..2b97f2714 100644 --- a/scripts/examples/01-Camera/05-FLIR-Lepton/lepton_target_temp_hotspot_grayscale_color_tracking.py +++ b/scripts/examples/01-Camera/05-FLIR-Lepton/lepton_target_temp_hotspot_grayscale_color_tracking.py @@ -17,7 +17,8 @@ # leptons don't have radiometry support or they don't activate their calibration process often # enough to deal with temperature changes (FLIR 2.5). -import sensor, image, time, math +import sensor +import time # Color Tracking Thresholds (Grayscale Min, Grayscale Max) threshold_list = [(220, 255)] diff --git a/scripts/examples/01-Camera/05-FLIR-Lepton/lepton_target_temp_hotspot_rgb565_color_tracking.py b/scripts/examples/01-Camera/05-FLIR-Lepton/lepton_target_temp_hotspot_rgb565_color_tracking.py index 7ce91a823..2797c6f73 100644 --- a/scripts/examples/01-Camera/05-FLIR-Lepton/lepton_target_temp_hotspot_rgb565_color_tracking.py +++ b/scripts/examples/01-Camera/05-FLIR-Lepton/lepton_target_temp_hotspot_rgb565_color_tracking.py @@ -17,7 +17,8 @@ # leptons don't have radiometry support or they don't activate their calibration process often # enough to deal with temperature changes (FLIR 2.5).
-import sensor, image, time, math +import sensor +import time # Color Tracking Thresholds (L Min, L Max, A Min, A Max, B Min, B Max) threshold_list = [( 70, 100, -30, 40, 20, 100)] diff --git a/scripts/examples/01-Camera/06-Time-of-Flight/tof_camera.py b/scripts/examples/01-Camera/06-Time-of-Flight/tof_camera.py index 5493ad767..dcc4b9ef8 100644 --- a/scripts/examples/01-Camera/06-Time-of-Flight/tof_camera.py +++ b/scripts/examples/01-Camera/06-Time-of-Flight/tof_camera.py @@ -3,7 +3,9 @@ # This example shows off how to overlay a depth map onto # OpenMV Cam's live video output from the main camera. -import image, time, tof +import image +import time +import tof IMAGE_SCALE = 10 # Higher scaling uses more memory. drawing_hint = image.BILINEAR # or image.BICUBIC or 0 (nearest neighbor) diff --git a/scripts/examples/01-Camera/06-Time-of-Flight/tof_overlay.py b/scripts/examples/01-Camera/06-Time-of-Flight/tof_overlay.py index 317ccc125..dcb7d515e 100644 --- a/scripts/examples/01-Camera/06-Time-of-Flight/tof_overlay.py +++ b/scripts/examples/01-Camera/06-Time-of-Flight/tof_overlay.py @@ -2,7 +2,10 @@ # # This example shows off how to overlay a depth map onto # OpenMV Cam's live video output from the main camera. -import sensor, image, time, tof +import sensor +import image +import time +import tof sensor.reset() sensor.set_pixformat(sensor.RGB565) diff --git a/scripts/examples/01-Camera/07-Sensor-Control/sensor_auto_gain_control.py b/scripts/examples/01-Camera/07-Sensor-Control/sensor_auto_gain_control.py index 3ecdc89d7..90fee87ea 100644 --- a/scripts/examples/01-Camera/07-Sensor-Control/sensor_auto_gain_control.py +++ b/scripts/examples/01-Camera/07-Sensor-Control/sensor_auto_gain_control.py @@ -22,7 +22,8 @@ # more when the lighting changes versus the exposure being constant and # the gain changing. -import sensor, image, time +import sensor +import time sensor.reset() # Reset and initialize the sensor. sensor.set_pixformat(sensor.RGB565) # Set pixel format to RGB565 (or GRAYSCALE) diff --git a/scripts/examples/01-Camera/07-Sensor-Control/sensor_exposure_control.py b/scripts/examples/01-Camera/07-Sensor-Control/sensor_exposure_control.py index edc994c44..9f7bc3f78 100644 --- a/scripts/examples/01-Camera/07-Sensor-Control/sensor_exposure_control.py +++ b/scripts/examples/01-Camera/07-Sensor-Control/sensor_exposure_control.py @@ -15,7 +15,8 @@ # noise. So, it's best to let the exposure increase as much as possible # and then use gain control to make up any remaining ground. -import sensor, image, time +import sensor +import time # Change this value to adjust the exposure. Try 10.0/0.1/etc. EXPOSURE_TIME_SCALE = 1.0 diff --git a/scripts/examples/01-Camera/07-Sensor-Control/sensor_horizontal_mirror.py b/scripts/examples/01-Camera/07-Sensor-Control/sensor_horizontal_mirror.py index 467b46286..6291dc5c3 100644 --- a/scripts/examples/01-Camera/07-Sensor-Control/sensor_horizontal_mirror.py +++ b/scripts/examples/01-Camera/07-Sensor-Control/sensor_horizontal_mirror.py @@ -3,7 +3,8 @@ # This example shows off horizontally mirroring the image in hardware # from the camera sensor. -import sensor, image, time +import sensor +import time sensor.reset() # Reset and initialize the sensor.
sensor.set_pixformat(sensor.RGB565) # Set pixel format to RGB565 (or GRAYSCALE) diff --git a/scripts/examples/01-Camera/07-Sensor-Control/sensor_manual_whitebal_control.py b/scripts/examples/01-Camera/07-Sensor-Control/sensor_manual_whitebal_control.py index d9420ada3..69ba02bb3 100644 --- a/scripts/examples/01-Camera/07-Sensor-Control/sensor_manual_whitebal_control.py +++ b/scripts/examples/01-Camera/07-Sensor-Control/sensor_manual_whitebal_control.py @@ -13,7 +13,8 @@ # the sensor on startup you can control the colors # the camera sees. -import sensor, image, time +import sensor +import time sensor.reset() # Reset and initialize the sensor. sensor.set_pixformat(sensor.RGB565) # Set pixel format to RGB565 (or GRAYSCALE) diff --git a/scripts/examples/01-Camera/07-Sensor-Control/sensor_vertical_flip.py b/scripts/examples/01-Camera/07-Sensor-Control/sensor_vertical_flip.py index 0470f27bf..453f4ab81 100644 --- a/scripts/examples/01-Camera/07-Sensor-Control/sensor_vertical_flip.py +++ b/scripts/examples/01-Camera/07-Sensor-Control/sensor_vertical_flip.py @@ -3,7 +3,8 @@ # This example shows off vertically flipping the image in hardware # from the camera sensor. -import sensor, image, time +import sensor +import time sensor.reset() # Reset and initialize the sensor. sensor.set_pixformat(sensor.RGB565) # Set pixel format to RGB565 (or GRAYSCALE) diff --git a/scripts/examples/01-Camera/07-Sensor-Control/sesnor_manual_gain_control.py b/scripts/examples/01-Camera/07-Sensor-Control/sesnor_manual_gain_control.py index 01d7d3232..3099c37a0 100644 --- a/scripts/examples/01-Camera/07-Sensor-Control/sesnor_manual_gain_control.py +++ b/scripts/examples/01-Camera/07-Sensor-Control/sesnor_manual_gain_control.py @@ -15,7 +15,8 @@ # noise. So, it's best to let the exposure increase as much as possible # and then use gain control to make up any remaining ground. -import sensor, image, time +import sensor +import time # Change this value to adjust the gain. Try 10.0/0/0.1/etc. GAIN_SCALE = 1.0 diff --git a/scripts/examples/01-Camera/08-Readout-Control/100_fps_ir_led_tracking.py b/scripts/examples/01-Camera/08-Readout-Control/100_fps_ir_led_tracking.py index 95493db42..5791d2f8d 100644 --- a/scripts/examples/01-Camera/08-Readout-Control/100_fps_ir_led_tracking.py +++ b/scripts/examples/01-Camera/08-Readout-Control/100_fps_ir_led_tracking.py @@ -3,7 +3,8 @@ # This example was designed and tested on the OpenMV Cam H7 Plus using the OV5640 sensor. -import sensor, image, time +import sensor +import time EXPOSURE_MICROSECONDS = 1000 TRACKING_THRESHOLDS = [(128, 255)] # When you lower the exposure you darken everything. diff --git a/scripts/examples/01-Camera/08-Readout-Control/apriltag_tracking.py b/scripts/examples/01-Camera/08-Readout-Control/apriltag_tracking.py index c465d1d92..ce5c6c9f8 100644 --- a/scripts/examples/01-Camera/08-Readout-Control/apriltag_tracking.py +++ b/scripts/examples/01-Camera/08-Readout-Control/apriltag_tracking.py @@ -3,7 +3,8 @@ # This example was designed and tested on the OpenMV Cam H7 Plus using the OV5640 sensor. -import sensor, image, time +import sensor +import time # This example script forces the exposure to a constant value for the whole time. However, you may # wish to dynamically adjust the exposure when the readout window shrinks to a small size.
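Every hunk in this patch follows the same mechanical pattern: a comma-separated import statement is split into one import per line (the style pycodestyle flags as E401, "multiple imports on one line"), and any module the script never actually references, most often image or math, is dropped at the same time, which is why several hunks shrink the import set. A minimal sketch of the transformation, using the import list from the Lepton hunks above:

    # Old style, one line pulling in four modules, two of them unused here:
    #   import sensor, image, time, math
    # New style, one import per line, with the unused "image" and "math" dropped:
    import sensor
    import time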
diff --git a/scripts/examples/02-Image-Processing/00-Drawing/arrow_drawing.py b/scripts/examples/02-Image-Processing/00-Drawing/arrow_drawing.py index 33b190280..31f00be4b 100644 --- a/scripts/examples/02-Image-Processing/00-Drawing/arrow_drawing.py +++ b/scripts/examples/02-Image-Processing/00-Drawing/arrow_drawing.py @@ -2,7 +2,9 @@ # # This example shows off drawing arrows on the OpenMV Cam. -import sensor, image, time, pyb +import sensor +import time +import pyb sensor.reset() sensor.set_pixformat(sensor.RGB565) # or GRAYSCALE... diff --git a/scripts/examples/02-Image-Processing/00-Drawing/circle_drawing.py b/scripts/examples/02-Image-Processing/00-Drawing/circle_drawing.py index 373d5a40f..49756d30f 100644 --- a/scripts/examples/02-Image-Processing/00-Drawing/circle_drawing.py +++ b/scripts/examples/02-Image-Processing/00-Drawing/circle_drawing.py @@ -2,7 +2,9 @@ # # This example shows off drawing circles on the OpenMV Cam. -import sensor, image, time, pyb +import sensor +import time +import pyb sensor.reset() sensor.set_pixformat(sensor.RGB565) # or GRAYSCALE... diff --git a/scripts/examples/02-Image-Processing/00-Drawing/copy2fb.py b/scripts/examples/02-Image-Processing/00-Drawing/copy2fb.py index 3b0761a5a..07dbedd13 100644 --- a/scripts/examples/02-Image-Processing/00-Drawing/copy2fb.py +++ b/scripts/examples/02-Image-Processing/00-Drawing/copy2fb.py @@ -2,7 +2,9 @@ # # This example shows how to load and copy an image to framebuffer for testing. -import sensor, image, time +import sensor +import image +import time # Still need to init sensor sensor.reset() diff --git a/scripts/examples/02-Image-Processing/00-Drawing/cross_drawing.py b/scripts/examples/02-Image-Processing/00-Drawing/cross_drawing.py index 8d7e1e5ff..68b422bf4 100644 --- a/scripts/examples/02-Image-Processing/00-Drawing/cross_drawing.py +++ b/scripts/examples/02-Image-Processing/00-Drawing/cross_drawing.py @@ -2,7 +2,9 @@ # # This example shows off drawing crosses on the OpenMV Cam. -import sensor, image, time, pyb +import sensor +import time +import pyb sensor.reset() sensor.set_pixformat(sensor.RGB565) # or GRAYSCALE... diff --git a/scripts/examples/02-Image-Processing/00-Drawing/ellipse_drawing.py b/scripts/examples/02-Image-Processing/00-Drawing/ellipse_drawing.py index 9080a99d2..d527ab3d0 100644 --- a/scripts/examples/02-Image-Processing/00-Drawing/ellipse_drawing.py +++ b/scripts/examples/02-Image-Processing/00-Drawing/ellipse_drawing.py @@ -2,7 +2,9 @@ # # This example shows off drawing ellipses on the OpenMV Cam. -import sensor, image, time, pyb +import sensor +import time +import pyb sensor.reset() sensor.set_pixformat(sensor.RGB565) # or GRAYSCALE... diff --git a/scripts/examples/02-Image-Processing/00-Drawing/flood_fill.py b/scripts/examples/02-Image-Processing/00-Drawing/flood_fill.py index 4857ce3ca..49976dc05 100644 --- a/scripts/examples/02-Image-Processing/00-Drawing/flood_fill.py +++ b/scripts/examples/02-Image-Processing/00-Drawing/flood_fill.py @@ -2,7 +2,8 @@ # # This example shows off flood filling areas in the image. -import sensor, image, time +import sensor +import time sensor.reset() sensor.set_pixformat(sensor.RGB565) # or GRAYSCALE... 
diff --git a/scripts/examples/02-Image-Processing/00-Drawing/image_drawing.py b/scripts/examples/02-Image-Processing/00-Drawing/image_drawing.py index 5931c394d..091d4ab0f 100644 --- a/scripts/examples/02-Image-Processing/00-Drawing/image_drawing.py +++ b/scripts/examples/02-Image-Processing/00-Drawing/image_drawing.py @@ -2,7 +2,9 @@ # # This example shows off how to draw images in the frame buffer. -import sensor, image, time, pyb +import sensor +import time +import pyb sensor.reset() sensor.set_pixformat(sensor.RGB565) # or GRAYSCALE... diff --git a/scripts/examples/02-Image-Processing/00-Drawing/image_drawing_advanced.py b/scripts/examples/02-Image-Processing/00-Drawing/image_drawing_advanced.py index 6d3b7b7e0..b4d2fa94e 100644 --- a/scripts/examples/02-Image-Processing/00-Drawing/image_drawing_advanced.py +++ b/scripts/examples/02-Image-Processing/00-Drawing/image_drawing_advanced.py @@ -2,7 +2,10 @@ # # Exercise draw image with many different values for testing -import sensor, image, time, pyb +import sensor +import image +import time +import pyb sensor.reset() sensor.set_pixformat(sensor.RGB565) # or GRAYSCALE... diff --git a/scripts/examples/02-Image-Processing/00-Drawing/image_drawing_alpha_blending_test.py b/scripts/examples/02-Image-Processing/00-Drawing/image_drawing_alpha_blending_test.py index 8cae0107a..a77e9a819 100644 --- a/scripts/examples/02-Image-Processing/00-Drawing/image_drawing_alpha_blending_test.py +++ b/scripts/examples/02-Image-Processing/00-Drawing/image_drawing_alpha_blending_test.py @@ -5,7 +5,9 @@ # area scaling along with color channel extraction, alpha blending, # color palette application, and alpha palette application. -import sensor, image, time +import sensor +import image +import time sensor.reset() sensor.set_pixformat(sensor.RGB565) diff --git a/scripts/examples/02-Image-Processing/00-Drawing/image_drawing_alpha_blending_with_color_table_test.py b/scripts/examples/02-Image-Processing/00-Drawing/image_drawing_alpha_blending_with_color_table_test.py index a3f70c0a6..bb27e926c 100644 --- a/scripts/examples/02-Image-Processing/00-Drawing/image_drawing_alpha_blending_with_color_table_test.py +++ b/scripts/examples/02-Image-Processing/00-Drawing/image_drawing_alpha_blending_with_color_table_test.py @@ -5,7 +5,9 @@ # area scaling along with color channel extraction, alpha blending, # color palette application, and alpha palette application. -import sensor, image, time +import sensor +import image +import time sensor.reset() sensor.set_pixformat(sensor.RGB565) diff --git a/scripts/examples/02-Image-Processing/00-Drawing/image_drawing_alpha_table_test.py b/scripts/examples/02-Image-Processing/00-Drawing/image_drawing_alpha_table_test.py index 7ca783fe9..58937de34 100644 --- a/scripts/examples/02-Image-Processing/00-Drawing/image_drawing_alpha_table_test.py +++ b/scripts/examples/02-Image-Processing/00-Drawing/image_drawing_alpha_table_test.py @@ -5,7 +5,9 @@ # area scaling along with color channel extraction, alpha blending, # color palette application, and alpha palette application. 
-import sensor, image, time +import sensor +import image +import time sensor.reset() sensor.set_pixformat(sensor.RGB565) diff --git a/scripts/examples/02-Image-Processing/00-Drawing/image_drawing_alpha_table_with_color_table_test.py b/scripts/examples/02-Image-Processing/00-Drawing/image_drawing_alpha_table_with_color_table_test.py index 014a2c1ce..f1e7129de 100644 --- a/scripts/examples/02-Image-Processing/00-Drawing/image_drawing_alpha_table_with_color_table_test.py +++ b/scripts/examples/02-Image-Processing/00-Drawing/image_drawing_alpha_table_with_color_table_test.py @@ -5,7 +5,9 @@ # area scaling along with color channel extraction, alpha blending, # color palette application, and alpha palette application. -import sensor, image, time +import sensor +import image +import time sensor.reset() sensor.set_pixformat(sensor.RGB565) diff --git a/scripts/examples/02-Image-Processing/00-Drawing/image_drawing_scale_down_test.py b/scripts/examples/02-Image-Processing/00-Drawing/image_drawing_scale_down_test.py index 7b4f9a67c..ed9c10d98 100644 --- a/scripts/examples/02-Image-Processing/00-Drawing/image_drawing_scale_down_test.py +++ b/scripts/examples/02-Image-Processing/00-Drawing/image_drawing_scale_down_test.py @@ -7,7 +7,9 @@ # DISABLE THE FRAME BUFFER TO SEE THE REAL FPS -import sensor, image, time +import sensor +import image +import time up_hint = 0 # image.BILINEAR image.BICUBIC down_hint = image.AREA # image.BILINEAR image.BICUBIC image.AREA diff --git a/scripts/examples/02-Image-Processing/00-Drawing/image_drawing_scale_up_test.py b/scripts/examples/02-Image-Processing/00-Drawing/image_drawing_scale_up_test.py index 92bcf91ee..bc43d230a 100644 --- a/scripts/examples/02-Image-Processing/00-Drawing/image_drawing_scale_up_test.py +++ b/scripts/examples/02-Image-Processing/00-Drawing/image_drawing_scale_up_test.py @@ -7,7 +7,9 @@ # DISABLE THE FRAME BUFFER TO SEE THE REAL FPS -import sensor, image, time +import sensor +import image +import time hint = 0 # image.BILINEAR image.BICUBIC diff --git a/scripts/examples/02-Image-Processing/00-Drawing/image_drawing_with_custom_palette.py b/scripts/examples/02-Image-Processing/00-Drawing/image_drawing_with_custom_palette.py index d379b3342..b9de5099d 100644 --- a/scripts/examples/02-Image-Processing/00-Drawing/image_drawing_with_custom_palette.py +++ b/scripts/examples/02-Image-Processing/00-Drawing/image_drawing_with_custom_palette.py @@ -2,7 +2,10 @@ # # This example shows off how to draw images in the frame buffer with a custom generated color palette. -import sensor, image, time, pyb +import sensor +import image +import time +import pyb sensor.reset() sensor.set_pixformat(sensor.GRAYSCALE) # or RGB565... diff --git a/scripts/examples/02-Image-Processing/00-Drawing/keypoints_drawing.py b/scripts/examples/02-Image-Processing/00-Drawing/keypoints_drawing.py index c41a8161d..db2bc8b1e 100644 --- a/scripts/examples/02-Image-Processing/00-Drawing/keypoints_drawing.py +++ b/scripts/examples/02-Image-Processing/00-Drawing/keypoints_drawing.py @@ -3,7 +3,9 @@ # This example shows off drawing keypoints on the OpenMV Cam. Usually you call draw_keypoints() # on a keypoints object but you can also call it on a list of 3-value tuples... -import sensor, image, time, pyb +import sensor +import time +import pyb sensor.reset() sensor.set_pixformat(sensor.RGB565) # or GRAYSCALE...
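Note how the drawing hunks decide whether import image survives: it stays only when the script references the module by name, e.g. the image.AREA and image.BICUBIC scaling hints in the scale-test hunks above or the image.Image() constructor in copy2fb.py, and is dropped when the script only calls methods on the object returned by sensor.snapshot(). A short sketch of the distinction (the BMP path below is illustrative, not taken from the patch):

    import sensor
    import image  # kept: module-level names are referenced below

    sensor.reset()
    sensor.set_pixformat(sensor.RGB565)
    sensor.set_framesize(sensor.QVGA)

    img = sensor.snapshot()  # method calls alone would not need "import image"
    img.draw_rectangle(10, 10, 64, 64, color=(255, 0, 0))
    # A module-level reference like this constructor is what keeps the import:
    fb = image.Image("/example.bmp", copy_to_fb=True)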
diff --git a/scripts/examples/02-Image-Processing/00-Drawing/line_drawing.py b/scripts/examples/02-Image-Processing/00-Drawing/line_drawing.py index eb2d761bd..c84ee6c83 100644 --- a/scripts/examples/02-Image-Processing/00-Drawing/line_drawing.py +++ b/scripts/examples/02-Image-Processing/00-Drawing/line_drawing.py @@ -2,7 +2,9 @@ # # This example shows off drawing lines on the OpenMV Cam. -import sensor, image, time, pyb +import sensor +import time +import pyb sensor.reset() sensor.set_pixformat(sensor.RGB565) # or GRAYSCALE... diff --git a/scripts/examples/02-Image-Processing/00-Drawing/rectangle_drawing.py b/scripts/examples/02-Image-Processing/00-Drawing/rectangle_drawing.py index ab2afac66..ce73fcc60 100644 --- a/scripts/examples/02-Image-Processing/00-Drawing/rectangle_drawing.py +++ b/scripts/examples/02-Image-Processing/00-Drawing/rectangle_drawing.py @@ -2,7 +2,9 @@ # # This example shows off drawing rectangles on the OpenMV Cam. -import sensor, image, time, pyb +import sensor +import time +import pyb sensor.reset() sensor.set_pixformat(sensor.RGB565) # or GRAYSCALE... diff --git a/scripts/examples/02-Image-Processing/00-Drawing/text_drawing.py b/scripts/examples/02-Image-Processing/00-Drawing/text_drawing.py index da37af656..806f63787 100644 --- a/scripts/examples/02-Image-Processing/00-Drawing/text_drawing.py +++ b/scripts/examples/02-Image-Processing/00-Drawing/text_drawing.py @@ -2,7 +2,9 @@ # # This example shows off drawing text on the OpenMV Cam. -import sensor, image, time, pyb +import sensor +import time +import pyb sensor.reset() sensor.set_pixformat(sensor.RGB565) # or GRAYSCALE... diff --git a/scripts/examples/02-Image-Processing/01-Image-Filters/adaptive_histogram_equalization.py b/scripts/examples/02-Image-Processing/01-Image-Filters/adaptive_histogram_equalization.py index a958b531e..180d30018 100644 --- a/scripts/examples/02-Image-Processing/01-Image-Filters/adaptive_histogram_equalization.py +++ b/scripts/examples/02-Image-Processing/01-Image-Filters/adaptive_histogram_equalization.py @@ -6,7 +6,8 @@ # the image contrast versus a global histogram equalization. Additionally, # you may specify a clip limit to prevent the contrast from going wild. -import sensor, image, time +import sensor +import time sensor.reset() sensor.set_pixformat(sensor.RGB565) diff --git a/scripts/examples/02-Image-Processing/01-Image-Filters/blur_filter.py b/scripts/examples/02-Image-Processing/01-Image-Filters/blur_filter.py index 6074d2f2b..c5a39d9de 100644 --- a/scripts/examples/02-Image-Processing/01-Image-Filters/blur_filter.py +++ b/scripts/examples/02-Image-Processing/01-Image-Filters/blur_filter.py @@ -2,7 +2,8 @@ # # This example shows off using the Gaussian filter to blur images. -import sensor, image, time +import sensor +import time sensor.reset() # Initialize the camera sensor. sensor.set_pixformat(sensor.GRAYSCALE) # or sensor.RGB565 diff --git a/scripts/examples/02-Image-Processing/01-Image-Filters/cartoon_filter.py b/scripts/examples/02-Image-Processing/01-Image-Filters/cartoon_filter.py index c1703ca47..b5193ef52 100644 --- a/scripts/examples/02-Image-Processing/01-Image-Filters/cartoon_filter.py +++ b/scripts/examples/02-Image-Processing/01-Image-Filters/cartoon_filter.py @@ -4,7 +4,8 @@ # filter works by joining similar pixel areas of an image and replacing # the pixels in those areas with the area mean. -import sensor, image, time +import sensor +import time sensor.reset() sensor.set_pixformat(sensor.RGB565) # or GRAYSCALE...
diff --git a/scripts/examples/02-Image-Processing/01-Image-Filters/color_bilateral_filter.py b/scripts/examples/02-Image-Processing/01-Image-Filters/color_bilateral_filter.py index 1bdbbb7eb..7e860678a 100644 --- a/scripts/examples/02-Image-Processing/01-Image-Filters/color_bilateral_filter.py +++ b/scripts/examples/02-Image-Processing/01-Image-Filters/color_bilateral_filter.py @@ -2,7 +2,8 @@ # # This example shows off using the bilateral filter on color images. -import sensor, image, time +import sensor +import time sensor.reset() # Initialize the camera sensor. sensor.set_pixformat(sensor.RGB565) # or sensor.GRAYSCALE diff --git a/scripts/examples/02-Image-Processing/01-Image-Filters/color_binary_filter.py b/scripts/examples/02-Image-Processing/01-Image-Filters/color_binary_filter.py index b8686f7bc..425d1bbc3 100644 --- a/scripts/examples/02-Image-Processing/01-Image-Filters/color_binary_filter.py +++ b/scripts/examples/02-Image-Processing/01-Image-Filters/color_binary_filter.py @@ -3,7 +3,8 @@ # This script shows off the binary image filter. You may pass binary any # number of thresholds to segment the image by. -import sensor, image, time +import sensor +import time sensor.reset() sensor.set_framesize(sensor.QVGA) diff --git a/scripts/examples/02-Image-Processing/01-Image-Filters/color_light_removal.py b/scripts/examples/02-Image-Processing/01-Image-Filters/color_light_removal.py index 270104cfb..3855a6b3a 100644 --- a/scripts/examples/02-Image-Processing/01-Image-Filters/color_light_removal.py +++ b/scripts/examples/02-Image-Processing/01-Image-Filters/color_light_removal.py @@ -7,7 +7,8 @@ # histeq() on the image without outliers from oversaturated # parts of the image breaking the algorithm... -import sensor, image, time +import sensor +import time sensor.reset() # Initialize the camera sensor. sensor.set_pixformat(sensor.RGB565) # or sensor.GRAYSCALE diff --git a/scripts/examples/02-Image-Processing/01-Image-Filters/edge_filter.py b/scripts/examples/02-Image-Processing/01-Image-Filters/edge_filter.py index cdc03ddf3..111f22471 100644 --- a/scripts/examples/02-Image-Processing/01-Image-Filters/edge_filter.py +++ b/scripts/examples/02-Image-Processing/01-Image-Filters/edge_filter.py @@ -2,7 +2,8 @@ # # This example shows off using the laplacian filter to detect edges. -import sensor, image, time +import sensor +import time sensor.reset() # Initialize the camera sensor. sensor.set_pixformat(sensor.GRAYSCALE) # or sensor.RGB565 diff --git a/scripts/examples/02-Image-Processing/01-Image-Filters/erode_and_dilate.py b/scripts/examples/02-Image-Processing/01-Image-Filters/erode_and_dilate.py index 06a6fde68..2b66a50bb 100644 --- a/scripts/examples/02-Image-Processing/01-Image-Filters/erode_and_dilate.py +++ b/scripts/examples/02-Image-Processing/01-Image-Filters/erode_and_dilate.py @@ -4,7 +4,8 @@ # a binary image to remove noise. This example was originally a test but it's # useful for showing off how these functions work. -import pyb, sensor, image +import pyb +import sensor sensor.reset() sensor.set_framesize(sensor.QVGA) diff --git a/scripts/examples/02-Image-Processing/01-Image-Filters/gamma_correction.py b/scripts/examples/02-Image-Processing/01-Image-Filters/gamma_correction.py index 2dd1137c5..03c876d91 100644 --- a/scripts/examples/02-Image-Processing/01-Image-Filters/gamma_correction.py +++ b/scripts/examples/02-Image-Processing/01-Image-Filters/gamma_correction.py @@ -3,7 +3,8 @@ # This example shows off gamma correction to make the image brighter.
The gamma # correction method can also fix contrast and brightness too. -import sensor, image, time +import sensor +import time sensor.reset() sensor.set_pixformat(sensor.RGB565) diff --git a/scripts/examples/02-Image-Processing/01-Image-Filters/grayscale_bilateral_filter.py b/scripts/examples/02-Image-Processing/01-Image-Filters/grayscale_bilateral_filter.py index 6b3a67b21..88ace3880 100644 --- a/scripts/examples/02-Image-Processing/01-Image-Filters/grayscale_bilateral_filter.py +++ b/scripts/examples/02-Image-Processing/01-Image-Filters/grayscale_bilateral_filter.py @@ -2,7 +2,8 @@ # # This example shows off using the bilateral filter on grayscale images. -import sensor, image, time +import sensor +import time sensor.reset() # Initialize the camera sensor. sensor.set_pixformat(sensor.GRAYSCALE) # or sensor.RGB565 diff --git a/scripts/examples/02-Image-Processing/01-Image-Filters/grayscale_binary_filter.py b/scripts/examples/02-Image-Processing/01-Image-Filters/grayscale_binary_filter.py index dfaed5012..7e02cbe20 100644 --- a/scripts/examples/02-Image-Processing/01-Image-Filters/grayscale_binary_filter.py +++ b/scripts/examples/02-Image-Processing/01-Image-Filters/grayscale_binary_filter.py @@ -3,7 +3,8 @@ # This script shows off the binary image filter. You may pass binary any # number of thresholds to segment the image by. -import sensor, image, time +import sensor +import time sensor.reset() sensor.set_framesize(sensor.QVGA) diff --git a/scripts/examples/02-Image-Processing/01-Image-Filters/grayscale_light_removal.py b/scripts/examples/02-Image-Processing/01-Image-Filters/grayscale_light_removal.py index d42b8a8e9..3748ccd91 100644 --- a/scripts/examples/02-Image-Processing/01-Image-Filters/grayscale_light_removal.py +++ b/scripts/examples/02-Image-Processing/01-Image-Filters/grayscale_light_removal.py @@ -7,7 +7,8 @@ # histeq() on the image without outliers from oversaturated # parts of the image breaking the algorithm... -import sensor, image, time +import sensor +import time sensor.reset() # Initialize the camera sensor. sensor.set_pixformat(sensor.GRAYSCALE) # or sensor.RGB565 diff --git a/scripts/examples/02-Image-Processing/01-Image-Filters/histogram_equalization.py b/scripts/examples/02-Image-Processing/01-Image-Filters/histogram_equalization.py index 2a3aece50..137a00957 100644 --- a/scripts/examples/02-Image-Processing/01-Image-Filters/histogram_equalization.py +++ b/scripts/examples/02-Image-Processing/01-Image-Filters/histogram_equalization.py @@ -3,7 +3,8 @@ # This example shows off how to use histogram equalization to improve # the contrast in the image. -import sensor, image, time +import sensor +import time sensor.reset() sensor.set_pixformat(sensor.RGB565) diff --git a/scripts/examples/02-Image-Processing/01-Image-Filters/kernel_filters.py b/scripts/examples/02-Image-Processing/01-Image-Filters/kernel_filters.py index 9b9dd565f..508635d85 100644 --- a/scripts/examples/02-Image-Processing/01-Image-Filters/kernel_filters.py +++ b/scripts/examples/02-Image-Processing/01-Image-Filters/kernel_filters.py @@ -2,7 +2,8 @@ # # This example shows off how to use a generic kernel filter. -import sensor, image, time +import sensor +import time sensor.reset() # Initialize the camera sensor. 
sensor.set_pixformat(sensor.GRAYSCALE) # or sensor.RGB565 diff --git a/scripts/examples/02-Image-Processing/01-Image-Filters/lens_correction.py b/scripts/examples/02-Image-Processing/01-Image-Filters/lens_correction.py index 56066f583..91b6dadc1 100644 --- a/scripts/examples/02-Image-Processing/01-Image-Filters/lens_correction.py +++ b/scripts/examples/02-Image-Processing/01-Image-Filters/lens_correction.py @@ -5,7 +5,8 @@ # detection. Increase the strength below until lines are straight in the view. # Zoom in (higher) or out (lower) until you see enough of the image. -import sensor, image, time +import sensor +import time sensor.reset() sensor.set_pixformat(sensor.RGB565) diff --git a/scripts/examples/02-Image-Processing/01-Image-Filters/linear_polar.py b/scripts/examples/02-Image-Processing/01-Image-Filters/linear_polar.py index 98106542f..cd481617f 100644 --- a/scripts/examples/02-Image-Processing/01-Image-Filters/linear_polar.py +++ b/scripts/examples/02-Image-Processing/01-Image-Filters/linear_polar.py @@ -5,7 +5,8 @@ # become translations in the X direction and linear changes # in scale become linear translations in the Y direction. -import sensor, image, time +import sensor +import time sensor.reset() # Initialize the camera sensor. sensor.set_pixformat(sensor.RGB565) # or sensor.GRAYSCALE diff --git a/scripts/examples/02-Image-Processing/01-Image-Filters/log_polar.py b/scripts/examples/02-Image-Processing/01-Image-Filters/log_polar.py index d79f374b9..99479b3e1 100644 --- a/scripts/examples/02-Image-Processing/01-Image-Filters/log_polar.py +++ b/scripts/examples/02-Image-Processing/01-Image-Filters/log_polar.py @@ -5,7 +5,8 @@ # become translations in the X direction and exponential changes # in scale (x2, x4, etc.) become linear translations in the Y direction. -import sensor, image, time +import sensor +import time sensor.reset() # Initialize the camera sensor. sensor.set_pixformat(sensor.RGB565) # or sensor.GRAYSCALE diff --git a/scripts/examples/02-Image-Processing/01-Image-Filters/mean_adaptive_threshold_filter.py b/scripts/examples/02-Image-Processing/01-Image-Filters/mean_adaptive_threshold_filter.py index 2d140ecc4..b3435d1bd 100644 --- a/scripts/examples/02-Image-Processing/01-Image-Filters/mean_adaptive_threshold_filter.py +++ b/scripts/examples/02-Image-Processing/01-Image-Filters/mean_adaptive_threshold_filter.py @@ -4,7 +4,8 @@ # When mean(threshold=True) the mean() method adaptive thresholds the image # by comparing the mean of the pixels around a pixel, minus an offset, with that pixel. -import sensor, image, time +import sensor +import time sensor.reset() # Initialize the camera sensor. sensor.set_pixformat(sensor.RGB565) # or sensor.GRAYSCALE diff --git a/scripts/examples/02-Image-Processing/01-Image-Filters/mean_filter.py b/scripts/examples/02-Image-Processing/01-Image-Filters/mean_filter.py index c6de0c81b..132a594d2 100644 --- a/scripts/examples/02-Image-Processing/01-Image-Filters/mean_filter.py +++ b/scripts/examples/02-Image-Processing/01-Image-Filters/mean_filter.py @@ -4,7 +4,8 @@ # filter in an NxN neighborhood. Mean filtering removes noise in the image by # blurring everything. But, it's the fastest kernel filter operation. -import sensor, image, time +import sensor +import time sensor.reset() # Initialize the camera sensor.
sensor.set_pixformat(sensor.RGB565) # or sensor.GRAYSCALE diff --git a/scripts/examples/02-Image-Processing/01-Image-Filters/median_adaptive_threshold_filter.py b/scripts/examples/02-Image-Processing/01-Image-Filters/median_adaptive_threshold_filter.py index 673b28482..a81b3b9ac 100644 --- a/scripts/examples/02-Image-Processing/01-Image-Filters/median_adaptive_threshold_filter.py +++ b/scripts/examples/02-Image-Processing/01-Image-Filters/median_adaptive_threshold_filter.py @@ -4,7 +4,8 @@ # When median(threshold=True) the median() method adaptive thresholds the image # by comparing the median of the pixels around a pixel, minus an offset, with that pixel. -import sensor, image, time +import sensor +import time sensor.reset() # Initialize the camera sensor. sensor.set_pixformat(sensor.RGB565) # or sensor.GRAYSCALE diff --git a/scripts/examples/02-Image-Processing/01-Image-Filters/median_filter.py b/scripts/examples/02-Image-Processing/01-Image-Filters/median_filter.py index 441464a36..5a2193194 100644 --- a/scripts/examples/02-Image-Processing/01-Image-Filters/median_filter.py +++ b/scripts/examples/02-Image-Processing/01-Image-Filters/median_filter.py @@ -4,7 +4,8 @@ # with the median value of its NxN neighborhood. Median filtering is good for # removing noise in the image while preserving edges. -import sensor, image, time +import sensor +import time sensor.reset() # Initialize the camera sensor. sensor.set_pixformat(sensor.RGB565) # or sensor.GRAYSCALE diff --git a/scripts/examples/02-Image-Processing/01-Image-Filters/midpoint_adaptive_threshold_filter.py b/scripts/examples/02-Image-Processing/01-Image-Filters/midpoint_adaptive_threshold_filter.py index adaeaaa5d..39549205a 100644 --- a/scripts/examples/02-Image-Processing/01-Image-Filters/midpoint_adaptive_threshold_filter.py +++ b/scripts/examples/02-Image-Processing/01-Image-Filters/midpoint_adaptive_threshold_filter.py @@ -4,7 +4,8 @@ # When midpoint(threshold=True) the midpoint() method adaptive thresholds the image # by comparing the midpoint of the pixels around a pixel, minus an offset, with that pixel. -import sensor, image, time +import sensor +import time sensor.reset() # Initialize the camera sensor. sensor.set_pixformat(sensor.RGB565) # or sensor.GRAYSCALE diff --git a/scripts/examples/02-Image-Processing/01-Image-Filters/midpoint_filter.py b/scripts/examples/02-Image-Processing/01-Image-Filters/midpoint_filter.py index ee9ab5d94..340f47cad 100644 --- a/scripts/examples/02-Image-Processing/01-Image-Filters/midpoint_filter.py +++ b/scripts/examples/02-Image-Processing/01-Image-Filters/midpoint_filter.py @@ -3,7 +3,8 @@ # This example shows off midpoint filtering. Midpoint filtering replaces each # pixel by the average of the min and max pixel values for an NxN neighborhood. -import sensor, image, time +import sensor +import time sensor.reset() # Initialize the camera sensor. sensor.set_pixformat(sensor.RGB565) # or sensor.GRAYSCALE diff --git a/scripts/examples/02-Image-Processing/01-Image-Filters/mode_adaptive_threshold_filter.py b/scripts/examples/02-Image-Processing/01-Image-Filters/mode_adaptive_threshold_filter.py index 8ab9a0675..29ae76b52 100644 --- a/scripts/examples/02-Image-Processing/01-Image-Filters/mode_adaptive_threshold_filter.py +++ b/scripts/examples/02-Image-Processing/01-Image-Filters/mode_adaptive_threshold_filter.py @@ -5,7 +5,8 @@ # by comparing the mode of the pixels around a pixel, minus an offset, with that pixel. # Avoid using the mode filter on RGB565 images.
It will cause artifacts on image edges... -import sensor, image, time +import sensor +import time sensor.reset() # Initialize the camera sensor. sensor.set_pixformat(sensor.GRAYSCALE) # or sensor.RGB565 diff --git a/scripts/examples/02-Image-Processing/01-Image-Filters/mode_filter.py b/scripts/examples/02-Image-Processing/01-Image-Filters/mode_filter.py index 170937c58..cc0135bf7 100644 --- a/scripts/examples/02-Image-Processing/01-Image-Filters/mode_filter.py +++ b/scripts/examples/02-Image-Processing/01-Image-Filters/mode_filter.py @@ -5,7 +5,8 @@ # of pixels around it. Avoid using the mode filter on RGB565 images. It will # cause artifacts on image edges... -import sensor, image, time +import sensor +import time sensor.reset() # Initialize the camera sensor. sensor.set_pixformat(sensor.GRAYSCALE) # or sensor.RGB565 diff --git a/scripts/examples/02-Image-Processing/01-Image-Filters/negative.py b/scripts/examples/02-Image-Processing/01-Image-Filters/negative.py index 36186cba9..ff89a26ef 100644 --- a/scripts/examples/02-Image-Processing/01-Image-Filters/negative.py +++ b/scripts/examples/02-Image-Processing/01-Image-Filters/negative.py @@ -3,7 +3,8 @@ # This example shows off negating the image. This is not a particularly # useful method but it can come in handy once in a while. -import sensor, image, time +import sensor +import time sensor.reset() # Initialize the camera sensor. sensor.set_pixformat(sensor.RGB565) # or sensor.GRAYSCALE diff --git a/scripts/examples/02-Image-Processing/01-Image-Filters/perspective_and_rotation_correction.py b/scripts/examples/02-Image-Processing/01-Image-Filters/perspective_and_rotation_correction.py index b89117d40..77c3f8df6 100644 --- a/scripts/examples/02-Image-Processing/01-Image-Filters/perspective_and_rotation_correction.py +++ b/scripts/examples/02-Image-Processing/01-Image-Filters/perspective_and_rotation_correction.py @@ -4,7 +4,8 @@ # perspective distortion and then to rotate the new corrected image in 3D # space afterwards to handle movement. -import sensor, image, time +import sensor +import time sensor.reset() sensor.set_pixformat(sensor.RGB565) diff --git a/scripts/examples/02-Image-Processing/01-Image-Filters/perspective_correction.py b/scripts/examples/02-Image-Processing/01-Image-Filters/perspective_correction.py index a8f2aa58d..ba8ec3849 100644 --- a/scripts/examples/02-Image-Processing/01-Image-Filters/perspective_correction.py +++ b/scripts/examples/02-Image-Processing/01-Image-Filters/perspective_correction.py @@ -3,7 +3,8 @@ # This example shows off how to use the rotation_corr() to fix perspective # issues related to how your OpenMV Cam is mounted. -import sensor, image, time +import sensor +import time sensor.reset() sensor.set_pixformat(sensor.RGB565) diff --git a/scripts/examples/02-Image-Processing/01-Image-Filters/rotation_correction.py b/scripts/examples/02-Image-Processing/01-Image-Filters/rotation_correction.py index b95e41d78..0eef3421c 100644 --- a/scripts/examples/02-Image-Processing/01-Image-Filters/rotation_correction.py +++ b/scripts/examples/02-Image-Processing/01-Image-Filters/rotation_correction.py @@ -3,7 +3,8 @@ # This example shows off how to use the rotation_corr() to play with the scene # window your OpenMV Cam sees. -import sensor, image, time +import sensor +import time # Degrees per frame to rotate by...
X_ROTATION_DEGREE_RATE = 5 diff --git a/scripts/examples/02-Image-Processing/01-Image-Filters/sharpen_filter.py b/scripts/examples/02-Image-Processing/01-Image-Filters/sharpen_filter.py index 0f541e203..e6165c431 100644 --- a/scripts/examples/02-Image-Processing/01-Image-Filters/sharpen_filter.py +++ b/scripts/examples/02-Image-Processing/01-Image-Filters/sharpen_filter.py @@ -2,7 +2,8 @@ # # This example shows off using the laplacian filter to sharpen images. -import sensor, image, time +import sensor +import time sensor.reset() # Initialize the camera sensor. sensor.set_pixformat(sensor.GRAYSCALE) # or sensor.RGB565 diff --git a/scripts/examples/02-Image-Processing/01-Image-Filters/ulab.py b/scripts/examples/02-Image-Processing/01-Image-Filters/ulab.py index b7cfa4927..f16c68798 100644 --- a/scripts/examples/02-Image-Processing/01-Image-Filters/ulab.py +++ b/scripts/examples/02-Image-Processing/01-Image-Filters/ulab.py @@ -4,7 +4,8 @@ # NOTE: ndarrays cause the heap to be fragmented easily. If you run out of memory, # there's not much that can be done about it; lowering the resolution might help. -import sensor, image, time +import sensor +import time from ulab import numpy as np sensor.reset() # Reset and initialize the sensor. diff --git a/scripts/examples/02-Image-Processing/01-Image-Filters/unsharp_filter.py b/scripts/examples/02-Image-Processing/01-Image-Filters/unsharp_filter.py index eb8eb2270..89bd423ee 100644 --- a/scripts/examples/02-Image-Processing/01-Image-Filters/unsharp_filter.py +++ b/scripts/examples/02-Image-Processing/01-Image-Filters/unsharp_filter.py @@ -2,7 +2,8 @@ # # This example shows off using the gaussian filter to unsharp mask filter images. -import sensor, image, time +import sensor +import time sensor.reset() # Initialize the camera sensor. sensor.set_pixformat(sensor.GRAYSCALE) # or sensor.RGB565 diff --git a/scripts/examples/02-Image-Processing/01-Image-Filters/vflip_hmirror_transpose.py b/scripts/examples/02-Image-Processing/01-Image-Filters/vflip_hmirror_transpose.py index bf43c4e73..3d8b225f8 100644 --- a/scripts/examples/02-Image-Processing/01-Image-Filters/vflip_hmirror_transpose.py +++ b/scripts/examples/02-Image-Processing/01-Image-Filters/vflip_hmirror_transpose.py @@ -8,7 +8,9 @@ # vflip=True, hmirror=True, transpose=False -> 180 degree rotation # vflip=False, hmirror=True, transpose=True -> 270 degree rotation -import sensor, image, time, pyb +import sensor +import time +import pyb sensor.reset() sensor.set_pixformat(sensor.RGB565) diff --git a/scripts/examples/02-Image-Processing/02-Color-Tracking/automatic_grayscale_color_tracking.py b/scripts/examples/02-Image-Processing/02-Color-Tracking/automatic_grayscale_color_tracking.py index 51f8a0a91..e52bde80c 100644 --- a/scripts/examples/02-Image-Processing/02-Color-Tracking/automatic_grayscale_color_tracking.py +++ b/scripts/examples/02-Image-Processing/02-Color-Tracking/automatic_grayscale_color_tracking.py @@ -2,7 +2,8 @@ # # This example shows off single color automatic grayscale color tracking using the OpenMV Cam. -import sensor, image, time +import sensor +import time print("Letting auto algorithms run.
Don't put anything in front of the camera!") sensor.reset() diff --git a/scripts/examples/02-Image-Processing/02-Color-Tracking/automatic_rgb565_color_tracking.py b/scripts/examples/02-Image-Processing/02-Color-Tracking/automatic_rgb565_color_tracking.py index 50a3f4e6e..af46fc50e 100644 --- a/scripts/examples/02-Image-Processing/02-Color-Tracking/automatic_rgb565_color_tracking.py +++ b/scripts/examples/02-Image-Processing/02-Color-Tracking/automatic_rgb565_color_tracking.py @@ -2,7 +2,8 @@ # # This example shows off single color automatic RGB565 color tracking using the OpenMV Cam. -import sensor, image, time +import sensor +import time print("Letting auto algorithms run. Don't put anything in front of the camera!") sensor.reset() diff --git a/scripts/examples/02-Image-Processing/02-Color-Tracking/black_grayscale_line_following.py b/scripts/examples/02-Image-Processing/02-Color-Tracking/black_grayscale_line_following.py index 6e0674b63..706340e40 100644 --- a/scripts/examples/02-Image-Processing/02-Color-Tracking/black_grayscale_line_following.py +++ b/scripts/examples/02-Image-Processing/02-Color-Tracking/black_grayscale_line_following.py @@ -10,7 +10,9 @@ # 45 or so degree angle. Please make sure that only the line is within the # camera's field of view. -import sensor, image, time, math +import sensor +import time +import math # Tracks a black line. Use [(128, 255)] for a tracking a white line. GRAYSCALE_THRESHOLD = [(0, 64)] diff --git a/scripts/examples/02-Image-Processing/02-Color-Tracking/image_histogram_info.py b/scripts/examples/02-Image-Processing/02-Color-Tracking/image_histogram_info.py index 04b386b3e..ac669b70c 100644 --- a/scripts/examples/02-Image-Processing/02-Color-Tracking/image_histogram_info.py +++ b/scripts/examples/02-Image-Processing/02-Color-Tracking/image_histogram_info.py @@ -2,7 +2,8 @@ # # This script computes the histogram of the image and prints it out. -import sensor, image, time +import sensor +import time sensor.reset() sensor.set_pixformat(sensor.GRAYSCALE) # or RGB565. diff --git a/scripts/examples/02-Image-Processing/02-Color-Tracking/image_statistics_info.py b/scripts/examples/02-Image-Processing/02-Color-Tracking/image_statistics_info.py index 04f306109..93c9459ab 100644 --- a/scripts/examples/02-Image-Processing/02-Color-Tracking/image_statistics_info.py +++ b/scripts/examples/02-Image-Processing/02-Color-Tracking/image_statistics_info.py @@ -2,7 +2,8 @@ # # This script computes the statistics of the image and prints it out. -import sensor, image, time +import sensor +import time sensor.reset() sensor.set_pixformat(sensor.GRAYSCALE) # or RGB565. diff --git a/scripts/examples/02-Image-Processing/02-Color-Tracking/ir_beacon_grayscale_tracking.py b/scripts/examples/02-Image-Processing/02-Color-Tracking/ir_beacon_grayscale_tracking.py index 0a4d9e0b7..4f95a2eb5 100644 --- a/scripts/examples/02-Image-Processing/02-Color-Tracking/ir_beacon_grayscale_tracking.py +++ b/scripts/examples/02-Image-Processing/02-Color-Tracking/ir_beacon_grayscale_tracking.py @@ -2,7 +2,8 @@ # # This example shows off IR beacon Grayscale tracking using the OpenMV Cam. -import sensor, image, time +import sensor +import time thresholds = (255, 255) # thresholds for bright white light from IR. 
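The color-tracking scripts in this directory all wrap the same find_blobs() skeleton; a compressed sketch with placeholder threshold values (the real scripts tune these and draw richer overlays):

    import sensor
    import time

    thresholds = [(200, 255)]  # Grayscale (min, max); placeholder values.

    sensor.reset()
    sensor.set_pixformat(sensor.GRAYSCALE)
    sensor.set_framesize(sensor.QVGA)
    sensor.skip_frames(time=2000)
    clock = time.clock()

    while True:
        clock.tick()
        img = sensor.snapshot()
        for blob in img.find_blobs(thresholds, pixels_threshold=100, area_threshold=100):
            img.draw_rectangle(blob.rect())       # Box the blob.
            img.draw_cross(blob.cx(), blob.cy())  # Mark its centroid.
        print(clock.fps())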
diff --git a/scripts/examples/02-Image-Processing/02-Color-Tracking/ir_beacon_rgb565_tracking.py b/scripts/examples/02-Image-Processing/02-Color-Tracking/ir_beacon_rgb565_tracking.py index 033908131..75d668750 100644 --- a/scripts/examples/02-Image-Processing/02-Color-Tracking/ir_beacon_rgb565_tracking.py +++ b/scripts/examples/02-Image-Processing/02-Color-Tracking/ir_beacon_rgb565_tracking.py @@ -2,7 +2,8 @@ # # This example shows off IR beacon RGB565 tracking using the OpenMV Cam. -import sensor, image, time +import sensor +import time thresholds = (100, 100, 0, 0, 0, 0) # thresholds for bright white light from IR. diff --git a/scripts/examples/02-Image-Processing/02-Color-Tracking/multi_color_blob_tracking.py b/scripts/examples/02-Image-Processing/02-Color-Tracking/multi_color_blob_tracking.py index bc94d257d..f14cd83ac 100644 --- a/scripts/examples/02-Image-Processing/02-Color-Tracking/multi_color_blob_tracking.py +++ b/scripts/examples/02-Image-Processing/02-Color-Tracking/multi_color_blob_tracking.py @@ -2,7 +2,9 @@ # # This example shows off multi color blob tracking using the OpenMV Cam. -import sensor, image, time, math +import sensor +import time +import math # Color Tracking Thresholds (L Min, L Max, A Min, A Max, B Min, B Max) # The below thresholds track in general red/green things. You may wish to tune them... diff --git a/scripts/examples/02-Image-Processing/02-Color-Tracking/multi_color_code_tracking.py b/scripts/examples/02-Image-Processing/02-Color-Tracking/multi_color_code_tracking.py index 917fb5057..06125db6f 100644 --- a/scripts/examples/02-Image-Processing/02-Color-Tracking/multi_color_code_tracking.py +++ b/scripts/examples/02-Image-Processing/02-Color-Tracking/multi_color_code_tracking.py @@ -5,7 +5,8 @@ # A color code is a blob composed of two or more colors. The example below will # only track colored objects which have two or more of the colors below in them. -import sensor, image, time +import sensor +import time # Color Tracking Thresholds (L Min, L Max, A Min, A Max, B Min, B Max) # The below thresholds track in general red/green things. You may wish to tune them... diff --git a/scripts/examples/02-Image-Processing/02-Color-Tracking/single_color_code_tracking.py b/scripts/examples/02-Image-Processing/02-Color-Tracking/single_color_code_tracking.py index ed8fa651e..72a15fe8e 100644 --- a/scripts/examples/02-Image-Processing/02-Color-Tracking/single_color_code_tracking.py +++ b/scripts/examples/02-Image-Processing/02-Color-Tracking/single_color_code_tracking.py @@ -5,7 +5,9 @@ # A color code is a blob composed of two or more colors. The example below will # only track colored objects which have both the colors below in them. -import sensor, image, time, math +import sensor +import time +import math # Color Tracking Thresholds (L Min, L Max, A Min, A Max, B Min, B Max) # The below thresholds track in general red/green things. You may wish to tune them... diff --git a/scripts/examples/02-Image-Processing/02-Color-Tracking/single_color_grayscale_blob_tracking.py b/scripts/examples/02-Image-Processing/02-Color-Tracking/single_color_grayscale_blob_tracking.py index 8f8b44d3b..ec5d907b4 100644 --- a/scripts/examples/02-Image-Processing/02-Color-Tracking/single_color_grayscale_blob_tracking.py +++ b/scripts/examples/02-Image-Processing/02-Color-Tracking/single_color_grayscale_blob_tracking.py @@ -2,7 +2,9 @@ # # This example shows off single color grayscale tracking using the OpenMV Cam.
-import sensor, image, time, math +import sensor +import time +import math # Color Tracking Thresholds (Grayscale Min, Grayscale Max) # The below grayscale threshold is set to only find extremely bright white areas. diff --git a/scripts/examples/02-Image-Processing/02-Color-Tracking/single_color_rgb565_blob_tracking.py b/scripts/examples/02-Image-Processing/02-Color-Tracking/single_color_rgb565_blob_tracking.py index e7dc5bec1..cf9843b13 100644 --- a/scripts/examples/02-Image-Processing/02-Color-Tracking/single_color_rgb565_blob_tracking.py +++ b/scripts/examples/02-Image-Processing/02-Color-Tracking/single_color_rgb565_blob_tracking.py @@ -2,7 +2,9 @@ # # This example shows off single color RGB565 tracking using the OpenMV Cam. -import sensor, image, time, math +import sensor +import time +import math threshold_index = 0 # 0 for red, 1 for green, 2 for blue diff --git a/scripts/examples/02-Image-Processing/03-Frame-Differencing/in_memory_advanced_frame_differencing.py b/scripts/examples/02-Image-Processing/03-Frame-Differencing/in_memory_advanced_frame_differencing.py index d130151a9..fde8e5785 100644 --- a/scripts/examples/02-Image-Processing/03-Frame-Differencing/in_memory_advanced_frame_differencing.py +++ b/scripts/examples/02-Image-Processing/03-Frame-Differencing/in_memory_advanced_frame_differencing.py @@ -4,7 +4,10 @@ # example is advanced because it performs a background update to deal with the # background image changing over time. -import sensor, image, pyb, os, time +import sensor +import pyb +import os +import time TRIGGER_THRESHOLD = 5 diff --git a/scripts/examples/02-Image-Processing/03-Frame-Differencing/in_memory_basic_frame_differencing.py b/scripts/examples/02-Image-Processing/03-Frame-Differencing/in_memory_basic_frame_differencing.py index cbe0daed2..2b1d438ab 100644 --- a/scripts/examples/02-Image-Processing/03-Frame-Differencing/in_memory_basic_frame_differencing.py +++ b/scripts/examples/02-Image-Processing/03-Frame-Differencing/in_memory_basic_frame_differencing.py @@ -4,7 +4,10 @@ # called basic frame differencing because there's no background image update. # So, as time passes the background image may change resulting in issues. -import sensor, image, pyb, os, time +import sensor +import pyb +import os +import time TRIGGER_THRESHOLD = 5 diff --git a/scripts/examples/02-Image-Processing/03-Frame-Differencing/in_memory_shadow_removal.py b/scripts/examples/02-Image-Processing/03-Frame-Differencing/in_memory_shadow_removal.py index b1f65895f..1f1bc7f9f 100644 --- a/scripts/examples/02-Image-Processing/03-Frame-Differencing/in_memory_shadow_removal.py +++ b/scripts/examples/02-Image-Processing/03-Frame-Differencing/in_memory_shadow_removal.py @@ -3,7 +3,10 @@ # This example demonstrates using frame differencing with your OpenMV Cam using # shadow removal to help reduce the effects of cast shadows in your scene. -import sensor, image, pyb, os, time +import sensor +import pyb +import os +import time TRIGGER_THRESHOLD = 5 diff --git a/scripts/examples/02-Image-Processing/03-Frame-Differencing/in_memory_structural_similarity.py b/scripts/examples/02-Image-Processing/03-Frame-Differencing/in_memory_structural_similarity.py index 18762ff81..e80342b71 100644 --- a/scripts/examples/02-Image-Processing/03-Frame-Differencing/in_memory_structural_similarity.py +++ b/scripts/examples/02-Image-Processing/03-Frame-Differencing/in_memory_structural_similarity.py @@ -5,7 +5,10 @@ # 8x8 blocks of pixels between two images to determine a similarity # score between two images.
-import sensor, image, pyb, os, time +import sensor +import pyb +import os +import time # The image has likely changed if the sim.min() is lower than this. MIN_TRIGGER_THRESHOLD = -0.4 diff --git a/scripts/examples/02-Image-Processing/03-Frame-Differencing/on_disk_advanced_frame_differencing.py b/scripts/examples/02-Image-Processing/03-Frame-Differencing/on_disk_advanced_frame_differencing.py index 503643d13..9725b7abd 100644 --- a/scripts/examples/02-Image-Processing/03-Frame-Differencing/on_disk_advanced_frame_differencing.py +++ b/scripts/examples/02-Image-Processing/03-Frame-Differencing/on_disk_advanced_frame_differencing.py @@ -6,7 +6,10 @@ # example is advanced because it performs a background update to deal with the # background image changing over time. -import sensor, image, pyb, os, time +import sensor +import pyb +import os +import time TRIGGER_THRESHOLD = 5 diff --git a/scripts/examples/02-Image-Processing/03-Frame-Differencing/on_disk_basic_frame_differencing.py b/scripts/examples/02-Image-Processing/03-Frame-Differencing/on_disk_basic_frame_differencing.py index 2b0b775e9..93b937b0f 100644 --- a/scripts/examples/02-Image-Processing/03-Frame-Differencing/on_disk_basic_frame_differencing.py +++ b/scripts/examples/02-Image-Processing/03-Frame-Differencing/on_disk_basic_frame_differencing.py @@ -6,7 +6,10 @@ # called basic frame differencing because there's no background image update. # So, as time passes the background image may change resulting in issues. -import sensor, image, pyb, os, time +import sensor +import pyb +import os +import time TRIGGER_THRESHOLD = 5 diff --git a/scripts/examples/02-Image-Processing/03-Frame-Differencing/on_disk_shadow_removal.py b/scripts/examples/02-Image-Processing/03-Frame-Differencing/on_disk_shadow_removal.py index a48677ae8..b6062f851 100644 --- a/scripts/examples/02-Image-Processing/03-Frame-Differencing/on_disk_shadow_removal.py +++ b/scripts/examples/02-Image-Processing/03-Frame-Differencing/on_disk_shadow_removal.py @@ -5,7 +5,10 @@ # This example demonstrates using frame differencing with your OpenMV Cam using # shadow removal to help reduce the effects of cast shadows in your scene. -import sensor, image, pyb, os, time +import sensor +import pyb +import os +import time TRIGGER_THRESHOLD = 5 diff --git a/scripts/examples/02-Image-Processing/03-Frame-Differencing/on_disk_structural_similarity.py b/scripts/examples/02-Image-Processing/03-Frame-Differencing/on_disk_structural_similarity.py index 4a1e73706..151d43c24 100644 --- a/scripts/examples/02-Image-Processing/03-Frame-Differencing/on_disk_structural_similarity.py +++ b/scripts/examples/02-Image-Processing/03-Frame-Differencing/on_disk_structural_similarity.py @@ -7,7 +7,10 @@ # 8x8 blocks of pixels between two images to determine a similarity # score between two images. -import sensor, image, pyb, os, time +import sensor +import pyb +import os +import time # The image has likely changed if the sim.min() is lower than this. MIN_TRIGGER_THRESHOLD = -0.4 diff --git a/scripts/examples/03-Machine-Learning/00-TensorFlow/tf_image_classification.py b/scripts/examples/03-Machine-Learning/00-TensorFlow/tf_image_classification.py index a29379c91..6c3a557f4 100644 --- a/scripts/examples/03-Machine-Learning/00-TensorFlow/tf_image_classification.py +++ b/scripts/examples/03-Machine-Learning/00-TensorFlow/tf_image_classification.py @@ -18,7 +18,10 @@ # of activations.
Note that using a CNN with a sliding window is extremely compute # expensive, so for an exhaustive search do not expect the CNN to be real-time. -import sensor, image, time, os, tf +import sensor +import time +import os +import tf sensor.reset() # Reset and initialize the sensor. sensor.set_pixformat(sensor.RGB565) # Set pixel format to RGB565 (or GRAYSCALE) diff --git a/scripts/examples/03-Machine-Learning/00-TensorFlow/tf_object_detection.py b/scripts/examples/03-Machine-Learning/00-TensorFlow/tf_object_detection.py index cf0625655..571ffe71e 100644 --- a/scripts/examples/03-Machine-Learning/00-TensorFlow/tf_object_detection.py +++ b/scripts/examples/03-Machine-Learning/00-TensorFlow/tf_object_detection.py @@ -2,7 +2,10 @@ # # This example uses the built-in FOMO model to detect faces. -import sensor, image, time, tf, math +import sensor +import time +import tf +import math sensor.reset() # Reset and initialize the sensor. sensor.set_pixformat(sensor.RGB565) # Set pixel format to RGB565 (or GRAYSCALE) diff --git a/scripts/examples/03-Machine-Learning/01-ST-CubeAI/nn_stm32cubeai.py b/scripts/examples/03-Machine-Learning/01-ST-CubeAI/nn_stm32cubeai.py index 542aaf458..4b53e8d50 100644 --- a/scripts/examples/03-Machine-Learning/01-ST-CubeAI/nn_stm32cubeai.py +++ b/scripts/examples/03-Machine-Learning/01-ST-CubeAI/nn_stm32cubeai.py @@ -1,7 +1,9 @@ # STM32 CUBE.AI on OpenMV MNIST Example # See https://github.com/openmv/openmv/blob/master/src/stm32cubeai/README.MD -import sensor, image, time, nn_st +import sensor +import time +import nn_st sensor.reset() # Reset and initialize the sensor. sensor.set_contrast(3) diff --git a/scripts/examples/03-Machine-Learning/02-Haar-Cascade/face_detection.py b/scripts/examples/03-Machine-Learning/02-Haar-Cascade/face_detection.py index aca735d33..12da0291a 100644 --- a/scripts/examples/03-Machine-Learning/02-Haar-Cascade/face_detection.py +++ b/scripts/examples/03-Machine-Learning/02-Haar-Cascade/face_detection.py @@ -11,7 +11,9 @@ # contrast check in constant time (the reason for feature detection being # grayscale only is because of the space requirement for the integral image). -import sensor, time, image +import sensor +import time +import image # Reset sensor sensor.reset() diff --git a/scripts/examples/03-Machine-Learning/02-Haar-Cascade/face_eye_detection.py b/scripts/examples/03-Machine-Learning/02-Haar-Cascade/face_eye_detection.py index 13e5ab454..a166bede1 100644 --- a/scripts/examples/03-Machine-Learning/02-Haar-Cascade/face_eye_detection.py +++ b/scripts/examples/03-Machine-Learning/02-Haar-Cascade/face_eye_detection.py @@ -4,7 +4,9 @@ # the eyes within the face. If you want to determine the eye gaze please see the # iris_detection script for an example on how to do that. -import sensor, time, image +import sensor +import time +import image # Reset sensor sensor.reset() diff --git a/scripts/examples/03-Machine-Learning/02-Haar-Cascade/face_recognition.py b/scripts/examples/03-Machine-Learning/02-Haar-Cascade/face_recognition.py index 8a514664a..a68ccfc41 100644 --- a/scripts/examples/03-Machine-Learning/02-Haar-Cascade/face_recognition.py +++ b/scripts/examples/03-Machine-Learning/02-Haar-Cascade/face_recognition.py @@ -7,7 +7,9 @@ # # NOTE: This is just a PoC implementation of the paper mentioned above, it does not work well in real life conditions.
-import sensor, time, image +import sensor +import time +import image SUB = "s2" NUM_SUBJECTS = 5 diff --git a/scripts/examples/03-Machine-Learning/02-Haar-Cascade/face_tracking.py b/scripts/examples/03-Machine-Learning/02-Haar-Cascade/face_tracking.py index f4571e872..7da30278f 100644 --- a/scripts/examples/03-Machine-Learning/02-Haar-Cascade/face_tracking.py +++ b/scripts/examples/03-Machine-Learning/02-Haar-Cascade/face_tracking.py @@ -5,7 +5,9 @@ # script finds a face in the image using the frontalface Haar Cascade. # After which the script uses the keypoints feature to automatically learn your # face and track it. Keypoints can be used to automatically track anything. -import sensor, time, image +import sensor +import time +import image # Reset sensor sensor.reset() diff --git a/scripts/examples/03-Machine-Learning/02-Haar-Cascade/iris_detection.py b/scripts/examples/03-Machine-Learning/02-Haar-Cascade/iris_detection.py index 9d51498c3..6fc0e263f 100644 --- a/scripts/examples/03-Machine-Learning/02-Haar-Cascade/iris_detection.py +++ b/scripts/examples/03-Machine-Learning/02-Haar-Cascade/iris_detection.py @@ -7,7 +7,9 @@ # # Note: This script does not detect a face first, use it with the telephoto lens. -import sensor, time, image +import sensor +import time +import image # Reset sensor sensor.reset() diff --git a/scripts/examples/04-Barcodes/find_barcodes.py b/scripts/examples/04-Barcodes/find_barcodes.py index 07dee507c..775034cb3 100644 --- a/scripts/examples/04-Barcodes/find_barcodes.py +++ b/scripts/examples/04-Barcodes/find_barcodes.py @@ -3,7 +3,10 @@ # This example shows off how easy it is to detect bar codes using the # OpenMV Cam M7. Barcode detection does not work on the M4 Camera. -import sensor, image, time, math +import sensor +import image +import time +import math sensor.reset() sensor.set_pixformat(sensor.GRAYSCALE) diff --git a/scripts/examples/04-Barcodes/find_datamatrices.py b/scripts/examples/04-Barcodes/find_datamatrices.py index 95d0fe38a..a8df0caa9 100644 --- a/scripts/examples/04-Barcodes/find_datamatrices.py +++ b/scripts/examples/04-Barcodes/find_datamatrices.py @@ -3,7 +3,9 @@ # This example shows off how easy it is to detect data matrices using the # OpenMV Cam M7. Data matrices detection does not work on the M4 Camera. -import sensor, image, time, math +import sensor +import time +import math sensor.reset() sensor.set_pixformat(sensor.RGB565) diff --git a/scripts/examples/04-Barcodes/find_datamatrices_w_lens_zoom.py b/scripts/examples/04-Barcodes/find_datamatrices_w_lens_zoom.py index 889326df1..751cad8bd 100644 --- a/scripts/examples/04-Barcodes/find_datamatrices_w_lens_zoom.py +++ b/scripts/examples/04-Barcodes/find_datamatrices_w_lens_zoom.py @@ -3,7 +3,9 @@ # This example shows off how easy it is to detect data matrices using the # OpenMV Cam M7. Data matrices detection does not work on the M4 Camera. -import sensor, image, time, math +import sensor +import time +import math sensor.reset() sensor.set_pixformat(sensor.RGB565) diff --git a/scripts/examples/04-Barcodes/qrcodes_with_lens_corr.py b/scripts/examples/04-Barcodes/qrcodes_with_lens_corr.py index 06b839f49..cecd345a3 100644 --- a/scripts/examples/04-Barcodes/qrcodes_with_lens_corr.py +++ b/scripts/examples/04-Barcodes/qrcodes_with_lens_corr.py @@ -3,7 +3,8 @@ # This example shows the power of the OpenMV Cam to detect QR Codes # using lens correction (see the qrcodes_with_lens_corr.py script for higher performance). 
-import sensor, image, time +import sensor +import time sensor.reset() sensor.set_pixformat(sensor.RGB565) diff --git a/scripts/examples/04-Barcodes/qrcodes_with_lens_zoom.py b/scripts/examples/04-Barcodes/qrcodes_with_lens_zoom.py index c9798a4bf..5a39d3dbe 100644 --- a/scripts/examples/04-Barcodes/qrcodes_with_lens_zoom.py +++ b/scripts/examples/04-Barcodes/qrcodes_with_lens_zoom.py @@ -3,7 +3,8 @@ # This example shows the power of the OpenMV Cam to detect QR Codes # without needing lens correction. -import sensor, image, time +import sensor +import time sensor.reset() sensor.set_pixformat(sensor.GRAYSCALE) diff --git a/scripts/examples/05-Feature-Detection/edges.py b/scripts/examples/05-Feature-Detection/edges.py index 166820b72..d84fa7b2f 100644 --- a/scripts/examples/05-Feature-Detection/edges.py +++ b/scripts/examples/05-Feature-Detection/edges.py @@ -1,7 +1,9 @@ # Edge detection with Canny: # # This example demonstrates the Canny edge detector. -import sensor, image, time +import sensor +import image +import time sensor.reset() # Initialize the camera sensor. sensor.set_pixformat(sensor.GRAYSCALE) # or sensor.RGB565 diff --git a/scripts/examples/05-Feature-Detection/find_circles.py b/scripts/examples/05-Feature-Detection/find_circles.py index a208b4a5b..445766fde 100644 --- a/scripts/examples/05-Feature-Detection/find_circles.py +++ b/scripts/examples/05-Feature-Detection/find_circles.py @@ -6,7 +6,8 @@ # Note that the find_circles() method will only find circles which are completely # inside of the image. Circles which go outside of the image/roi are ignored... -import sensor, image, time +import sensor +import time sensor.reset() sensor.set_pixformat(sensor.RGB565) # grayscale is faster diff --git a/scripts/examples/05-Feature-Detection/find_line_segments.py b/scripts/examples/05-Feature-Detection/find_line_segments.py index 4aa42cf17..dd86a10f3 100644 --- a/scripts/examples/05-Feature-Detection/find_line_segments.py +++ b/scripts/examples/05-Feature-Detection/find_line_segments.py @@ -8,7 +8,8 @@ enable_lens_corr = False # turn on for straighter lines... -import sensor, image, time +import sensor +import time sensor.reset() sensor.set_pixformat(sensor.RGB565) # grayscale is faster diff --git a/scripts/examples/05-Feature-Detection/find_lines.py b/scripts/examples/05-Feature-Detection/find_lines.py index 6c45fcfbc..6675ec081 100644 --- a/scripts/examples/05-Feature-Detection/find_lines.py +++ b/scripts/examples/05-Feature-Detection/find_lines.py @@ -11,7 +11,8 @@ enable_lens_corr = False # turn on for straighter lines... -import sensor, image, time +import sensor +import time sensor.reset() sensor.set_pixformat(sensor.RGB565) # grayscale is faster diff --git a/scripts/examples/05-Feature-Detection/find_rects.py b/scripts/examples/05-Feature-Detection/find_rects.py index 5fafba626..d56f37558 100644 --- a/scripts/examples/05-Feature-Detection/find_rects.py +++ b/scripts/examples/05-Feature-Detection/find_rects.py @@ -7,7 +7,8 @@ # distortion causes those rectangles to look bent. Rounded rectangles are no problem! # (But, given this the code will also detect small radius circles too)... 
-import sensor, image, time +import sensor +import time sensor.reset() sensor.set_pixformat(sensor.RGB565) # grayscale is faster (160x120 max on OpenMV-M7) diff --git a/scripts/examples/05-Feature-Detection/hog.py b/scripts/examples/05-Feature-Detection/hog.py index 7bc17446b..5a9e6a300 100644 --- a/scripts/examples/05-Feature-Detection/hog.py +++ b/scripts/examples/05-Feature-Detection/hog.py @@ -5,7 +5,8 @@ # Note: Due to JPEG artifacts, the HoG visualization looks blurry. To see the # image without JPEG artifacts, uncomment the lines that save the image to uSD. -import sensor, image, time +import sensor +import time sensor.reset() # Set sensor settings diff --git a/scripts/examples/05-Feature-Detection/keypoints.py b/scripts/examples/05-Feature-Detection/keypoints.py index 0c4316a4d..f2a2f286f 100644 --- a/scripts/examples/05-Feature-Detection/keypoints.py +++ b/scripts/examples/05-Feature-Detection/keypoints.py @@ -2,7 +2,9 @@ # Show the camera an object and then run the script. A set of keypoints will be extracted # once and then tracked in the following frames. If you want a new set of keypoints re-run # the script. NOTE: see the docs for arguments to tune find_keypoints and match_keypoints. -import sensor, time, image +import sensor +import time +import image # Reset sensor sensor.reset() diff --git a/scripts/examples/05-Feature-Detection/keypoints_save.py b/scripts/examples/05-Feature-Detection/keypoints_save.py index bcd1a2edf..6230ab0de 100644 --- a/scripts/examples/05-Feature-Detection/keypoints_save.py +++ b/scripts/examples/05-Feature-Detection/keypoints_save.py @@ -4,7 +4,9 @@ # You can use the keypoints_editor.py util to remove unwanted keypoints. # # NOTE: Please reset the camera after running this script to see the new file. -import sensor, time, image +import sensor +import time +import image # Reset sensor sensor.reset() diff --git a/scripts/examples/05-Feature-Detection/lbp.py b/scripts/examples/05-Feature-Detection/lbp.py index b54838313..72e5ea19d 100644 --- a/scripts/examples/05-Feature-Detection/lbp.py +++ b/scripts/examples/05-Feature-Detection/lbp.py @@ -7,7 +7,9 @@ # a lot of work to be made into something useful. This script will remain to show # that the functionality exists, but, in its current state is inadequate. -import sensor, time, image +import sensor +import time +import image sensor.reset() # Reset sensor diff --git a/scripts/examples/05-Feature-Detection/linear_regression_fast.py b/scripts/examples/05-Feature-Detection/linear_regression_fast.py index f200e4ace..b34c7de7c 100644 --- a/scripts/examples/05-Feature-Detection/linear_regression_fast.py +++ b/scripts/examples/05-Feature-Detection/linear_regression_fast.py @@ -14,7 +14,8 @@ THRESHOLD = (0, 100) # Grayscale threshold for dark things... BINARY_VISIBLE = True # Does binary first so you can see what the linear regression # is being run on... might lower FPS though. -import sensor, image, time +import sensor +import time sensor.reset() sensor.set_pixformat(sensor.GRAYSCALE) diff --git a/scripts/examples/05-Feature-Detection/linear_regression_robust.py b/scripts/examples/05-Feature-Detection/linear_regression_robust.py index 9f24c618d..8764c01aa 100644 --- a/scripts/examples/05-Feature-Detection/linear_regression_robust.py +++ b/scripts/examples/05-Feature-Detection/linear_regression_robust.py @@ -16,7 +16,8 @@ THRESHOLD = (0, 100) # Grayscale threshold for dark things... BINARY_VISIBLE = True # Does binary first so you can see what the linear regression # is being run on... might lower FPS though.
-import sensor, image, time +import sensor +import time sensor.reset() sensor.set_pixformat(sensor.GRAYSCALE) diff --git a/scripts/examples/05-Feature-Detection/selective_search.py b/scripts/examples/05-Feature-Detection/selective_search.py index c44c931e9..c4b168a77 100644 --- a/scripts/examples/05-Feature-Detection/selective_search.py +++ b/scripts/examples/05-Feature-Detection/selective_search.py @@ -1,6 +1,7 @@ # Selective Search Example -import sensor, image, time +import sensor +import time from random import randint sensor.reset() # Reset and initialize the sensor. diff --git a/scripts/examples/05-Feature-Detection/template_matching.py b/scripts/examples/05-Feature-Detection/template_matching.py index 79d9c66a7..a18e80236 100644 --- a/scripts/examples/05-Feature-Detection/template_matching.py +++ b/scripts/examples/05-Feature-Detection/template_matching.py @@ -8,7 +8,9 @@ # a lot of work to be made into something useful. This script will remain to show # that the functionality exists, but, in its current state is inadequate. -import time, sensor, image +import time +import sensor +import image from image import SEARCH_EX, SEARCH_DS # Reset sensor diff --git a/scripts/examples/06-April-Tags/find_apriltags.py b/scripts/examples/06-April-Tags/find_apriltags.py index 02409b0f0..8b3de069a 100644 --- a/scripts/examples/06-April-Tags/find_apriltags.py +++ b/scripts/examples/06-April-Tags/find_apriltags.py @@ -3,7 +3,10 @@ # This example shows the power of the OpenMV Cam to detect April Tags # on the OpenMV Cam M7. The M4 versions cannot detect April Tags. -import sensor, image, time, math +import sensor +import image +import time +import math sensor.reset() sensor.set_pixformat(sensor.RGB565) diff --git a/scripts/examples/06-April-Tags/find_apriltags_3d_pose.py b/scripts/examples/06-April-Tags/find_apriltags_3d_pose.py index 64e763253..64cbd6955 100644 --- a/scripts/examples/06-April-Tags/find_apriltags_3d_pose.py +++ b/scripts/examples/06-April-Tags/find_apriltags_3d_pose.py @@ -3,7 +3,9 @@ # This example shows the power of the OpenMV Cam to detect April Tags # on the OpenMV Cam M7. The M4 versions cannot detect April Tags. -import sensor, image, time, math +import sensor +import time +import math sensor.reset() sensor.set_pixformat(sensor.RGB565) diff --git a/scripts/examples/06-April-Tags/find_apriltags_max_res.py b/scripts/examples/06-April-Tags/find_apriltags_max_res.py index 82f05427c..b28b1889c 100644 --- a/scripts/examples/06-April-Tags/find_apriltags_max_res.py +++ b/scripts/examples/06-April-Tags/find_apriltags_max_res.py @@ -3,7 +3,11 @@ # This example shows the power of the OpenMV Cam to detect April Tags # on the OpenMV Cam M7. The M4 versions cannot detect April Tags. -import sensor, image, time, math, omv +import sensor +import image +import time +import math +import omv sensor.reset() sensor.set_pixformat(sensor.GRAYSCALE) diff --git a/scripts/examples/06-April-Tags/find_apriltags_w_lens_zoom.py b/scripts/examples/06-April-Tags/find_apriltags_w_lens_zoom.py index 3778ccb68..b1dfbd0c9 100644 --- a/scripts/examples/06-April-Tags/find_apriltags_w_lens_zoom.py +++ b/scripts/examples/06-April-Tags/find_apriltags_w_lens_zoom.py @@ -3,7 +3,9 @@ # This example shows the power of the OpenMV Cam to detect April Tags # on the OpenMV Cam M7. The M4 versions cannot detect April Tags.
-import sensor, image, time, math +import sensor +import time +import math sensor.reset() sensor.set_pixformat(sensor.RGB565) diff --git a/scripts/examples/06-April-Tags/find_small_apriltags.py b/scripts/examples/06-April-Tags/find_small_apriltags.py index cbd54bf9e..b2709c170 100644 --- a/scripts/examples/06-April-Tags/find_small_apriltags.py +++ b/scripts/examples/06-April-Tags/find_small_apriltags.py @@ -9,7 +9,10 @@ # pass the thresholding test... otherwise, you don't get a distance # benefit. -import sensor, image, time, math, omv +import sensor +import image +import time +import omv # Set the thresholds to find a white object (i.e. tag border) thresholds = (150, 255) diff --git a/scripts/examples/07-Interface-Library/00-Arduino/arduino_i2c_slave.py b/scripts/examples/07-Interface-Library/00-Arduino/arduino_i2c_slave.py index 41aa5e09d..fd633e7d3 100644 --- a/scripts/examples/07-Interface-Library/00-Arduino/arduino_i2c_slave.py +++ b/scripts/examples/07-Interface-Library/00-Arduino/arduino_i2c_slave.py @@ -6,7 +6,8 @@ # OpenMV Cam Master I2C Clock (P4) - Arduino Uno Clock (A5) # OpenMV Cam Ground - Arduino Ground -import pyb, ustruct +import pyb +import ustruct text = "Hello World!\n" data = ustruct.pack("<%ds" % len(text), text) diff --git a/scripts/examples/07-Interface-Library/00-Arduino/arduino_spi_slave.py b/scripts/examples/07-Interface-Library/00-Arduino/arduino_spi_slave.py index 7b3131c2f..857187731 100644 --- a/scripts/examples/07-Interface-Library/00-Arduino/arduino_spi_slave.py +++ b/scripts/examples/07-Interface-Library/00-Arduino/arduino_spi_slave.py @@ -8,7 +8,9 @@ # OpenMV Cam Slave Select (P3) - Arduino Uno SS (10) # OpenMV Cam Ground - Arduino Ground -import pyb, ustruct, time +import pyb +import ustruct +import time text = "Hello World!\n" data = ustruct.pack("<%ds" % len(text), text) # $> sudo ifconfig eth0 192.168.1.100 up # $> ping 192.168.1.102 -import network, time +import network +import time lan = network.LAN() lan.active(True) diff --git a/scripts/examples/10-Arduino-Boards/Portenta-H7/06-Low-Power/deep_sleep.py b/scripts/examples/10-Arduino-Boards/Portenta-H7/06-Low-Power/deep_sleep.py index 5dd595604..4fb26c7b2 100644 --- a/scripts/examples/10-Arduino-Boards/Portenta-H7/06-Low-Power/deep_sleep.py +++ b/scripts/examples/10-Arduino-Boards/Portenta-H7/06-Low-Power/deep_sleep.py @@ -2,7 +2,9 @@ # This example demonstrates the low-power deep sleep mode plus sensor shutdown. # Note the camera will reset after wake-up from deep sleep. To find out if the cause of reset # is deep sleep, call the machine.reset_cause() function and test for machine.DEEPSLEEP_RESET -import pyb, machine, sensor +import pyb +import machine +import sensor # Create and init RTC object. rtc = pyb.RTC() diff --git a/scripts/examples/10-Arduino-Boards/Portenta-H7/06-Low-Power/extint_wakeup.py b/scripts/examples/10-Arduino-Boards/Portenta-H7/06-Low-Power/extint_wakeup.py index 7ce436696..a80f5ef4f 100644 --- a/scripts/examples/10-Arduino-Boards/Portenta-H7/06-Low-Power/extint_wakeup.py +++ b/scripts/examples/10-Arduino-Boards/Portenta-H7/06-Low-Power/extint_wakeup.py @@ -1,7 +1,9 @@ # ExtInt Wake-Up from Stop Mode Example # This example demonstrates using external interrupts to wake up from low-power mode.
-import time, pyb, machine +import time +import pyb +import machine from pyb import Pin, ExtInt def callback(line): diff --git a/scripts/examples/10-Arduino-Boards/Portenta-H7/06-Low-Power/himax_wakeup_on_motion_detection.py b/scripts/examples/10-Arduino-Boards/Portenta-H7/06-Low-Power/himax_wakeup_on_motion_detection.py index 9aa5f504e..45f3b7ab3 100644 --- a/scripts/examples/10-Arduino-Boards/Portenta-H7/06-Low-Power/himax_wakeup_on_motion_detection.py +++ b/scripts/examples/10-Arduino-Boards/Portenta-H7/06-Low-Power/himax_wakeup_on_motion_detection.py @@ -1,7 +1,10 @@ # This example shows how to use the Himax Motion Detection feature # to wake up from low-power Stop Mode on motion detection interrupts. -import sensor, image, time, pyb, machine +import sensor +import time +import pyb +import machine from pyb import Pin, ExtInt sensor.reset() diff --git a/scripts/examples/10-Arduino-Boards/Portenta-H7/06-Low-Power/sensor_sleep.py b/scripts/examples/10-Arduino-Boards/Portenta-H7/06-Low-Power/sensor_sleep.py index feaa0c633..33185f097 100644 --- a/scripts/examples/10-Arduino-Boards/Portenta-H7/06-Low-Power/sensor_sleep.py +++ b/scripts/examples/10-Arduino-Boards/Portenta-H7/06-Low-Power/sensor_sleep.py @@ -2,7 +2,8 @@ # This example demonstrates the sensor sleep mode. The sleep mode saves around # 40mA when enabled and it's automatically cleared when calling sensor.reset(). -import sensor, image, time +import sensor +import time sensor.reset() # Reset and initialize the sensor. sensor.set_pixformat(sensor.GRAYSCALE) # Set pixel format to GRAYSCALE (or RGB565) diff --git a/scripts/examples/10-Arduino-Boards/Portenta-H7/06-Low-Power/stop_mode.py b/scripts/examples/10-Arduino-Boards/Portenta-H7/06-Low-Power/stop_mode.py index 7c3c4abdc..e51b36b60 100644 --- a/scripts/examples/10-Arduino-Boards/Portenta-H7/06-Low-Power/stop_mode.py +++ b/scripts/examples/10-Arduino-Boards/Portenta-H7/06-Low-Power/stop_mode.py @@ -1,7 +1,9 @@ # Stop Mode Example # This example demonstrates using the low-power Stop Mode. -import time, pyb, machine +import time +import pyb +import machine # Create and init RTC object. rtc = pyb.RTC() From 27404d6bd21ac6124077732ecee32488ead1e043 Mon Sep 17 00:00:00 2001 From: iabdalkader Date: Wed, 5 Jul 2023 18:51:33 +0200 Subject: [PATCH 2/3] examples: Format scripts.
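
This patch does not name the formatter it used, but the per-file churn
below is consistent with running an automatic Python formatter over
every example script. A representative before/after sketch (invented
snippet, not a hunk from this series):

    # Before: cramped operators and drifting inline comments.
    clock=time.clock()
    sensor.set_framesize(sensor.QVGA)   # Set frame size to QVGA (320x240)

    # After: spaced assignments and two-space inline comments.
    clock = time.clock()
    sensor.set_framesize(sensor.QVGA)  # Set frame size to QVGA (320x240)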
--- scripts/examples/00-HelloWorld/helloworld.py | 20 +- .../01-Camera/00-Snapshot/emboss_snapshot.py | 16 +- .../01-Camera/00-Snapshot/snapshot.py | 12 +- .../00-Snapshot/snapshot_on_face_detection.py | 19 +- .../00-Snapshot/snapshot_on_movement.py | 26 +- .../00-Snapshot/time_lapse_photos.py | 90 +++++-- .../01-Camera/01-Video-Recording/gif.py | 14 +- .../gif_on_face_detection.py | 21 +- .../01-Video-Recording/gif_on_movement.py | 28 +- .../01-Video-Recording/imageio_memory.py | 10 +- .../01-Video-Recording/imageio_read.py | 20 +- .../01-Video-Recording/imageio_write.py | 12 +- .../01-Camera/01-Video-Recording/mjpeg.py | 12 +- .../mjpeg_on_face_detection.py | 19 +- .../01-Video-Recording/mjpeg_on_movement.py | 26 +- .../absolute-rotation-scale.py | 48 ++-- .../02-Optical-Flow/absolute-translation.py | 42 +-- .../differential-rotation-scale.py | 50 ++-- .../differential-translation.py | 42 +-- .../image-patches-absolute-rotation-scale.py | 76 ++++-- .../image-patches-absolute-translation.py | 61 +++-- ...age-patches-differential-rotation-scale.py | 75 ++++-- .../image-patches-differential-translation.py | 60 +++-- .../01-Camera/03-Event-Cameras/frogeye2020.py | 8 +- .../frogeye2020_with_tracking.py | 13 +- .../01-Camera/04-Global-Shutter/high_fps.py | 22 +- .../04-Global-Shutter/triggered_mode.py | 20 +- .../lepton_get_object_high_temp.py | 46 +++- .../05-FLIR-Lepton/lepton_get_object_temp.py | 44 +++- .../lepton_get_object_temp_color.py | 56 +++- .../lepton_get_object_temp_color_lcd.py | 56 +++- .../lepton_get_object_temp_lcd.py | 44 +++- ...lepton_hotspot_grayscale_color_tracking.py | 20 +- ...on_hotspot_grayscale_color_tracking_lcd.py | 20 +- .../lepton_hotspot_rgb565_color_tracking.py | 22 +- ...epton_hotspot_rgb565_color_tracking_lcd.py | 22 +- ...t_temp_hotspot_grayscale_color_tracking.py | 29 +- ...rget_temp_hotspot_rgb565_color_tracking.py | 31 ++- .../01-Camera/06-Time-of-Flight/tof_camera.py | 21 +- .../06-Time-of-Flight/tof_overlay.py | 28 +- .../sensor_auto_gain_control.py | 24 +- .../sensor_exposure_control.py | 27 +- .../sensor_horizontal_mirror.py | 20 +- .../sensor_manual_whitebal_control.py | 19 +- .../07-Sensor-Control/sensor_vertical_flip.py | 20 +- .../sesnor_manual_gain_control.py | 25 +- .../100_fps_ir_led_tracking.py | 58 ++-- .../08-Readout-Control/apriltag_tracking.py | 54 ++-- .../00-Drawing/arrow_drawing.py | 18 +- .../00-Drawing/circle_drawing.py | 16 +- .../02-Image-Processing/00-Drawing/copy2fb.py | 2 +- .../00-Drawing/cross_drawing.py | 14 +- .../00-Drawing/ellipse_drawing.py | 21 +- .../00-Drawing/flood_fill.py | 20 +- .../00-Drawing/image_drawing.py | 15 +- .../00-Drawing/image_drawing_advanced.py | 60 +++-- .../image_drawing_alpha_blending_test.py | 70 ++--- ...ng_alpha_blending_with_color_table_test.py | 75 +++--- .../image_drawing_alpha_table_test.py | 71 ++--- ...awing_alpha_table_with_color_table_test.py | 76 +++--- .../image_drawing_scale_down_test.py | 66 ++--- .../00-Drawing/image_drawing_scale_up_test.py | 60 +++-- .../image_drawing_with_custom_palette.py | 37 ++- .../00-Drawing/keypoints_drawing.py | 16 +- .../00-Drawing/line_drawing.py | 18 +- .../00-Drawing/rectangle_drawing.py | 18 +- .../00-Drawing/text_drawing.py | 29 +- .../adaptive_histogram_equalization.py | 4 +- .../01-Image-Filters/blur_filter.py | 18 +- .../01-Image-Filters/cartoon_filter.py | 8 +- .../color_bilateral_filter.py | 18 +- .../01-Image-Filters/color_binary_filter.py | 17 +- .../01-Image-Filters/color_light_removal.py | 16 +- .../01-Image-Filters/edge_filter.py | 18 +- 
.../01-Image-Filters/erode_and_dilate.py | 4 +- .../01-Image-Filters/gamma_correction.py | 6 +- .../grayscale_bilateral_filter.py | 18 +- .../grayscale_binary_filter.py | 9 +- .../grayscale_light_removal.py | 16 +- .../histogram_equalization.py | 4 +- .../01-Image-Filters/kernel_filters.py | 24 +- .../01-Image-Filters/lens_correction.py | 6 +- .../01-Image-Filters/linear_polar.py | 16 +- .../01-Image-Filters/log_polar.py | 16 +- .../mean_adaptive_threshold_filter.py | 18 +- .../01-Image-Filters/mean_filter.py | 18 +- .../median_adaptive_threshold_filter.py | 18 +- .../01-Image-Filters/median_filter.py | 18 +- .../midpoint_adaptive_threshold_filter.py | 18 +- .../01-Image-Filters/midpoint_filter.py | 18 +- .../mode_adaptive_threshold_filter.py | 18 +- .../01-Image-Filters/mode_filter.py | 18 +- .../01-Image-Filters/negative.py | 16 +- .../perspective_and_rotation_correction.py | 48 ++-- .../perspective_correction.py | 16 +- .../01-Image-Filters/rotation_correction.py | 36 +-- .../01-Image-Filters/sharpen_filter.py | 18 +- .../01-Image-Filters/ulab.py | 13 +- .../01-Image-Filters/unsharp_filter.py | 18 +- .../vflip_hmirror_transpose.py | 14 +- .../automatic_grayscale_color_tracking.py | 37 ++- .../automatic_rgb565_color_tracking.py | 37 ++- .../black_grayscale_line_following.py | 48 ++-- .../02-Color-Tracking/image_histogram_info.py | 10 +- .../image_statistics_info.py | 10 +- .../ir_beacon_grayscale_tracking.py | 18 +- .../ir_beacon_rgb565_tracking.py | 18 +- .../multi_color_blob_tracking.py | 26 +- .../multi_color_code_tracking.py | 42 ++- .../single_color_code_tracking.py | 37 ++- .../single_color_grayscale_blob_tracking.py | 18 +- .../single_color_rgb565_blob_tracking.py | 35 ++- .../in_memory_advanced_frame_differencing.py | 32 ++- .../in_memory_basic_frame_differencing.py | 24 +- .../in_memory_shadow_removal.py | 28 +- .../in_memory_structural_similarity.py | 22 +- .../on_disk_advanced_frame_differencing.py | 34 +-- .../on_disk_basic_frame_differencing.py | 26 +- .../on_disk_shadow_removal.py | 30 +-- .../on_disk_structural_similarity.py | 24 +- .../00-TensorFlow/tf_image_classification.py | 35 ++- .../00-TensorFlow/tf_object_detection.py | 40 +-- .../01-ST-CubeAI/nn_stm32cubeai.py | 38 +-- .../02-Haar-Cascade/face_detection.py | 2 +- .../02-Haar-Cascade/face_eye_detection.py | 6 +- .../02-Haar-Cascade/face_recognition.py | 12 +- .../02-Haar-Cascade/face_tracking.py | 33 ++- .../02-Haar-Cascade/iris_detection.py | 2 +- scripts/examples/04-Barcodes/find_barcodes.py | 55 ++-- .../examples/04-Barcodes/find_datamatrices.py | 20 +- .../find_datamatrices_w_lens_zoom.py | 20 +- .../04-Barcodes/qrcodes_with_lens_corr.py | 10 +- .../04-Barcodes/qrcodes_with_lens_zoom.py | 10 +- .../examples/05-Feature-Detection/edges.py | 20 +- .../05-Feature-Detection/find_circles.py | 19 +- .../find_line_segments.py | 17 +- .../05-Feature-Detection/find_lines.py | 17 +- .../05-Feature-Detection/find_rects.py | 13 +- scripts/examples/05-Feature-Detection/hog.py | 10 +- .../05-Feature-Detection/keypoints.py | 24 +- .../05-Feature-Detection/keypoints_save.py | 12 +- scripts/examples/05-Feature-Detection/lbp.py | 11 +- .../linear_regression_fast.py | 20 +- .../linear_regression_robust.py | 26 +- .../05-Feature-Detection/selective_search.py | 22 +- .../05-Feature-Detection/template_matching.py | 12 +- .../examples/06-April-Tags/find_apriltags.py | 42 +-- .../06-April-Tags/find_apriltags_3d_pose.py | 36 ++- .../06-April-Tags/find_apriltags_max_res.py | 51 ++-- .../find_apriltags_w_lens_zoom.py | 16 +- 
.../06-April-Tags/find_small_apriltags.py | 49 ++-- .../00-Arduino/arduino_i2c_slave.py | 16 +- .../00-Arduino/arduino_spi_slave.py | 16 +- .../00-Arduino/arduino_uart.py | 6 +- .../apriltags_pixy_i2c_emulation.py | 154 ++++++----- .../apriltags_pixy_spi_emulation.py | 160 ++++++----- .../apriltags_pixy_uart_emulation.py | 146 ++++++----- .../01-Pixy-Emulation/pixy_i2c_emulation.py | 239 +++++++++++------ .../01-Pixy-Emulation/pixy_spi_emulation.py | 248 +++++++++++------- .../01-Pixy-Emulation/pixy_uart_emulation.py | 226 ++++++++++------ .../mavlink_apriltags_landing_target.py | 113 ++++---- .../02-MAVLink/mavlink_opticalflow.py | 77 +++--- .../03-Modbus/modbus_apriltag.py | 18 +- .../03-Modbus/modbus_rtu_slave.py | 8 +- ..._as_the_remote_device_for_your_computer.py | 13 +- ..._as_the_remote_device_for_your_computer.py | 9 +- ...e_transfer_raw_as_the_controller_device.py | 51 ++-- ...image_transfer_raw_as_the_remote_device.py | 15 +- ...pular_features_as_the_controller_device.py | 31 ++- .../popular_features_as_the_remote_device.py | 105 +++++--- .../36-Web-Servers/rtsp_video_server_lan.py | 19 +- .../36-Web-Servers/rtsp_video_server_wlan.py | 19 +- .../00-Board-Control/adc_read_ext_channel.py | 4 +- .../00-Board-Control/adc_read_int_channel.py | 8 +- .../00-Board-Control/blinky.py | 2 +- .../09-OpenMV-Boards/00-Board-Control/can.py | 12 +- .../00-Board-Control/cpufreq_scaling.py | 19 +- .../00-Board-Control/dac_write.py | 6 +- .../00-Board-Control/i2c_control.py | 11 +- .../00-Board-Control/led_control.py | 34 ++- .../00-Board-Control/pin_control.py | 6 +- .../00-Board-Control/pwm_control.py | 4 +- .../09-OpenMV-Boards/00-Board-Control/rtc.py | 2 +- .../00-Board-Control/servo_control.py | 8 +- .../00-Board-Control/spi_control.py | 36 +-- .../00-Board-Control/timer_control.py | 17 +- .../00-Board-Control/timer_tests.py | 9 +- .../00-Board-Control/uart_control.py | 2 +- .../00-Board-Control/usb_hid.py | 10 +- .../00-Board-Control/usb_vcp.py | 15 +- .../00-Board-Control/vsync_gpio_output.py | 20 +- .../01-WiFi-Shield/connect.py | 4 +- .../09-OpenMV-Boards/01-WiFi-Shield/dns.py | 4 +- .../01-WiFi-Shield/http_client.py | 6 +- .../01-WiFi-Shield/http_client_ssl.py | 6 +- .../01-WiFi-Shield/http_post.py | 28 +- .../01-WiFi-Shield/mjpeg_streamer.py | 41 +-- .../01-WiFi-Shield/mjpeg_streamer_ap.py | 45 ++-- .../01-WiFi-Shield/mjpeg_streamer_fir.py | 41 +-- .../01-WiFi-Shield/mqtt_pub.py | 6 +- .../01-WiFi-Shield/mqtt_sub.py | 10 +- .../09-OpenMV-Boards/01-WiFi-Shield/ntp.py | 10 +- .../09-OpenMV-Boards/01-WiFi-Shield/scan.py | 4 +- .../01-WiFi-Shield/static_ip.py | 12 +- .../09-OpenMV-Boards/02-LCD-Shield/lcd.py | 12 +- .../09-OpenMV-Boards/03-Servo-Shield/main.py | 2 +- .../03-Servo-Shield/pca9685.py | 22 +- .../09-OpenMV-Boards/03-Servo-Shield/servo.py | 5 +- .../04-Thermopile-Shield/thermal_camera.py | 22 +- .../04-Thermopile-Shield/thermal_overlay.py | 18 +- .../thermal_overlay_lcd.py | 18 +- .../09-OpenMV-Boards/05-BLE-Shield/ble.py | 111 ++++---- .../motor-shield-power-driver.py | 11 +- .../06-Motor-Shield/motor-shield-pwm.py | 15 +- .../09-OpenMV-Boards/06-Motor-Shield/motor.py | 26 +- .../06-Motor-Shield/stepper.py | 10 +- .../07-IMU-Shield/imu_read.py | 32 +-- .../08-Distance-Shield/distance_read.py | 1 - .../09-OpenMV-Boards/09-TV-Shield/tv.py | 12 +- .../09-OpenMV-Boards/10-Light-Shield/light.py | 4 +- .../11-Low-Power/deep_sleep.py | 2 +- .../11-Low-Power/extint_wakeup.py | 4 +- .../11-Low-Power/sensor_sleep.py | 11 +- .../11-Low-Power/stop_mode.py | 1 - 
.../09-OpenMV-Boards/99-Tests/colorbar.py | 35 +-- .../examples/09-OpenMV-Boards/99-Tests/fps.py | 14 +- .../09-OpenMV-Boards/99-Tests/selftest.py | 70 ++--- .../09-OpenMV-Boards/99-Tests/unittests.py | 23 +- scripts/examples/09-OpenMV-Boards/main.py | 10 +- .../00-Board-Control/blinky.py | 3 +- .../00-Board-Control/i2c_scanner.py | 7 +- .../01-Sensors/apds9960/ambient.py | 1 - .../01-Sensors/apds9960/gesture.py | 1 - .../01-Sensors/apds9960/proximity.py | 1 - .../Nano-33-BLE-Sense/01-Sensors/hts221.py | 6 +- .../Nano-33-BLE-Sense/01-Sensors/lps22.py | 4 +- .../Nano-33-BLE-Sense/01-Sensors/lsm9ds1.py | 10 +- .../Nano-33-BLE-Sense/02-Audio/audio_fft.py | 41 +-- .../03-Bluetooth/ble_blinky.py | 6 +- .../03-Bluetooth/ble_scan.py | 6 +- .../03-Bluetooth/ble_temperature.py | 12 +- .../04-Thermal/thermal_camera.py | 22 +- .../Nano-RP2040/00-Board-Control/blinky.py | 2 +- .../00-Board-Control/i2c_scanner.py | 10 +- .../Nano-RP2040/01-Sensors/lsm6dsox_basic.py | 7 +- .../Nano-RP2040/01-Sensors/lsm6dsox_mlc.py | 34 +-- .../Nano-RP2040/03-Audio/audio_fft.py | 43 +-- .../Nano-RP2040/03-WiFi/ap_mode.py | 24 +- .../Nano-RP2040/03-WiFi/http_client.py | 6 +- .../Nano-RP2040/03-WiFi/ntp.py | 10 +- .../Nano-RP2040/03-WiFi/scan.py | 8 +- .../Nano-RP2040/04-Bluetooth/ble_blinky.py | 13 +- .../04-Bluetooth/ble_temperature.py | 4 +- .../04-Bluetooth/temp_sensor_aioble.py | 5 +- .../Nano-RP2040/05-Thermal/thermal_camera.py | 20 +- .../00-Board-Control/adc_read_ext_channel.py | 2 +- .../00-Board-Control/adc_read_int_channel.py | 8 +- .../Nicla-Vision/00-Board-Control/blinky.py | 2 +- .../Nicla-Vision/00-Board-Control/can.py | 12 +- .../00-Board-Control/cpufreq_scaling.py | 19 +- .../00-Board-Control/i2c_control.py | 9 +- .../00-Board-Control/led_control.py | 26 +- .../00-Board-Control/pin_control.py | 6 +- .../00-Board-Control/pwm_control.py | 26 +- .../Nicla-Vision/00-Board-Control/rtc.py | 2 +- .../00-Board-Control/spi_control.py | 32 ++- .../00-Board-Control/timer_control.py | 17 +- .../00-Board-Control/uart_control.py | 2 +- .../Nicla-Vision/00-Board-Control/usb_hid.py | 10 +- .../Nicla-Vision/00-Board-Control/usb_vcp.py | 15 +- .../00-Board-Control/vsync_gpio_output.py | 20 +- .../Nicla-Vision/01-Sensors/lsm6dsox_basic.py | 9 +- .../Nicla-Vision/01-Sensors/lsm6dsox_mlc.py | 43 +-- .../Nicla-Vision/01-Sensors/vl53l1x_tof.py | 1 - .../Nicla-Vision/02-Audio/audio_fft.py | 41 +-- .../Nicla-Vision/02-Audio/micro_speech.py | 17 +- .../Nicla-Vision/03-WiFi/connect.py | 6 +- .../Nicla-Vision/03-WiFi/dns.py | 6 +- .../Nicla-Vision/03-WiFi/http_client.py | 8 +- .../Nicla-Vision/03-WiFi/http_client_ssl.py | 8 +- .../Nicla-Vision/03-WiFi/mjpeg_streamer.py | 43 +-- .../Nicla-Vision/03-WiFi/mqtt_pub.py | 8 +- .../Nicla-Vision/03-WiFi/mqtt_sub.py | 12 +- .../Nicla-Vision/03-WiFi/ntp.py | 12 +- .../Nicla-Vision/03-WiFi/scan.py | 8 +- .../Nicla-Vision/03-WiFi/static_ip.py | 14 +- .../04-Bluetooth/ble_temperature.py | 4 +- .../05-Low-Power/extint_wakeup.py | 4 +- .../Nicla-Vision/05-Low-Power/stop_mode.py | 1 - .../00-Board-Control/adc_read_ext_channel.py | 4 +- .../00-Board-Control/adc_read_int_channel.py | 8 +- .../Portenta-H7/00-Board-Control/blinky.py | 2 +- .../Portenta-H7/00-Board-Control/can.py | 12 +- .../00-Board-Control/cpufreq_scaling.py | 19 +- .../Portenta-H7/00-Board-Control/dac_write.py | 6 +- .../00-Board-Control/i2c_control.py | 11 +- .../00-Board-Control/led_control.py | 34 ++- .../00-Board-Control/pin_control.py | 6 +- .../00-Board-Control/pwm_control.py | 26 +- .../Portenta-H7/00-Board-Control/rtc.py | 2 
+- .../00-Board-Control/servo_control.py | 8 +- .../00-Board-Control/spi_control.py | 36 +-- .../00-Board-Control/timer_control.py | 17 +- .../00-Board-Control/timer_tests.py | 13 +- .../00-Board-Control/uart_control.py | 2 +- .../Portenta-H7/00-Board-Control/usb_hid.py | 10 +- .../Portenta-H7/00-Board-Control/usb_vcp.py | 15 +- .../00-Board-Control/vsync_gpio_output.py | 20 +- .../Portenta-H7/01-Audio/audio_fft.py | 41 +-- .../Portenta-H7/01-Audio/micro_speech.py | 17 +- .../Portenta-H7/02-WiFi/connect.py | 6 +- .../Portenta-H7/02-WiFi/dns.py | 6 +- .../Portenta-H7/02-WiFi/http_client.py | 8 +- .../Portenta-H7/02-WiFi/http_client_ssl.py | 8 +- .../Portenta-H7/02-WiFi/mjpeg_streamer.py | 43 +-- .../Portenta-H7/02-WiFi/mqtt_pub.py | 8 +- .../Portenta-H7/02-WiFi/mqtt_sub.py | 12 +- .../Portenta-H7/02-WiFi/ntp.py | 12 +- .../Portenta-H7/02-WiFi/scan.py | 8 +- .../Portenta-H7/02-WiFi/static_ip.py | 14 +- .../03-Bluetooth/ble_temperature.py | 4 +- .../Portenta-H7/04-LoRa/lora-example.py | 6 +- .../Portenta-H7/05-Ethernet/eth_cable_test.py | 4 +- .../Portenta-H7/05-Ethernet/http_client.py | 4 +- .../05-Ethernet/http_client_ssl.py | 4 +- .../Portenta-H7/05-Ethernet/peer_to_peer.py | 4 +- .../Portenta-H7/06-Low-Power/extint_wakeup.py | 4 +- .../himax_wakeup_on_motion_detection.py | 13 +- .../Portenta-H7/06-Low-Power/sensor_sleep.py | 11 +- .../Portenta-H7/06-Low-Power/stop_mode.py | 1 - 330 files changed, 4614 insertions(+), 3303 deletions(-) diff --git a/scripts/examples/00-HelloWorld/helloworld.py b/scripts/examples/00-HelloWorld/helloworld.py index ba7d02c7b..1f24d87f9 100644 --- a/scripts/examples/00-HelloWorld/helloworld.py +++ b/scripts/examples/00-HelloWorld/helloworld.py @@ -5,14 +5,14 @@ import sensor import time -sensor.reset() # Reset and initialize the sensor. -sensor.set_pixformat(sensor.RGB565) # Set pixel format to RGB565 (or GRAYSCALE) -sensor.set_framesize(sensor.QVGA) # Set frame size to QVGA (320x240) -sensor.skip_frames(time = 2000) # Wait for settings take effect. -clock = time.clock() # Create a clock object to track the FPS. +sensor.reset() # Reset and initialize the sensor. +sensor.set_pixformat(sensor.RGB565) # Set pixel format to RGB565 (or GRAYSCALE) +sensor.set_framesize(sensor.QVGA) # Set frame size to QVGA (320x240) +sensor.skip_frames(time=2000) # Wait for settings take effect. +clock = time.clock() # Create a clock object to track the FPS. -while(True): - clock.tick() # Update the FPS clock. - img = sensor.snapshot() # Take a picture and return the image. - print(clock.fps()) # Note: OpenMV Cam runs about half as fast when connected - # to the IDE. The FPS should increase once disconnected. +while True: + clock.tick() # Update the FPS clock. + img = sensor.snapshot() # Take a picture and return the image. + print(clock.fps()) # Note: OpenMV Cam runs about half as fast when connected + # to the IDE. The FPS should increase once disconnected. diff --git a/scripts/examples/01-Camera/00-Snapshot/emboss_snapshot.py b/scripts/examples/01-Camera/00-Snapshot/emboss_snapshot.py index 56e4f66ee..b2e6f9ac8 100644 --- a/scripts/examples/01-Camera/00-Snapshot/emboss_snapshot.py +++ b/scripts/examples/01-Camera/00-Snapshot/emboss_snapshot.py @@ -10,13 +10,13 @@ import pyb RED_LED_PIN = 1 BLUE_LED_PIN = 3 -sensor.reset() # Initialize the camera sensor. -sensor.set_pixformat(sensor.RGB565) # or sensor.GRAYSCALE -sensor.set_framesize(sensor.QVGA) # or sensor.QQVGA (or others) -sensor.skip_frames(time = 2000) # Let new settings take affect. 
+sensor.reset() # Reset and initialize the sensor. +sensor.set_pixformat(sensor.RGB565) # Set pixel format to RGB565 (or GRAYSCALE) +sensor.set_framesize(sensor.QVGA) # Set frame size to QVGA (320x240) +sensor.skip_frames(time=2000) # Wait for settings take effect. pyb.LED(RED_LED_PIN).on() -sensor.skip_frames(time = 2000) # Give the user time to get ready. +sensor.skip_frames(time=2000) # Give the user time to get ready. pyb.LED(RED_LED_PIN).off() pyb.LED(BLUE_LED_PIN).on() @@ -24,11 +24,9 @@ pyb.LED(BLUE_LED_PIN).on() print("You're on camera!") img = sensor.snapshot() -img.morph(1, [+2, +1, +0,\ - +1, +1, -1,\ - +0, -1, -2]) # Emboss the image. +img.morph(1, [+2, +1, +0, +1, +1, -1, +0, -1, -2]) # Emboss the image. -img.save("example.jpg") # or "example.bmp" (or others) +img.save("example.jpg") # or "example.bmp" (or others) pyb.LED(BLUE_LED_PIN).off() print("Done! Reset the camera to see the saved image.") diff --git a/scripts/examples/01-Camera/00-Snapshot/snapshot.py b/scripts/examples/01-Camera/00-Snapshot/snapshot.py index 340da39d5..6945eab9e 100644 --- a/scripts/examples/01-Camera/00-Snapshot/snapshot.py +++ b/scripts/examples/01-Camera/00-Snapshot/snapshot.py @@ -10,19 +10,19 @@ import pyb RED_LED_PIN = 1 BLUE_LED_PIN = 3 -sensor.reset() # Initialize the camera sensor. -sensor.set_pixformat(sensor.RGB565) # or sensor.GRAYSCALE -sensor.set_framesize(sensor.QVGA) # or sensor.QQVGA (or others) -sensor.skip_frames(time = 2000) # Let new settings take affect. +sensor.reset() # Reset and initialize the sensor. +sensor.set_pixformat(sensor.RGB565) # Set pixel format to RGB565 (or GRAYSCALE) +sensor.set_framesize(sensor.QVGA) # Set frame size to QVGA (320x240) +sensor.skip_frames(time=2000) # Wait for settings take effect. pyb.LED(RED_LED_PIN).on() -sensor.skip_frames(time = 2000) # Give the user time to get ready. +sensor.skip_frames(time=2000) # Give the user time to get ready. pyb.LED(RED_LED_PIN).off() pyb.LED(BLUE_LED_PIN).on() print("You're on camera!") -sensor.snapshot().save("example.jpg") # or "example.bmp" (or others) +sensor.snapshot().save("example.jpg") # or "example.bmp" (or others) pyb.LED(BLUE_LED_PIN).off() print("Done! Reset the camera to see the saved image.") diff --git a/scripts/examples/01-Camera/00-Snapshot/snapshot_on_face_detection.py b/scripts/examples/01-Camera/00-Snapshot/snapshot_on_face_detection.py index 3611cfb99..38eaad9e7 100644 --- a/scripts/examples/01-Camera/00-Snapshot/snapshot_on_face_detection.py +++ b/scripts/examples/01-Camera/00-Snapshot/snapshot_on_face_detection.py @@ -12,10 +12,10 @@ import pyb RED_LED_PIN = 1 BLUE_LED_PIN = 3 -sensor.reset() # Initialize the camera sensor. -sensor.set_pixformat(sensor.GRAYSCALE) -sensor.set_framesize(sensor.HQVGA) # or sensor.QQVGA (or others) -sensor.skip_frames(time = 2000) # Let new settings take affect. +sensor.reset() # Reset and initialize the sensor. +sensor.set_pixformat(sensor.GRAYSCALE) # Set pixel format to RGB565 (or GRAYSCALE) +sensor.set_framesize(sensor.HQVGA) # Set frame size to HQVGA +sensor.skip_frames(time=2000) # Wait for settings take effect. # Load up a face detection HaarCascade. This is object that your OpenMV Cam # can use to detect faces using the find_features() method below. Your OpenMV @@ -25,18 +25,17 @@ sensor.skip_frames(time = 2000) # Let new settings take affect. # stages. 
face_cascade = image.HaarCascade("frontalface", stages=25) -while(True): - +while True: pyb.LED(RED_LED_PIN).on() print("About to start detecting faces...") - sensor.skip_frames(time = 2000) # Give the user time to get ready. + sensor.skip_frames(time=2000) # Give the user time to get ready. pyb.LED(RED_LED_PIN).off() print("Now detecting faces!") pyb.LED(BLUE_LED_PIN).on() - diff = 10 # We'll say we detected a face after 10 frames. - while(diff): + diff = 10 # We'll say we detected a face after 10 frames. + while diff: img = sensor.snapshot() # Threshold can be between 0.0 and 1.0. A higher threshold results in a # higher detection rate with more false positives. The scale value @@ -50,4 +49,4 @@ while(True): pyb.LED(BLUE_LED_PIN).off() print("Face detected! Saving image...") - sensor.snapshot().save("snapshot-%d.jpg" % pyb.rng()) # Save Pic. + sensor.snapshot().save("snapshot-%d.jpg" % pyb.rng()) # Save Pic. diff --git a/scripts/examples/01-Camera/00-Snapshot/snapshot_on_movement.py b/scripts/examples/01-Camera/00-Snapshot/snapshot_on_movement.py index e1f56936d..13efcf554 100644 --- a/scripts/examples/01-Camera/00-Snapshot/snapshot_on_movement.py +++ b/scripts/examples/01-Camera/00-Snapshot/snapshot_on_movement.py @@ -12,36 +12,36 @@ import os RED_LED_PIN = 1 BLUE_LED_PIN = 3 -sensor.reset() # Initialize the camera sensor. -sensor.set_pixformat(sensor.RGB565) # or sensor.GRAYSCALE -sensor.set_framesize(sensor.QVGA) # or sensor.QQVGA (or others) -sensor.skip_frames(time = 2000) # Let new settings take affect. -sensor.set_auto_whitebal(False) # Turn off white balance. +sensor.reset() # Reset and initialize the sensor. +sensor.set_pixformat(sensor.RGB565) # Set pixel format to RGB565 (or GRAYSCALE) +sensor.set_framesize(sensor.QVGA) # Set frame size to QVGA (320x240) +sensor.skip_frames(time=2000) # Wait for settings take effect. +sensor.set_auto_whitebal(False) # Turn off white balance. -if not "temp" in os.listdir(): os.mkdir("temp") # Make a temp directory - -while(True): +if not "temp" in os.listdir(): + os.mkdir("temp") # Make a temp directory +while True: pyb.LED(RED_LED_PIN).on() print("About to save background image...") - sensor.skip_frames(time = 2000) # Give the user time to get ready. + sensor.skip_frames(time=2000) # Give the user time to get ready. pyb.LED(RED_LED_PIN).off() sensor.snapshot().save("temp/bg.bmp") print("Saved background image - Now detecting motion!") pyb.LED(BLUE_LED_PIN).on() - diff = 10 # We'll say we detected motion after 10 frames of motion. - while(diff): + diff = 10 # We'll say we detected motion after 10 frames of motion. + while diff: img = sensor.snapshot() img.difference("temp/bg.bmp") stats = img.statistics() # Stats 5 is the max of the lighting color channel. The below code # triggers when the lighting max for the whole image goes above 20. # The lighting difference maximum should be zero normally. - if (stats[5] > 20): + if stats[5] > 20: diff -= 1 pyb.LED(BLUE_LED_PIN).off() print("Movement detected! Saving image...") - sensor.snapshot().save("temp/snapshot-%d.jpg" % pyb.rng()) # Save Pic. + sensor.snapshot().save("temp/snapshot-%d.jpg" % pyb.rng()) # Save Pic. 
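The movement example above (and the gif/mjpeg variants later in this patch) triggers on stats[5], the maximum of the lightness (L) channel of the difference image: a static scene differences away to near zero, so a sizable lighting maximum means something in the frame changed. For reference, a minimal sketch of the same check using the named accessor instead of the index (this assumes the statistics object's l_max() method, which OpenMV's documented API provides; the patch itself keeps the stats[5] form):

    import sensor

    sensor.reset()
    sensor.set_pixformat(sensor.RGB565)
    sensor.set_framesize(sensor.QVGA)
    sensor.skip_frames(time=2000)
    sensor.set_auto_whitebal(False)  # Keep colors stable between frames.
    sensor.snapshot().save("temp/bg.bmp")  # Assumes "temp" already exists.

    while True:
        img = sensor.snapshot()
        img.difference("temp/bg.bmp")  # Absolute difference vs. background.
        stats = img.statistics()
        # l_max() is the maximum of the L (lightness) channel, equivalent
        # to stats[5]: near zero for a still scene, larger when something
        # in the frame changed (the threshold of 20 is YMMV, per the examples).
        if stats.l_max() > 20:
            print("Movement detected!")
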
diff --git a/scripts/examples/01-Camera/00-Snapshot/time_lapse_photos.py b/scripts/examples/01-Camera/00-Snapshot/time_lapse_photos.py index 332d5d316..6d1d6445b 100644 --- a/scripts/examples/01-Camera/00-Snapshot/time_lapse_photos.py +++ b/scripts/examples/01-Camera/00-Snapshot/time_lapse_photos.py @@ -19,51 +19,97 @@ rtc = pyb.RTC() newFile = False try: - os.stat('time.txt') -except OSError: # If the log file doesn't exist then set the RTC and set newFile to True - # datetime format: year, month, day, weekday (Monday=1, Sunday=7), - # hours (24 hour clock), minutes, seconds, subseconds (counds down from 255 to 0) - rtc.datetime((2018, 3, 9, 5, 13, 0, 0, 0)) - newFile = True + os.stat("time.txt") +except ( + OSError +): # If the log file doesn't exist then set the RTC and set newFile to True + # datetime format: year, month, day, weekday (Monday=1, Sunday=7), + # hours (24 hour clock), minutes, seconds, subseconds (counds down from 255 to 0) + rtc.datetime((2018, 3, 9, 5, 13, 0, 0, 0)) + newFile = True # Extract the date and time from the RTC object. dateTime = rtc.datetime() year = str(dateTime[0]) -month = '%02d' % dateTime[1] -day = '%02d' % dateTime[2] -hour = '%02d' % dateTime[4] -minute = '%02d' % dateTime[5] -second = '%02d' % dateTime[6] +month = "%02d" % dateTime[1] +day = "%02d" % dateTime[2] +hour = "%02d" % dateTime[4] +minute = "%02d" % dateTime[5] +second = "%02d" % dateTime[6] subSecond = str(dateTime[7]) -newName='I'+year+month+day+hour+minute+second # Image file name based on RTC +newName = ( + "I" + year + month + day + hour + minute + second +) # Image file name based on RTC # Enable RTC interrupts every 10 seconds, camera will RESET after wakeup from deepsleep Mode. rtc.wakeup(10000) BLUE_LED_PIN = 3 -sensor.reset() # Initialize the camera sensor. +sensor.reset() # Initialize the camera sensor. sensor.set_pixformat(sensor.GRAYSCALE) sensor.set_framesize(sensor.VGA) -sensor.skip_frames(time = 1000) # Let new settings take affect. +sensor.skip_frames(time=1000) # Let new settings take affect. # Let folks know we are about to take a picture. pyb.LED(BLUE_LED_PIN).on() -if(newFile): # If log file does not exist then create it. - with open('time.txt', 'a') as timeFile: # Write text file to keep track of date, time and image number. - timeFile.write('Date and time format: year, month, day, hours, minutes, seconds, subseconds' + '\n') - timeFile.write(newName + ',' + year + ',' + month + ',' + day + ',' + hour + ',' + minute + ',' + second + ',' + subSecond + '\n') +if newFile: # If log file does not exist then create it. + with open( + "time.txt", "a" + ) as timeFile: # Write text file to keep track of date, time and image number. + timeFile.write( + "Date and time format: year, month, day, hours, minutes, seconds, subseconds" + + "\n" + ) + timeFile.write( + newName + + "," + + year + + "," + + month + + "," + + day + + "," + + hour + + "," + + minute + + "," + + second + + "," + + subSecond + + "\n" + ) else: - with open('time.txt', 'a') as timeFile: # Append to date, time and image number to text file. - timeFile.write(newName + ',' + year + ',' + month + ',' + day + ',' + hour + ',' + minute + ',' + second + ',' + subSecond + '\n') + with open( + "time.txt", "a" + ) as timeFile: # Append to date, time and image number to text file. 
+ timeFile.write( + newName + + "," + + year + + "," + + month + + "," + + day + + "," + + hour + + "," + + minute + + "," + + second + + "," + + subSecond + + "\n" + ) -if not "images" in os.listdir(): os.mkdir("images") # Make a temp directory +if not "images" in os.listdir(): + os.mkdir("images") # Make a temp directory # Take photo and save to SD card img = sensor.snapshot() -img.save('images/' + newName, quality=90) +img.save("images/" + newName, quality=90) pyb.LED(BLUE_LED_PIN).off() # Enter Deepsleep Mode (i.e. the OpenMV Cam effectively turns itself off except for the RTC). diff --git a/scripts/examples/01-Camera/01-Video-Recording/gif.py b/scripts/examples/01-Camera/01-Video-Recording/gif.py index 57d0b91a5..85cdbc2ea 100644 --- a/scripts/examples/01-Camera/01-Video-Recording/gif.py +++ b/scripts/examples/01-Camera/01-Video-Recording/gif.py @@ -14,14 +14,14 @@ import pyb RED_LED_PIN = 1 BLUE_LED_PIN = 3 -sensor.reset() # Initialize the camera sensor. -sensor.set_pixformat(sensor.RGB565) # or sensor.GRAYSCALE -sensor.set_framesize(sensor.QQVGA) # or sensor.QVGA (or others) -sensor.skip_frames(time = 2000) # Let new settings take affect. -clock = time.clock() # Tracks FPS. +sensor.reset() # Reset and initialize the sensor. +sensor.set_pixformat(sensor.RGB565) # Set pixel format to RGB565 (or GRAYSCALE) +sensor.set_framesize(sensor.QQVGA) # Set frame size to QQVGA (160x120) +sensor.skip_frames(time=2000) # Wait for settings take effect. +clock = time.clock() # Create a clock object to track the FPS. pyb.LED(RED_LED_PIN).on() -sensor.skip_frames(time = 2000) # Give the user time to get ready. +sensor.skip_frames(time=2000) # Give the user time to get ready. pyb.LED(RED_LED_PIN).off() pyb.LED(BLUE_LED_PIN).on() @@ -32,7 +32,7 @@ print("You're on camera!") for i in range(100): clock.tick() # clock.avg() returns the milliseconds between frames - gif delay is in - g.add_frame(sensor.snapshot(), delay=int(clock.avg()/10)) # centiseconds. + g.add_frame(sensor.snapshot(), delay=int(clock.avg() / 10)) # centiseconds. print(clock.fps()) g.close() diff --git a/scripts/examples/01-Camera/01-Video-Recording/gif_on_face_detection.py b/scripts/examples/01-Camera/01-Video-Recording/gif_on_face_detection.py index 4306be49c..bf7881db8 100644 --- a/scripts/examples/01-Camera/01-Video-Recording/gif_on_face_detection.py +++ b/scripts/examples/01-Camera/01-Video-Recording/gif_on_face_detection.py @@ -18,10 +18,10 @@ import pyb RED_LED_PIN = 1 BLUE_LED_PIN = 3 -sensor.reset() # Initialize the camera sensor. -sensor.set_pixformat(sensor.GRAYSCALE) # or sensor. -sensor.set_framesize(sensor.QQVGA) # or sensor.HQVGA (or others) -sensor.skip_frames(time = 2000) # Let new settings take affect. +sensor.reset() # Initialize the camera sensor. +sensor.set_pixformat(sensor.GRAYSCALE) # Set pixel format to RGB565 (or GRAYSCALE) +sensor.set_framesize(sensor.QQVGA) # Set frame size to QQVGA (160x120) +sensor.skip_frames(time=2000) # Wait for settings take effect. # Load up a face detection HaarCascade. This is object that your OpenMV Cam # can use to detect faces using the find_features() method below. Your OpenMV @@ -31,18 +31,17 @@ sensor.skip_frames(time = 2000) # Let new settings take affect. # stages. face_cascade = image.HaarCascade("frontalface", stages=25) -while(True): - +while True: pyb.LED(RED_LED_PIN).on() print("About to start detecting faces...") - sensor.skip_frames(time = 2000) # Give the user time to get ready. + sensor.skip_frames(time=2000) # Give the user time to get ready. 
pyb.LED(RED_LED_PIN).off() print("Now detecting faces!") pyb.LED(BLUE_LED_PIN).on() - diff = 10 # We'll say we detected a face after 10 frames. - while(diff): + diff = 10 # We'll say we detected a face after 10 frames. + while diff: img = sensor.snapshot() # Threshold can be between 0.0 and 1.0. A higher threshold results in a # higher detection rate with more false positives. The scale value @@ -56,12 +55,12 @@ while(True): g = gif.Gif("example-%d.gif" % pyb.rng(), loop=True) - clock = time.clock() # Tracks FPS. + clock = time.clock() # Tracks FPS. print("You're on camera!") for i in range(100): clock.tick() # clock.avg() returns the milliseconds between frames - gif delay is in - g.add_frame(sensor.snapshot(), delay=int(clock.avg()/10)) # centiseconds. + g.add_frame(sensor.snapshot(), delay=int(clock.avg() / 10)) # centiseconds. print(clock.fps()) g.close() diff --git a/scripts/examples/01-Camera/01-Video-Recording/gif_on_movement.py b/scripts/examples/01-Camera/01-Video-Recording/gif_on_movement.py index f542327a5..52d4e5387 100644 --- a/scripts/examples/01-Camera/01-Video-Recording/gif_on_movement.py +++ b/scripts/examples/01-Camera/01-Video-Recording/gif_on_movement.py @@ -18,44 +18,44 @@ import os RED_LED_PIN = 1 BLUE_LED_PIN = 3 -sensor.reset() # Initialize the camera sensor. -sensor.set_pixformat(sensor.RGB565) # or sensor.GRAYSCALE -sensor.set_framesize(sensor.QQVGA) # or sensor.QVGA (or others) -sensor.skip_frames(time = 2000) # Let new settings take affect. -sensor.set_auto_whitebal(False) # Turn off white balance. +sensor.reset() # Reset and initialize the sensor. +sensor.set_pixformat(sensor.RGB565) # Set pixel format to RGB565 (or GRAYSCALE) +sensor.set_framesize(sensor.QQVGA) # Set frame size to QQVGA (160x120) +sensor.skip_frames(time=2000) # Wait for settings take effect. +sensor.set_auto_whitebal(False) # Turn off white balance. -if not "temp" in os.listdir(): os.mkdir("temp") # Make a temp directory - -while(True): +if not "temp" in os.listdir(): + os.mkdir("temp") # Make a temp directory +while True: pyb.LED(RED_LED_PIN).on() print("About to save background image...") - sensor.skip_frames(time = 2000) # Give the user time to get ready. + sensor.skip_frames(time=2000) # Give the user time to get ready. pyb.LED(RED_LED_PIN).off() sensor.snapshot().save("temp/bg.bmp") print("Saved background image - Now detecting motion!") pyb.LED(BLUE_LED_PIN).on() - diff = 10 # We'll say we detected motion after 10 frames of motion. - while(diff): + diff = 10 # We'll say we detected motion after 10 frames of motion. + while diff: img = sensor.snapshot() img.difference("temp/bg.bmp") stats = img.statistics() # Stats 5 is the max of the lighting color channel. The below code # triggers when the lighting max for the whole image goes above 20. # The lighting difference maximum should be zero normally. - if (stats[5] > 20): + if stats[5] > 20: diff -= 1 g = gif.Gif("example-%d.gif" % pyb.rng(), loop=True) - clock = time.clock() # Tracks FPS. + clock = time.clock() # Tracks FPS. print("You're on camera!") for i in range(100): clock.tick() # clock.avg() returns the milliseconds between frames - gif delay is in - g.add_frame(sensor.snapshot(), delay=int(clock.avg()/10)) # centiseconds. + g.add_frame(sensor.snapshot(), delay=int(clock.avg() / 10)) # centiseconds. 
print(clock.fps()) g.close() diff --git a/scripts/examples/01-Camera/01-Video-Recording/imageio_memory.py b/scripts/examples/01-Camera/01-Video-Recording/imageio_memory.py index 7270d500c..a8511c31c 100644 --- a/scripts/examples/01-Camera/01-Video-Recording/imageio_memory.py +++ b/scripts/examples/01-Camera/01-Video-Recording/imageio_memory.py @@ -9,13 +9,13 @@ import time # Number of frames to pre-allocate and record N_FRAMES = 500 -sensor.reset() -sensor.set_pixformat(sensor.RGB565) -sensor.set_framesize(sensor.QVGA) +sensor.reset() # Reset and initialize the sensor. +sensor.set_pixformat(sensor.RGB565) # Set pixel format to RGB565 (or GRAYSCALE) +sensor.set_framesize(sensor.QVGA) # Set frame size to QVGA (320x240) # This frame size must match the image size passed to ImageIO sensor.set_windowing((120, 120)) -sensor.skip_frames(time = 2000) +sensor.skip_frames(time=2000) clock = time.clock() @@ -27,7 +27,7 @@ for i in range(0, N_FRAMES): stream.write(sensor.snapshot()) print(clock.fps()) -while (True): +while True: # Rewind stream and play back stream.seek(0) for i in range(0, N_FRAMES): diff --git a/scripts/examples/01-Camera/01-Video-Recording/imageio_read.py b/scripts/examples/01-Camera/01-Video-Recording/imageio_read.py index c30d05612..a541f1467 100644 --- a/scripts/examples/01-Camera/01-Video-Recording/imageio_read.py +++ b/scripts/examples/01-Camera/01-Video-Recording/imageio_read.py @@ -5,29 +5,29 @@ # This example shows how to use the Image Reader object to replay snapshots of what your # OpenMV Cam saw saved by the Image Writer object for testing machine vision algorithms. -# Altered to allow full speed reading from SD card for extraction of sequences to the network etc. +# Altered to allow full speed reading from SD card for extraction of sequences to the network etc. # Set the new pause parameter to false import sensor import image import time -snapshot_source = False # Set to true once finished to pull data from sensor. +snapshot_source = False # Set to true once finished to pull data from sensor. -sensor.reset() -sensor.set_pixformat(sensor.RGB565) -sensor.set_framesize(sensor.QQVGA) -sensor.skip_frames(time = 2000) -clock = time.clock() +sensor.reset() # Reset and initialize the sensor. +sensor.set_pixformat(sensor.RGB565) # Set pixel format to RGB565 (or GRAYSCALE) +sensor.set_framesize(sensor.QQVGA) # Set frame size to QQVGA (160x120) +sensor.skip_frames(time=2000) # Wait for settings take effect. +clock = time.clock() # Create a clock object to track the FPS. stream = None -if snapshot_source == False: +if snapshot_source is False: stream = image.ImageIO("/stream.bin", "r") -while(True): +while True: clock.tick() if snapshot_source: - img = sensor.snapshot() + img = sensor.snapshot() else: img = stream.read(copy_to_fb=True, loop=True, pause=True) # Do machine vision algorithms on the image here. diff --git a/scripts/examples/01-Camera/01-Video-Recording/imageio_write.py b/scripts/examples/01-Camera/01-Video-Recording/imageio_write.py index 9cbd7a72f..1ab3df3b2 100644 --- a/scripts/examples/01-Camera/01-Video-Recording/imageio_write.py +++ b/scripts/examples/01-Camera/01-Video-Recording/imageio_write.py @@ -11,13 +11,13 @@ import image import pyb import time -record_time = 10000 # 10 seconds in milliseconds +record_time = 10000 # 10 seconds in milliseconds -sensor.reset() -sensor.set_pixformat(sensor.RGB565) -sensor.set_framesize(sensor.QQVGA) -sensor.skip_frames(time = 2000) -clock = time.clock() +sensor.reset() # Reset and initialize the sensor. 
+sensor.set_pixformat(sensor.RGB565) # Set pixel format to RGB565 (or GRAYSCALE) +sensor.set_framesize(sensor.QQVGA) # Set frame size to QQVGA (160x120) +sensor.skip_frames(time=2000) # Wait for settings take effect. +clock = time.clock() # Create a clock object to track the FPS. stream = image.ImageIO("/stream.bin", "w") diff --git a/scripts/examples/01-Camera/01-Video-Recording/mjpeg.py b/scripts/examples/01-Camera/01-Video-Recording/mjpeg.py index bb5f5bb08..94a54b49c 100644 --- a/scripts/examples/01-Camera/01-Video-Recording/mjpeg.py +++ b/scripts/examples/01-Camera/01-Video-Recording/mjpeg.py @@ -15,14 +15,14 @@ import pyb RED_LED_PIN = 1 BLUE_LED_PIN = 3 -sensor.reset() # Initialize the camera sensor. -sensor.set_pixformat(sensor.RGB565) # or sensor.GRAYSCALE -sensor.set_framesize(sensor.QVGA) # or sensor.QQVGA (or others) -sensor.skip_frames(time = 2000) # Let new settings take affect. -clock = time.clock() # Tracks FPS. +sensor.reset() # Reset and initialize the sensor. +sensor.set_pixformat(sensor.RGB565) # Set pixel format to RGB565 (or GRAYSCALE) +sensor.set_framesize(sensor.QVGA) # Set frame size to QVGA (320x240) +sensor.skip_frames(time=2000) # Wait for settings take effect. +clock = time.clock() # Create a clock object to track the FPS. pyb.LED(RED_LED_PIN).on() -sensor.skip_frames(time = 2000) # Give the user time to get ready. +sensor.skip_frames(time=2000) # Give the user time to get ready. pyb.LED(RED_LED_PIN).off() pyb.LED(BLUE_LED_PIN).on() diff --git a/scripts/examples/01-Camera/01-Video-Recording/mjpeg_on_face_detection.py b/scripts/examples/01-Camera/01-Video-Recording/mjpeg_on_face_detection.py index ddbcb3122..16e0bd11a 100644 --- a/scripts/examples/01-Camera/01-Video-Recording/mjpeg_on_face_detection.py +++ b/scripts/examples/01-Camera/01-Video-Recording/mjpeg_on_face_detection.py @@ -19,10 +19,10 @@ import pyb RED_LED_PIN = 1 BLUE_LED_PIN = 3 -sensor.reset() # Initialize the camera sensor. -sensor.set_pixformat(sensor.GRAYSCALE) # or sensor. -sensor.set_framesize(sensor.QQVGA) # or sensor.HQVGA (or others) -sensor.skip_frames(time = 2000) # Let new settings take affect. +sensor.reset() # Reset and initialize the sensor. +sensor.set_pixformat(sensor.GRAYSCALE) # Set pixel format to RGB565 (or GRAYSCALE) +sensor.set_framesize(sensor.QQVGA) # Set frame size to QQVGA (160x120) +sensor.skip_frames(time=2000) # Wait for settings take effect. # Load up a face detection HaarCascade. This is object that your OpenMV Cam # can use to detect faces using the find_features() method below. Your OpenMV @@ -32,18 +32,17 @@ sensor.skip_frames(time = 2000) # Let new settings take affect. # stages. face_cascade = image.HaarCascade("frontalface", stages=25) -while(True): - +while True: pyb.LED(RED_LED_PIN).on() print("About to start detecting faces...") - sensor.skip_frames(time = 2000) # Give the user time to get ready. + sensor.skip_frames(time=2000) # Give the user time to get ready. pyb.LED(RED_LED_PIN).off() print("Now detecting faces!") pyb.LED(BLUE_LED_PIN).on() - diff = 10 # We'll say we detected a face after 10 frames. - while(diff): + diff = 10 # We'll say we detected a face after 10 frames. + while diff: img = sensor.snapshot() # Threshold can be between 0.0 and 1.0. A higher threshold results in a # higher detection rate with more false positives. The scale value @@ -57,7 +56,7 @@ while(True): m = mjpeg.Mjpeg("example-%d.mjpeg" % pyb.rng()) - clock = time.clock() # Tracks FPS. + clock = time.clock() # Tracks FPS. 
print("You're on camera!") for i in range(200): clock.tick() diff --git a/scripts/examples/01-Camera/01-Video-Recording/mjpeg_on_movement.py b/scripts/examples/01-Camera/01-Video-Recording/mjpeg_on_movement.py index 5d4d0944f..d37a8a139 100644 --- a/scripts/examples/01-Camera/01-Video-Recording/mjpeg_on_movement.py +++ b/scripts/examples/01-Camera/01-Video-Recording/mjpeg_on_movement.py @@ -19,39 +19,39 @@ import os RED_LED_PIN = 1 BLUE_LED_PIN = 3 -sensor.reset() # Initialize the camera sensor. -sensor.set_pixformat(sensor.RGB565) # or sensor.GRAYSCALE -sensor.set_framesize(sensor.QVGA) # or sensor.QQVGA (or others) -sensor.skip_frames(time = 2000) # Let new settings take affect. -sensor.set_auto_whitebal(False) # Turn off white balance. +sensor.reset() # Reset and initialize the sensor. +sensor.set_pixformat(sensor.RGB565) # Set pixel format to RGB565 (or GRAYSCALE) +sensor.set_framesize(sensor.QVGA) # Set frame size to QVGA (320x240) +sensor.skip_frames(time=2000) # Wait for settings take effect. +sensor.set_auto_whitebal(False) # Turn off white balance. -if not "temp" in os.listdir(): os.mkdir("temp") # Make a temp directory - -while(True): +if not "temp" in os.listdir(): + os.mkdir("temp") # Make a temp directory +while True: pyb.LED(RED_LED_PIN).on() print("About to save background image...") - sensor.skip_frames(time = 2000) # Give the user time to get ready. + sensor.skip_frames(time=2000) # Give the user time to get ready. pyb.LED(RED_LED_PIN).off() sensor.snapshot().save("temp/bg.bmp") print("Saved background image - Now detecting motion!") pyb.LED(BLUE_LED_PIN).on() - diff = 10 # We'll say we detected motion after 10 frames of motion. - while(diff): + diff = 10 # We'll say we detected motion after 10 frames of motion. + while diff: img = sensor.snapshot() img.difference("temp/bg.bmp") stats = img.statistics() # Stats 5 is the max of the lighting color channel. The below code # triggers when the lighting max for the whole image goes above 20. # The lighting difference maximum should be zero normally. - if (stats[5] > 20): + if stats[5] > 20: diff -= 1 m = mjpeg.Mjpeg("example-%d.mjpeg" % pyb.rng()) - clock = time.clock() # Tracks FPS. + clock = time.clock() # Tracks FPS. print("You're on camera!") for i in range(200): clock.tick() diff --git a/scripts/examples/01-Camera/02-Optical-Flow/absolute-rotation-scale.py b/scripts/examples/01-Camera/02-Optical-Flow/absolute-rotation-scale.py index a55900ff2..352084698 100644 --- a/scripts/examples/01-Camera/02-Optical-Flow/absolute-rotation-scale.py +++ b/scripts/examples/01-Camera/02-Optical-Flow/absolute-rotation-scale.py @@ -4,32 +4,32 @@ # rotation/scale by comparing the current and a previous # image against each other. Note that only rotation/scale is # handled - not X and Y translation in this mode. - +# # To run this demo effectively please mount your OpenMV Cam on a steady # base and SLOWLY rotate the camera around the lens and move the camera # forward/backwards to see the numbers change. # I.e. Z direction changes only. - -import sensor -import time -import math - -# NOTE!!! You have to use a small power of 2 resolution when using +# +# NOTE You have to use a small power of 2 resolution when using # find_displacement(). This is because the algorithm is powered by # something called phase correlation which does the image comparison # using FFTs. A non-power of 2 resolution requires padding to a power # of 2 which reduces the usefulness of the algorithm results. Please # use a resolution like B64X64 or B64X32 (2x faster). 
- +# # Your OpenMV Cam supports power of 2 resolutions of 64x32, 64x64, # 128x64, and 128x128. If you want a resolution of 32x32 you can create # it by doing "img.pool(2, 2)" on a 64x64 image. -sensor.reset() # Reset and initialize the sensor. -sensor.set_pixformat(sensor.RGB565) # Set pixel format to RGB565 (or GRAYSCALE) -sensor.set_framesize(sensor.B64X64) # Set frame size to 64x64... (or 64x32)... -sensor.skip_frames(time = 2000) # Wait for settings take effect. -clock = time.clock() # Create a clock object to track the FPS. +import sensor +import time +import math + +sensor.reset() # Reset and initialize the sensor. +sensor.set_pixformat(sensor.RGB565) # Set pixel format to RGB565 (or GRAYSCALE) +sensor.set_framesize(sensor.B64X64) # Set frame size to 64x64... (or 64x32)... +sensor.skip_frames(time=2000) # Wait for settings take effect. +clock = time.clock() # Create a clock object to track the FPS. # Take from the main frame buffer's RAM to allocate a second frame buffer. # There's a lot more RAM in the frame buffer than in the MicroPython heap. @@ -38,19 +38,19 @@ clock = time.clock() # Create a clock object to track the FPS. extra_fb = sensor.alloc_extra_fb(sensor.width(), sensor.height(), sensor.RGB565) extra_fb.replace(sensor.snapshot()) -while(True): - clock.tick() # Track elapsed milliseconds between snapshots(). - img = sensor.snapshot() # Take a picture and return the image. +while True: + clock.tick() # Track elapsed milliseconds between snapshots(). + img = sensor.snapshot() # Take a picture and return the image. # This algorithm is hard to test without a perfect jig... So, here's a cheat to see it works. # Put in a z_rotation value below and you should see the r output be equal to that. - if(0): + if 0: expected_rotation = 20.0 img.rotation_corr(z_rotation=expected_rotation) # This algorithm is hard to test without a perfect jig... So, here's a cheat to see it works. # Put in a zoom value below and you should see the z output be equal to that. - if(0): + if 0: expected_zoom = 0.8 img.rotation_corr(zoom=expected_zoom) @@ -61,9 +61,13 @@ while(True): rotation_change = int(math.degrees(displacement.rotation()) * 5) / 5.0 zoom_amount = displacement.scale() - if(displacement.response() > 0.1): # Below 0.1 or so (YMMV) and the results are just noise. - print("{0:+f}r {1:+f}z {2} {3} FPS".format(rotation_change, zoom_amount, \ - displacement.response(), - clock.fps())) + if ( + displacement.response() > 0.1 + ): # Below 0.1 or so (YMMV) and the results are just noise. + print( + "{0:+f}r {1:+f}z {2} {3} FPS".format( + rotation_change, zoom_amount, displacement.response(), clock.fps() + ) + ) else: print(clock.fps()) diff --git a/scripts/examples/01-Camera/02-Optical-Flow/absolute-translation.py b/scripts/examples/01-Camera/02-Optical-Flow/absolute-translation.py index 9071b9005..b83b74bc0 100644 --- a/scripts/examples/01-Camera/02-Optical-Flow/absolute-translation.py +++ b/scripts/examples/01-Camera/02-Optical-Flow/absolute-translation.py @@ -4,31 +4,31 @@ # in the X and Y direction by comparing the current and a previous # image against each other. Note that only X and Y translation is # handled - not rotation/scale in this mode. - +# # To run this demo effectively please mount your OpenMV Cam on a steady # base and SLOWLY translate it to the left, right, up, and down and # watch the numbers change. Note that you can see displacement numbers # up +- half of the hoizontal and vertical resolution. - -import sensor -import time - -# NOTE!!! 
You have to use a small power of 2 resolution when using +# +# NOTE You have to use a small power of 2 resolution when using # find_displacement(). This is because the algorithm is powered by # something called phase correlation which does the image comparison # using FFTs. A non-power of 2 resolution requires padding to a power # of 2 which reduces the usefulness of the algorithm results. Please # use a resolution like B64X64 or B64X32 (2x faster). - +# # Your OpenMV Cam supports power of 2 resolutions of 64x32, 64x64, # 128x64, and 128x128. If you want a resolution of 32x32 you can create # it by doing "img.pool(2, 2)" on a 64x64 image. -sensor.reset() # Reset and initialize the sensor. -sensor.set_pixformat(sensor.RGB565) # Set pixel format to RGB565 (or GRAYSCALE) -sensor.set_framesize(sensor.B64X64) # Set frame size to 64x64... (or 64x32)... -sensor.skip_frames(time = 2000) # Wait for settings take effect. -clock = time.clock() # Create a clock object to track the FPS. +import sensor +import time + +sensor.reset() # Reset and initialize the sensor. +sensor.set_pixformat(sensor.RGB565) # Set pixel format to RGB565 (or GRAYSCALE) +sensor.set_framesize(sensor.B64X64) # Set frame size to 64x64... (or 64x32)... +sensor.skip_frames(time=2000) # Wait for settings take effect. +clock = time.clock() # Create a clock object to track the FPS. # Take from the main frame buffer's RAM to allocate a second frame buffer. # There's a lot more RAM in the frame buffer than in the MicroPython heap. @@ -37,9 +37,9 @@ clock = time.clock() # Create a clock object to track the FPS. extra_fb = sensor.alloc_extra_fb(sensor.width(), sensor.height(), sensor.RGB565) extra_fb.replace(sensor.snapshot()) -while(True): - clock.tick() # Track elapsed milliseconds between snapshots(). - img = sensor.snapshot() # Take a picture and return the image. +while True: + clock.tick() # Track elapsed milliseconds between snapshots(). + img = sensor.snapshot() # Take a picture and return the image. # For this example we never update the old image to measure absolute change. displacement = extra_fb.find_displacement(img) @@ -48,9 +48,13 @@ while(True): sub_pixel_x = int(displacement.x_translation() * 5) / 5.0 sub_pixel_y = int(displacement.y_translation() * 5) / 5.0 - if(displacement.response() > 0.1): # Below 0.1 or so (YMMV) and the results are just noise. - print("{0:+f}x {1:+f}y {2} {3} FPS".format(sub_pixel_x, sub_pixel_y, - displacement.response(), - clock.fps())) + if ( + displacement.response() > 0.1 + ): # Below 0.1 or so (YMMV) and the results are just noise. + print( + "{0:+f}x {1:+f}y {2} {3} FPS".format( + sub_pixel_x, sub_pixel_y, displacement.response(), clock.fps() + ) + ) else: print(clock.fps()) diff --git a/scripts/examples/01-Camera/02-Optical-Flow/differential-rotation-scale.py b/scripts/examples/01-Camera/02-Optical-Flow/differential-rotation-scale.py index ea00004a6..bfe632d2f 100644 --- a/scripts/examples/01-Camera/02-Optical-Flow/differential-rotation-scale.py +++ b/scripts/examples/01-Camera/02-Optical-Flow/differential-rotation-scale.py @@ -4,32 +4,32 @@ # rotation/scale by comparing the current and the previous # image against each other. Note that only rotation/scale is # handled - not X and Y translation in this mode. - +# # To run this demo effectively please mount your OpenMV Cam on a steady # base and SLOWLY rotate the camera around the lens and move the camera # forward/backwards to see the numbers change. # I.e. Z direction changes only. - -import sensor -import time -import math - -# NOTE!!! 
You have to use a small power of 2 resolution when using +# +# NOTE You have to use a small power of 2 resolution when using # find_displacement(). This is because the algorithm is powered by # something called phase correlation which does the image comparison # using FFTs. A non-power of 2 resolution requires padding to a power # of 2 which reduces the usefulness of the algorithm results. Please # use a resolution like B64X64 or B64X32 (2x faster). - +# # Your OpenMV Cam supports power of 2 resolutions of 64x32, 64x64, # 128x64, and 128x128. If you want a resolution of 32x32 you can create # it by doing "img.pool(2, 2)" on a 64x64 image. -sensor.reset() # Reset and initialize the sensor. -sensor.set_pixformat(sensor.RGB565) # Set pixel format to RGB565 (or GRAYSCALE) -sensor.set_framesize(sensor.B64X64) # Set frame size to 64x64... (or 64x32)... -sensor.skip_frames(time = 2000) # Wait for settings take effect. -clock = time.clock() # Create a clock object to track the FPS. +import sensor +import time +import math + +sensor.reset() # Reset and initialize the sensor. +sensor.set_pixformat(sensor.RGB565) # Set pixel format to RGB565 (or GRAYSCALE) +sensor.set_framesize(sensor.B64X64) # Set frame size to 64x64... (or 64x32)... +sensor.skip_frames(time=2000) # Wait for settings take effect. +clock = time.clock() # Create a clock object to track the FPS. # Take from the main frame buffer's RAM to allocate a second frame buffer. # There's a lot more RAM in the frame buffer than in the MicroPython heap. @@ -38,21 +38,21 @@ clock = time.clock() # Create a clock object to track the FPS. extra_fb = sensor.alloc_extra_fb(sensor.width(), sensor.height(), sensor.RGB565) extra_fb.replace(sensor.snapshot()) -while(True): - clock.tick() # Track elapsed milliseconds between snapshots(). - img = sensor.snapshot() # Take a picture and return the image. +while True: + clock.tick() # Track elapsed milliseconds between snapshots(). + img = sensor.snapshot() # Take a picture and return the image. # This algorithm is hard to test without a perfect jig... So, here's a cheat to see it works. # Put in a z_rotation value below and you should see the r output be equal to that. - if(0): + if 0: expected_rotation = 20.0 extra_fb.rotation_corr(z_rotation=(-expected_rotation)) # This algorithm is hard to test without a perfect jig... So, here's a cheat to see it works. # Put in a zoom value below and you should see the z output be equal to that. - if(0): + if 0: expected_zoom = 0.8 - extra_fb.rotation_corr(zoom=(2.00-expected_zoom)) + extra_fb.rotation_corr(zoom=(2.00 - expected_zoom)) displacement = extra_fb.find_displacement(img, logpolar=True) extra_fb.replace(img) @@ -61,9 +61,13 @@ while(True): rotation_change = int(math.degrees(displacement.rotation()) * 5) / 5.0 zoom_amount = displacement.scale() - if(displacement.response() > 0.1): # Below 0.1 or so (YMMV) and the results are just noise. - print("{0:+f}r {1:+f}z {2} {3} FPS".format(rotation_change, zoom_amount, \ - displacement.response(), - clock.fps())) + if ( + displacement.response() > 0.1 + ): # Below 0.1 or so (YMMV) and the results are just noise. 
+ print( + "{0:+f}r {1:+f}z {2} {3} FPS".format( + rotation_change, zoom_amount, displacement.response(), clock.fps() + ) + ) else: print(clock.fps()) diff --git a/scripts/examples/01-Camera/02-Optical-Flow/differential-translation.py b/scripts/examples/01-Camera/02-Optical-Flow/differential-translation.py index fbefae11a..1f27de745 100644 --- a/scripts/examples/01-Camera/02-Optical-Flow/differential-translation.py +++ b/scripts/examples/01-Camera/02-Optical-Flow/differential-translation.py @@ -4,31 +4,31 @@ # in the X and Y direction by comparing the current and the previous # image against each other. Note that only X and Y translation is # handled - not rotation/scale in this mode. - +# # To run this demo effectively please mount your OpenMV Cam on a steady # base and QUICKLY translate it to the left, right, up, and down and # watch the numbers change. Note that you can see displacement numbers # up +- half of the hoizontal and vertical resolution. - -import sensor -import time - -# NOTE!!! You have to use a small power of 2 resolution when using +# +# NOTE You have to use a small power of 2 resolution when using # find_displacement(). This is because the algorithm is powered by # something called phase correlation which does the image comparison # using FFTs. A non-power of 2 resolution requires padding to a power # of 2 which reduces the usefulness of the algorithm results. Please # use a resolution like B64X64 or B64X32 (2x faster). - +# # Your OpenMV Cam supports power of 2 resolutions of 64x32, 64x64, # 128x64, and 128x128. If you want a resolution of 32x32 you can create # it by doing "img.pool(2, 2)" on a 64x64 image. -sensor.reset() # Reset and initialize the sensor. -sensor.set_pixformat(sensor.RGB565) # Set pixel format to RGB565 (or GRAYSCALE) -sensor.set_framesize(sensor.B64X64) # Set frame size to 64x64... (or 64x32)... -sensor.skip_frames(time = 2000) # Wait for settings take effect. -clock = time.clock() # Create a clock object to track the FPS. +import sensor +import time + +sensor.reset() # Reset and initialize the sensor. +sensor.set_pixformat(sensor.RGB565) # Set pixel format to RGB565 (or GRAYSCALE) +sensor.set_framesize(sensor.B64X64) # Set frame size to 64x64... (or 64x32)... +sensor.skip_frames(time=2000) # Wait for settings take effect. +clock = time.clock() # Create a clock object to track the FPS. # Take from the main frame buffer's RAM to allocate a second frame buffer. # There's a lot more RAM in the frame buffer than in the MicroPython heap. @@ -37,9 +37,9 @@ clock = time.clock() # Create a clock object to track the FPS. extra_fb = sensor.alloc_extra_fb(sensor.width(), sensor.height(), sensor.RGB565) extra_fb.replace(sensor.snapshot()) -while(True): - clock.tick() # Track elapsed milliseconds between snapshots(). - img = sensor.snapshot() # Take a picture and return the image. +while True: + clock.tick() # Track elapsed milliseconds between snapshots(). + img = sensor.snapshot() # Take a picture and return the image. displacement = extra_fb.find_displacement(img) extra_fb.replace(img) @@ -48,9 +48,13 @@ while(True): sub_pixel_x = int(displacement.x_translation() * 5) / 5.0 sub_pixel_y = int(displacement.y_translation() * 5) / 5.0 - if(displacement.response() > 0.1): # Below 0.1 or so (YMMV) and the results are just noise. - print("{0:+f}x {1:+f}y {2} {3} FPS".format(sub_pixel_x, sub_pixel_y, - displacement.response(), - clock.fps())) + if ( + displacement.response() > 0.1 + ): # Below 0.1 or so (YMMV) and the results are just noise. 
+ print( + "{0:+f}x {1:+f}y {2} {3} FPS".format( + sub_pixel_x, sub_pixel_y, displacement.response(), clock.fps() + ) + ) else: print(clock.fps()) diff --git a/scripts/examples/01-Camera/02-Optical-Flow/image-patches-absolute-rotation-scale.py b/scripts/examples/01-Camera/02-Optical-Flow/image-patches-absolute-rotation-scale.py index 3eda1db97..8ce1e2a89 100644 --- a/scripts/examples/01-Camera/02-Optical-Flow/image-patches-absolute-rotation-scale.py +++ b/scripts/examples/01-Camera/02-Optical-Flow/image-patches-absolute-rotation-scale.py @@ -11,37 +11,35 @@ # # NOTE that surfaces need to have some type of "edge" on them for the # algorithm to work. A featureless surface produces crazy results. - -# NOTE: Unless you have a very nice test rig this example is hard to see usefulness of... - -BLOCK_W = 16 # pow2 -BLOCK_H = 16 # pow2 - +# # To run this demo effectively please mount your OpenMV Cam on a steady # base and SLOWLY rotate the camera around the lens and move the camera # forward/backwards to see the numbers change. # I.e. Z direction changes only. - -import sensor -import time -import math - -# NOTE!!! You have to use a small power of 2 resolution when using +# +# NOTE You have to use a small power of 2 resolution when using # find_displacement(). This is because the algorithm is powered by # something called phase correlation which does the image comparison # using FFTs. A non-power of 2 resolution requires padding to a power # of 2 which reduces the usefulness of the algorithm results. Please # use a resolution like B128X128 or B128X64 (2x faster). - +# # Your OpenMV Cam supports power of 2 resolutions of 64x32, 64x64, # 128x64, and 128x128. If you want a resolution of 32x32 you can create # it by doing "img.pool(2, 2)" on a 64x64 image. -sensor.reset() # Reset and initialize the sensor. -sensor.set_pixformat(sensor.GRAYSCALE) # Set pixel format to GRAYSCALE (or RGB565) +import sensor +import time +import math + +BLOCK_W = 16 # pow2 +BLOCK_H = 16 # pow2 + +sensor.reset() # Reset and initialize the sensor. +sensor.set_pixformat(sensor.GRAYSCALE) # Set pixel format to GRAYSCALE (or RGB565) sensor.set_framesize(sensor.B128X128) # Set frame size to 128x128... (or 128x64)... -sensor.skip_frames(time = 2000) # Wait for settings take effect. -clock = time.clock() # Create a clock object to track the FPS. +sensor.skip_frames(time=2000) # Wait for settings take effect. +clock = time.clock() # Create a clock object to track the FPS. # Take from the main frame buffer's RAM to allocate a second frame buffer. # There's a lot more RAM in the frame buffer than in the MicroPython heap. @@ -50,26 +48,46 @@ clock = time.clock() # Create a clock object to track the FPS. extra_fb = sensor.alloc_extra_fb(sensor.width(), sensor.height(), sensor.GRAYSCALE) extra_fb.replace(sensor.snapshot()) -while(True): - clock.tick() # Track elapsed milliseconds between snapshots(). - img = sensor.snapshot() # Take a picture and return the image. +while True: + clock.tick() # Track elapsed milliseconds between snapshots(). + img = sensor.snapshot() # Take a picture and return the image. for y in range(0, sensor.height(), BLOCK_H): for x in range(0, sensor.width(), BLOCK_W): # For this example we never update the old image to measure absolute change. 
-            displacement = extra_fb.find_displacement(img, logpolar=True, \
-                roi = (x, y, BLOCK_W, BLOCK_H), template_roi = (x, y, BLOCK_W, BLOCK_H))
+            displacement = extra_fb.find_displacement(
+                img,
+                logpolar=True,
+                roi=(x, y, BLOCK_W, BLOCK_H),
+                template_roi=(x, y, BLOCK_W, BLOCK_H),
+            )
             # Below 0.1 or so (YMMV) and the results are just noise.
-            if(displacement.response() > 0.1):
+            if displacement.response() > 0.1:
                 rotation_change = displacement.rotation()
                 zoom_amount = displacement.scale()
-                pixel_x = x + (BLOCK_W//2) + int(math.sin(rotation_change) * zoom_amount * (BLOCK_W//4))
-                pixel_y = y + (BLOCK_H//2) + int(math.cos(rotation_change) * zoom_amount * (BLOCK_H//4))
-                img.draw_line((x + BLOCK_W//2, y + BLOCK_H//2, pixel_x, pixel_y), \
-                    color = 255)
+                pixel_x = (
+                    x
+                    + (BLOCK_W // 2)
+                    + int(math.sin(rotation_change) * zoom_amount * (BLOCK_W // 4))
+                )
+                pixel_y = (
+                    y
+                    + (BLOCK_H // 2)
+                    + int(math.cos(rotation_change) * zoom_amount * (BLOCK_H // 4))
+                )
+                img.draw_line(
+                    (x + BLOCK_W // 2, y + BLOCK_H // 2, pixel_x, pixel_y), color=255
+                )
             else:
-                img.draw_line((x + BLOCK_W//2, y + BLOCK_H//2, x + BLOCK_W//2, y + BLOCK_H//2), \
-                    color = 0)
+                img.draw_line(
+                    (
+                        x + BLOCK_W // 2,
+                        y + BLOCK_H // 2,
+                        x + BLOCK_W // 2,
+                        y + BLOCK_H // 2,
+                    ),
+                    color=0,
+                )
     print(clock.fps())
diff --git a/scripts/examples/01-Camera/02-Optical-Flow/image-patches-absolute-translation.py b/scripts/examples/01-Camera/02-Optical-Flow/image-patches-absolute-translation.py
index 9525ef75b..262ae2eb1 100644
--- a/scripts/examples/01-Camera/02-Optical-Flow/image-patches-absolute-translation.py
+++ b/scripts/examples/01-Camera/02-Optical-Flow/image-patches-absolute-translation.py
@@ -11,34 +11,34 @@
 #
 # NOTE that surfaces need to have some type of "edge" on them for the
 # algorithm to work. A featureless surface produces crazy results.
-
-BLOCK_W = 16 # pow2
-BLOCK_H = 16 # pow2
-
+#
 # To run this demo effectively please mount your OpenMV Cam on a steady
 # base and SLOWLY translate it to the left, right, up, and down and
 # watch the numbers change. Note that you can see displacement numbers
 # up +- half of the horizontal and vertical resolution.
-
-import sensor
-import time
-
-# NOTE!!! You have to use a small power of 2 resolution when using
+#
+# NOTE: You have to use a small power of 2 resolution when using
 # find_displacement(). This is because the algorithm is powered by
 # something called phase correlation which does the image comparison
 # using FFTs. A non-power of 2 resolution requires padding to a power
 # of 2 which reduces the usefulness of the algorithm results. Please
 # use a resolution like B128X128 or B128X64 (2x faster).
-
+#
 # Your OpenMV Cam supports power of 2 resolutions of 64x32, 64x64,
 # 128x64, and 128x128. If you want a resolution of 32x32 you can create
 # it by doing "img.pool(2, 2)" on a 64x64 image.
-sensor.reset() # Reset and initialize the sensor.
-sensor.set_pixformat(sensor.GRAYSCALE) # Set pixel format to GRAYSCALE (or RGB565)
+import sensor
+import time
+
+BLOCK_W = 16  # pow2
+BLOCK_H = 16  # pow2
+
+sensor.reset()  # Reset and initialize the sensor.
+sensor.set_pixformat(sensor.GRAYSCALE)  # Set pixel format to GRAYSCALE (or RGB565)
 sensor.set_framesize(sensor.B128X128)  # Set frame size to 128x128... (or 128x64)...
-sensor.skip_frames(time = 2000) # Wait for settings take effect.
-clock = time.clock() # Create a clock object to track the FPS.
+sensor.skip_frames(time=2000)  # Wait for settings to take effect.
+clock = time.clock()  # Create a clock object to track the FPS.
 # Take from the main frame buffer's RAM to allocate a second frame buffer.
 # There's a lot more RAM in the frame buffer than in the MicroPython heap.
@@ -47,24 +47,33 @@ clock = time.clock() # Create a clock object to track the FPS.
 extra_fb = sensor.alloc_extra_fb(sensor.width(), sensor.height(), sensor.GRAYSCALE)
 extra_fb.replace(sensor.snapshot())
-while(True):
-    clock.tick() # Track elapsed milliseconds between snapshots().
-    img = sensor.snapshot() # Take a picture and return the image.
+while True:
+    clock.tick()  # Track elapsed milliseconds between snapshots().
+    img = sensor.snapshot()  # Take a picture and return the image.
     for y in range(0, sensor.height(), BLOCK_H):
         for x in range(0, sensor.width(), BLOCK_W):
             # For this example we never update the old image to measure absolute change.
-            displacement = extra_fb.find_displacement(img, \
-                roi = (x, y, BLOCK_W, BLOCK_H), template_roi = (x, y, BLOCK_W, BLOCK_H))
+            displacement = extra_fb.find_displacement(
+                img, roi=(x, y, BLOCK_W, BLOCK_H), template_roi=(x, y, BLOCK_W, BLOCK_H)
+            )
             # Below 0.1 or so (YMMV) and the results are just noise.
-            if(displacement.response() > 0.1):
-                pixel_x = x + (BLOCK_W//2) + int(displacement.x_translation())
-                pixel_y = y + (BLOCK_H//2) + int(displacement.y_translation())
-                img.draw_line((x + BLOCK_W//2, y + BLOCK_H//2, pixel_x, pixel_y), \
-                    color = 255)
+            if displacement.response() > 0.1:
+                pixel_x = x + (BLOCK_W // 2) + int(displacement.x_translation())
+                pixel_y = y + (BLOCK_H // 2) + int(displacement.y_translation())
+                img.draw_line(
+                    (x + BLOCK_W // 2, y + BLOCK_H // 2, pixel_x, pixel_y), color=255
+                )
             else:
-                img.draw_line((x + BLOCK_W//2, y + BLOCK_H//2, x + BLOCK_W//2, y + BLOCK_H//2), \
-                    color = 0)
+                img.draw_line(
+                    (
+                        x + BLOCK_W // 2,
+                        y + BLOCK_H // 2,
+                        x + BLOCK_W // 2,
+                        y + BLOCK_H // 2,
+                    ),
+                    color=0,
+                )
     print(clock.fps())
diff --git a/scripts/examples/01-Camera/02-Optical-Flow/image-patches-differential-rotation-scale.py b/scripts/examples/01-Camera/02-Optical-Flow/image-patches-differential-rotation-scale.py
index 5b32fd8e0..00721a29a 100644
--- a/scripts/examples/01-Camera/02-Optical-Flow/image-patches-differential-rotation-scale.py
+++ b/scripts/examples/01-Camera/02-Optical-Flow/image-patches-differential-rotation-scale.py
@@ -11,37 +11,34 @@
 #
 # NOTE that surfaces need to have some type of "edge" on them for the
 # algorithm to work. A featureless surface produces crazy results.
-
-# NOTE: Unless you have a very nice test rig this example is hard to see usefulness of...
-
-BLOCK_W = 16 # pow2
-BLOCK_H = 16 # pow2
-
+#
 # To run this demo effectively please mount your OpenMV Cam on a steady
 # base and SLOWLY rotate the camera around the lens and move the camera
 # forward/backwards to see the numbers change.
 # I.e. Z direction changes only.
-
-import sensor
-import time
-import math
-
-# NOTE!!! You have to use a small power of 2 resolution when using
+#
+# NOTE: You have to use a small power of 2 resolution when using
 # find_displacement(). This is because the algorithm is powered by
 # something called phase correlation which does the image comparison
 # using FFTs. A non-power of 2 resolution requires padding to a power
 # of 2 which reduces the usefulness of the algorithm results. Please
 # use a resolution like B128X128 or B128X64 (2x faster).
-
+#
 # Your OpenMV Cam supports power of 2 resolutions of 64x32, 64x64,
 # 128x64, and 128x128. If you want a resolution of 32x32 you can create
 # it by doing "img.pool(2, 2)" on a 64x64 image.
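The comment block above closes with the img.pool(2, 2) tip for getting a 32x32 input. A minimal sketch of what that looks like in practice; it only recombines calls that already appear in these optical-flow examples, and assumes pool() returns the image object for chaining, as OpenMV image methods generally do:

import sensor
import time

sensor.reset()
sensor.set_pixformat(sensor.GRAYSCALE)
sensor.set_framesize(sensor.B64X64)  # Capture at 64x64, a supported power of 2.
sensor.skip_frames(time=2000)

extra_fb = sensor.alloc_extra_fb(32, 32, sensor.GRAYSCALE)
extra_fb.replace(sensor.snapshot().pool(2, 2))  # Keep a 32x32 reference frame.

clock = time.clock()
while True:
    clock.tick()
    img = sensor.snapshot().pool(2, 2)  # 64x64 -> 32x32, still a power of 2.
    displacement = extra_fb.find_displacement(img)
    extra_fb.replace(img)
    if displacement.response() > 0.1:  # Same noise floor the examples use.
        print(displacement.x_translation(), displacement.y_translation(), clock.fps())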
+import sensor
+import time
+import math
-sensor.reset() # Reset and initialize the sensor.
-sensor.set_pixformat(sensor.GRAYSCALE) # Set pixel format to GRAYSCALE (or RGB565)
+BLOCK_W = 16  # pow2
+BLOCK_H = 16  # pow2
+
+sensor.reset()  # Reset and initialize the sensor.
+sensor.set_pixformat(sensor.GRAYSCALE)  # Set pixel format to GRAYSCALE (or RGB565)
 sensor.set_framesize(sensor.B128X128)  # Set frame size to 128x128... (or 128x64)...
-sensor.skip_frames(time = 2000) # Wait for settings take effect.
-clock = time.clock() # Create a clock object to track the FPS.
+sensor.skip_frames(time=2000)  # Wait for settings to take effect.
+clock = time.clock()  # Create a clock object to track the FPS.
 # Take from the main frame buffer's RAM to allocate a second frame buffer.
 # There's a lot more RAM in the frame buffer than in the MicroPython heap.
@@ -50,26 +47,46 @@ clock = time.clock() # Create a clock object to track the FPS.
 extra_fb = sensor.alloc_extra_fb(sensor.width(), sensor.height(), sensor.GRAYSCALE)
 extra_fb.replace(sensor.snapshot())
-while(True):
-    clock.tick() # Track elapsed milliseconds between snapshots().
-    img = sensor.snapshot() # Take a picture and return the image.
+while True:
+    clock.tick()  # Track elapsed milliseconds between snapshots().
+    img = sensor.snapshot()  # Take a picture and return the image.
     for y in range(0, sensor.height(), BLOCK_H):
         for x in range(0, sensor.width(), BLOCK_W):
-            displacement = extra_fb.find_displacement(img, logpolar=True, \
-                roi = (x, y, BLOCK_W, BLOCK_H), template_roi = (x, y, BLOCK_W, BLOCK_H))
+            displacement = extra_fb.find_displacement(
+                img,
+                logpolar=True,
+                roi=(x, y, BLOCK_W, BLOCK_H),
+                template_roi=(x, y, BLOCK_W, BLOCK_H),
+            )
             # Below 0.1 or so (YMMV) and the results are just noise.
-            if(displacement.response() > 0.1):
+            if displacement.response() > 0.1:
                 rotation_change = displacement.rotation()
                 zoom_amount = 1.0 + displacement.scale()
-                pixel_x = x + (BLOCK_W//2) + int(math.sin(rotation_change) * zoom_amount * (BLOCK_W//4))
-                pixel_y = y + (BLOCK_H//2) + int(math.cos(rotation_change) * zoom_amount * (BLOCK_H//4))
-                img.draw_line((x + BLOCK_W//2, y + BLOCK_H//2, pixel_x, pixel_y), \
-                    color = 255)
+                pixel_x = (
+                    x
+                    + (BLOCK_W // 2)
+                    + int(math.sin(rotation_change) * zoom_amount * (BLOCK_W // 4))
+                )
+                pixel_y = (
+                    y
+                    + (BLOCK_H // 2)
+                    + int(math.cos(rotation_change) * zoom_amount * (BLOCK_H // 4))
+                )
+                img.draw_line(
+                    (x + BLOCK_W // 2, y + BLOCK_H // 2, pixel_x, pixel_y), color=255
+                )
             else:
-                img.draw_line((x + BLOCK_W//2, y + BLOCK_H//2, x + BLOCK_W//2, y + BLOCK_H//2), \
-                    color = 0)
+                img.draw_line(
+                    (
+                        x + BLOCK_W // 2,
+                        y + BLOCK_H // 2,
+                        x + BLOCK_W // 2,
+                        y + BLOCK_H // 2,
+                    ),
+                    color=0,
+                )
     extra_fb.replace(img)
     print(clock.fps())
diff --git a/scripts/examples/01-Camera/02-Optical-Flow/image-patches-differential-translation.py b/scripts/examples/01-Camera/02-Optical-Flow/image-patches-differential-translation.py
index 106be8342..0515c7e31 100644
--- a/scripts/examples/01-Camera/02-Optical-Flow/image-patches-differential-translation.py
+++ b/scripts/examples/01-Camera/02-Optical-Flow/image-patches-differential-translation.py
@@ -11,34 +11,33 @@
 #
 # NOTE that surfaces need to have some type of "edge" on them for the
 # algorithm to work. A featureless surface produces crazy results.
-
-BLOCK_W = 16 # pow2
-BLOCK_H = 16 # pow2
-
+#
 # To run this demo effectively please mount your OpenMV Cam on a steady
 # base and SLOWLY translate it to the left, right, up, and down and
 # watch the numbers change.
Note that you can see displacement numbers
# up +- half of the horizontal and vertical resolution.
-
-import sensor
-import time
-
-# NOTE!!! You have to use a small power of 2 resolution when using
+#
+# NOTE: You have to use a small power of 2 resolution when using
 # find_displacement(). This is because the algorithm is powered by
 # something called phase correlation which does the image comparison
 # using FFTs. A non-power of 2 resolution requires padding to a power
 # of 2 which reduces the usefulness of the algorithm results. Please
 # use a resolution like B128X128 or B128X64 (2x faster).
-
+#
 # Your OpenMV Cam supports power of 2 resolutions of 64x32, 64x64,
 # 128x64, and 128x128. If you want a resolution of 32x32 you can create
 # it by doing "img.pool(2, 2)" on a 64x64 image.
+import sensor
+import time
-sensor.reset() # Reset and initialize the sensor.
-sensor.set_pixformat(sensor.GRAYSCALE) # Set pixel format to GRAYSCALE (or RGB565)
+BLOCK_W = 16  # pow2
+BLOCK_H = 16  # pow2
+
+sensor.reset()  # Reset and initialize the sensor.
+sensor.set_pixformat(sensor.GRAYSCALE)  # Set pixel format to GRAYSCALE (or RGB565)
 sensor.set_framesize(sensor.B128X128)  # Set frame size to 128x128... (or 128x64)...
-sensor.skip_frames(time = 2000) # Wait for settings take effect.
-clock = time.clock() # Create a clock object to track the FPS.
+sensor.skip_frames(time=2000)  # Wait for settings to take effect.
+clock = time.clock()  # Create a clock object to track the FPS.
 # Take from the main frame buffer's RAM to allocate a second frame buffer.
 # There's a lot more RAM in the frame buffer than in the MicroPython heap.
@@ -47,24 +46,33 @@ clock = time.clock() # Create a clock object to track the FPS.
 extra_fb = sensor.alloc_extra_fb(sensor.width(), sensor.height(), sensor.GRAYSCALE)
 extra_fb.replace(sensor.snapshot())
-while(True):
-    clock.tick() # Track elapsed milliseconds between snapshots().
-    img = sensor.snapshot() # Take a picture and return the image.
+while True:
+    clock.tick()  # Track elapsed milliseconds between snapshots().
+    img = sensor.snapshot()  # Take a picture and return the image.
     for y in range(0, sensor.height(), BLOCK_H):
         for x in range(0, sensor.width(), BLOCK_W):
-            displacement = extra_fb.find_displacement(img, \
-                roi = (x, y, BLOCK_W, BLOCK_H), template_roi = (x, y, BLOCK_W, BLOCK_H))
+            displacement = extra_fb.find_displacement(
+                img, roi=(x, y, BLOCK_W, BLOCK_H), template_roi=(x, y, BLOCK_W, BLOCK_H)
+            )
             # Below 0.1 or so (YMMV) and the results are just noise.
-            if(displacement.response() > 0.1):
-                pixel_x = x + (BLOCK_W//2) + int(displacement.x_translation())
-                pixel_y = y + (BLOCK_H//2) + int(displacement.y_translation())
-                img.draw_line((x + BLOCK_W//2, y + BLOCK_H//2, pixel_x, pixel_y), \
-                    color = 255)
+            if displacement.response() > 0.1:
+                pixel_x = x + (BLOCK_W // 2) + int(displacement.x_translation())
+                pixel_y = y + (BLOCK_H // 2) + int(displacement.y_translation())
+                img.draw_line(
+                    (x + BLOCK_W // 2, y + BLOCK_H // 2, pixel_x, pixel_y), color=255
+                )
             else:
-                img.draw_line((x + BLOCK_W//2, y + BLOCK_H//2, x + BLOCK_W//2, y + BLOCK_H//2), \
-                    color = 0)
+                img.draw_line(
+                    (
+                        x + BLOCK_W // 2,
+                        y + BLOCK_H // 2,
+                        x + BLOCK_W // 2,
+                        y + BLOCK_H // 2,
+                    ),
+                    color=0,
+                )
     extra_fb.replace(img)
     print(clock.fps())
diff --git a/scripts/examples/01-Camera/03-Event-Cameras/frogeye2020.py b/scripts/examples/01-Camera/03-Event-Cameras/frogeye2020.py
index e8e25c6db..a3774e4f6 100644
--- a/scripts/examples/01-Camera/03-Event-Cameras/frogeye2020.py
+++ b/scripts/examples/01-Camera/03-Event-Cameras/frogeye2020.py
@@ -7,9 +7,9 @@ import sensor
 import image
 import time
-sensor.reset()
-sensor.set_pixformat(sensor.GRAYSCALE)
-sensor.set_framesize(sensor.QVGA)
+sensor.reset()  # Reset and initialize the sensor.
+sensor.set_pixformat(sensor.GRAYSCALE)  # Set pixel format to GRAYSCALE
+sensor.set_framesize(sensor.QVGA)  # Set frame size to QVGA (320x240)
 palette = image.Image(1, 256, sensor.RGB565)
@@ -27,7 +27,7 @@ for i in range(192, 256):
 clock = time.clock()
-while(True):
+while True:
     clock.tick()
     img = sensor.snapshot()
diff --git a/scripts/examples/01-Camera/03-Event-Cameras/frogeye2020_with_tracking.py b/scripts/examples/01-Camera/03-Event-Cameras/frogeye2020_with_tracking.py
index a99205804..700ba9b8d 100644
--- a/scripts/examples/01-Camera/03-Event-Cameras/frogeye2020_with_tracking.py
+++ b/scripts/examples/01-Camera/03-Event-Cameras/frogeye2020_with_tracking.py
@@ -7,9 +7,9 @@ import sensor
 import image
 import time
-sensor.reset()
-sensor.set_pixformat(sensor.GRAYSCALE)
-sensor.set_framesize(sensor.QVGA)
+sensor.reset()  # Reset and initialize the sensor.
+sensor.set_pixformat(sensor.GRAYSCALE)  # Set pixel format to GRAYSCALE
+sensor.set_framesize(sensor.QVGA)  # Set frame size to QVGA (320x240)
 palette = image.Image(1, 256, sensor.RGB565)
@@ -27,7 +27,7 @@ for i in range(192, 256):
 clock = time.clock()
-while(True):
+while True:
     clock.tick()
     img = sensor.snapshot()
@@ -38,8 +38,9 @@ while(True):
     # Cleanup noise.
     img.erode(1)
-    blobs = img.find_blobs([(0, 0)], invert=True,
-            pixels_threshold=10, area_threshold=10, merge=False)
+    blobs = img.find_blobs(
+        [(0, 0)], invert=True, pixels_threshold=10, area_threshold=10, merge=False
+    )
     for blob in blobs:
         img.draw_rectangle(blob.rect(), color=(0, 255, 0))
diff --git a/scripts/examples/01-Camera/04-Global-Shutter/high_fps.py b/scripts/examples/01-Camera/04-Global-Shutter/high_fps.py
index bae9ea673..feeabd2a5 100644
--- a/scripts/examples/01-Camera/04-Global-Shutter/high_fps.py
+++ b/scripts/examples/01-Camera/04-Global-Shutter/high_fps.py
@@ -15,16 +15,16 @@ import sensor
 import time
-sensor.reset() # Reset and initialize the sensor.
-sensor.set_pixformat(sensor.GRAYSCALE) # Set pixel format to GRAYSCALE
-sensor.set_framesize(sensor.QQVGA) # Set frame size to QQVGA (160x120) - make smaller to go faster
-sensor.skip_frames(time = 2000) # Wait for settings take effect.
-clock = time.clock() # Create a clock object to track the FPS.
+sensor.reset()  # Reset and initialize the sensor.
+sensor.set_pixformat(sensor.GRAYSCALE)  # Set pixel format to GRAYSCALE
+sensor.set_framesize(sensor.QQVGA)  # Set frame size to QQVGA (160x120)
+sensor.skip_frames(time=2000)  # Wait for settings to take effect.
+clock = time.clock()  # Create a clock object to track the FPS.
-sensor.set_auto_exposure(True, exposure_us=5000) # make smaller to go faster
+sensor.set_auto_exposure(True, exposure_us=5000)  # make smaller to go faster
-while(True):
-    clock.tick() # Update the FPS clock.
-    img = sensor.snapshot() # Take a picture and return the image.
-    print(clock.fps()) # Note: OpenMV Cam runs about half as fast when connected
-                       # to the IDE. The FPS should increase once disconnected.
+while True:
+    clock.tick()  # Update the FPS clock.
+    img = sensor.snapshot()  # Take a picture and return the image.
+    print(clock.fps())  # Note: OpenMV Cam runs about half as fast when connected
+    # to the IDE. The FPS should increase once disconnected.
diff --git a/scripts/examples/01-Camera/04-Global-Shutter/triggered_mode.py b/scripts/examples/01-Camera/04-Global-Shutter/triggered_mode.py
index 764615e0a..bb7ff02bd 100644
--- a/scripts/examples/01-Camera/04-Global-Shutter/triggered_mode.py
+++ b/scripts/examples/01-Camera/04-Global-Shutter/triggered_mode.py
@@ -15,16 +15,16 @@ import sensor
 import time
-sensor.reset() # Reset and initialize the sensor.
-sensor.set_pixformat(sensor.GRAYSCALE) # Set pixel format to GRAYSCALE
-sensor.set_framesize(sensor.VGA) # Set frame size to VGA (640x480)
-sensor.skip_frames(time = 2000) # Wait for settings take effect.
-clock = time.clock() # Create a clock object to track the FPS.
+sensor.reset()  # Reset and initialize the sensor.
+sensor.set_pixformat(sensor.GRAYSCALE)  # Set pixel format to GRAYSCALE
+sensor.set_framesize(sensor.VGA)  # Set frame size to VGA (640x480)
+sensor.skip_frames(time=2000)  # Wait for settings to take effect.
+clock = time.clock()  # Create a clock object to track the FPS.
 sensor.ioctl(sensor.IOCTL_SET_TRIGGERED_MODE, True)
-while(True):
-    clock.tick() # Update the FPS clock.
-    img = sensor.snapshot() # Take a picture and return the image.
-    print(clock.fps()) # Note: OpenMV Cam runs about half as fast when connected
-                       # to the IDE. The FPS should increase once disconnected.
+while True:
+    clock.tick()  # Update the FPS clock.
+    img = sensor.snapshot()  # Take a picture and return the image.
+    print(clock.fps())  # Note: OpenMV Cam runs about half as fast when connected
+    # to the IDE. The FPS should increase once disconnected.
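In triggered mode each snapshot() should start a fresh capture rather than returning the next free-running frame, which makes per-capture latency easy to measure. A rough sketch, assuming MicroPython's standard time.ticks_ms()/time.ticks_diff() helpers (not used elsewhere in these examples):

import sensor
import time

sensor.reset()
sensor.set_pixformat(sensor.GRAYSCALE)
sensor.set_framesize(sensor.VGA)
sensor.skip_frames(time=2000)
sensor.ioctl(sensor.IOCTL_SET_TRIGGERED_MODE, True)  # Same ioctl as triggered_mode.py.

while True:
    start = time.ticks_ms()
    img = sensor.snapshot()  # Blocks until the newly triggered frame is read out.
    print("capture took %d ms" % time.ticks_diff(time.ticks_ms(), start))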
diff --git a/scripts/examples/01-Camera/05-FLIR-Lepton/lepton_get_object_high_temp.py b/scripts/examples/01-Camera/05-FLIR-Lepton/lepton_get_object_high_temp.py index 7ddf0bfc9..7ec147948 100644 --- a/scripts/examples/01-Camera/05-FLIR-Lepton/lepton_get_object_high_temp.py +++ b/scripts/examples/01-Camera/05-FLIR-Lepton/lepton_get_object_high_temp.py @@ -21,7 +21,7 @@ import sensor import time # Color Tracking Thresholds (Grayscale Min, Grayscale Max) -threshold_list = [(100, 255)] # track very hot objects +threshold_list = [(100, 255)] # track very hot objects # Set the target temp range here # 500C is the maximum the Lepton 3.5 sensor can measure @@ -34,10 +34,20 @@ print("Resetting Lepton...") sensor.reset() # Enable measurement mode with high temp sensor.ioctl(sensor.IOCTL_LEPTON_SET_MEASUREMENT_MODE, True, True) -sensor.ioctl(sensor.IOCTL_LEPTON_SET_MEASUREMENT_RANGE, min_temp_in_celsius, max_temp_in_celsius) -print("Lepton Res (%dx%d)" % (sensor.ioctl(sensor.IOCTL_LEPTON_GET_WIDTH), - sensor.ioctl(sensor.IOCTL_LEPTON_GET_HEIGHT))) -print("Radiometry Available: " + ("Yes" if sensor.ioctl(sensor.IOCTL_LEPTON_GET_RADIOMETRY) else "No")) +sensor.ioctl( + sensor.IOCTL_LEPTON_SET_MEASUREMENT_RANGE, min_temp_in_celsius, max_temp_in_celsius +) +print( + "Lepton Res (%dx%d)" + % ( + sensor.ioctl(sensor.IOCTL_LEPTON_GET_WIDTH), + sensor.ioctl(sensor.IOCTL_LEPTON_GET_HEIGHT), + ) +) +print( + "Radiometry Available: " + + ("Yes" if sensor.ioctl(sensor.IOCTL_LEPTON_GET_RADIOMETRY) else "No") +) sensor.set_pixformat(sensor.GRAYSCALE) sensor.set_framesize(sensor.QQVGA) @@ -48,15 +58,29 @@ clock = time.clock() # returned by "find_blobs" below. Change "pixels_threshold" and "area_threshold" if you change the # camera resolution. "merge=True" merges all overlapping blobs in the image. 
-def map_g_to_temp(g): - return ((g * (max_temp_in_celsius - min_temp_in_celsius)) / 255.0) + min_temp_in_celsius -while(True): +def map_g_to_temp(g): + return ( + (g * (max_temp_in_celsius - min_temp_in_celsius)) / 255.0 + ) + min_temp_in_celsius + + +while True: clock.tick() img = sensor.snapshot() - for blob in img.find_blobs(threshold_list, pixels_threshold=200, area_threshold=200, merge=True): + for blob in img.find_blobs( + threshold_list, pixels_threshold=200, area_threshold=200, merge=True + ): stats = img.get_statistics(thresholds=threshold_list, roi=blob.rect()) img.draw_rectangle(blob.rect()) img.draw_cross(blob.cx(), blob.cy()) - img.draw_string(blob.x(), blob.y() - 10, "%.2f C" % map_g_to_temp(stats.mean()), mono_space=False) - print("FPS %f - Lepton Temp: %f C" % (clock.fps(), sensor.ioctl(sensor.IOCTL_LEPTON_GET_FPA_TEMPERATURE))) + img.draw_string( + blob.x(), + blob.y() - 10, + "%.2f C" % map_g_to_temp(stats.mean()), + mono_space=False, + ) + print( + "FPS %f - Lepton Temp: %f C" + % (clock.fps(), sensor.ioctl(sensor.IOCTL_LEPTON_GET_FPA_TEMPERATURE)) + ) diff --git a/scripts/examples/01-Camera/05-FLIR-Lepton/lepton_get_object_temp.py b/scripts/examples/01-Camera/05-FLIR-Lepton/lepton_get_object_temp.py index 6165e728a..4f07b76c7 100644 --- a/scripts/examples/01-Camera/05-FLIR-Lepton/lepton_get_object_temp.py +++ b/scripts/examples/01-Camera/05-FLIR-Lepton/lepton_get_object_temp.py @@ -31,10 +31,20 @@ print("Resetting Lepton...") # These settings are applied on reset sensor.reset() sensor.ioctl(sensor.IOCTL_LEPTON_SET_MEASUREMENT_MODE, True) -sensor.ioctl(sensor.IOCTL_LEPTON_SET_MEASUREMENT_RANGE, min_temp_in_celsius, max_temp_in_celsius) -print("Lepton Res (%dx%d)" % (sensor.ioctl(sensor.IOCTL_LEPTON_GET_WIDTH), - sensor.ioctl(sensor.IOCTL_LEPTON_GET_HEIGHT))) -print("Radiometry Available: " + ("Yes" if sensor.ioctl(sensor.IOCTL_LEPTON_GET_RADIOMETRY) else "No")) +sensor.ioctl( + sensor.IOCTL_LEPTON_SET_MEASUREMENT_RANGE, min_temp_in_celsius, max_temp_in_celsius +) +print( + "Lepton Res (%dx%d)" + % ( + sensor.ioctl(sensor.IOCTL_LEPTON_GET_WIDTH), + sensor.ioctl(sensor.IOCTL_LEPTON_GET_HEIGHT), + ) +) +print( + "Radiometry Available: " + + ("Yes" if sensor.ioctl(sensor.IOCTL_LEPTON_GET_RADIOMETRY) else "No") +) sensor.set_pixformat(sensor.GRAYSCALE) sensor.set_framesize(sensor.QQVGA) @@ -45,15 +55,29 @@ clock = time.clock() # returned by "find_blobs" below. Change "pixels_threshold" and "area_threshold" if you change the # camera resolution. "merge=True" merges all overlapping blobs in the image. 
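Each of these Lepton scripts defines the same map_g_to_temp() helper: a plain linear rescale from the 8-bit grayscale range onto the configured measurement range. A quick endpoint check with made-up bounds (every script sets its own min_temp_in_celsius/max_temp_in_celsius earlier on):

min_temp_in_celsius = 0.0  # Illustrative bounds only.
max_temp_in_celsius = 100.0

def map_g_to_temp(g):
    return ((g * (max_temp_in_celsius - min_temp_in_celsius)) / 255.0) + min_temp_in_celsius

print(map_g_to_temp(0))    # 0.0 - the darkest pixel maps to the range minimum.
print(map_g_to_temp(255))  # 100.0 - the brightest pixel maps to the range maximum.
print(map_g_to_temp(128))  # ~50.2 - mid-gray lands near the middle of the range.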
-def map_g_to_temp(g): - return ((g * (max_temp_in_celsius - min_temp_in_celsius)) / 255.0) + min_temp_in_celsius -while(True): +def map_g_to_temp(g): + return ( + (g * (max_temp_in_celsius - min_temp_in_celsius)) / 255.0 + ) + min_temp_in_celsius + + +while True: clock.tick() img = sensor.snapshot() - for blob in img.find_blobs(threshold_list, pixels_threshold=200, area_threshold=200, merge=True): + for blob in img.find_blobs( + threshold_list, pixels_threshold=200, area_threshold=200, merge=True + ): stats = img.get_statistics(thresholds=threshold_list, roi=blob.rect()) img.draw_rectangle(blob.rect()) img.draw_cross(blob.cx(), blob.cy()) - img.draw_string(blob.x(), blob.y() - 10, "%.2f C" % map_g_to_temp(stats.mean()), mono_space=False) - print("FPS %f - Lepton Temp: %f C" % (clock.fps(), sensor.ioctl(sensor.IOCTL_LEPTON_GET_FPA_TEMPERATURE))) + img.draw_string( + blob.x(), + blob.y() - 10, + "%.2f C" % map_g_to_temp(stats.mean()), + mono_space=False, + ) + print( + "FPS %f - Lepton Temp: %f C" + % (clock.fps(), sensor.ioctl(sensor.IOCTL_LEPTON_GET_FPA_TEMPERATURE)) + ) diff --git a/scripts/examples/01-Camera/05-FLIR-Lepton/lepton_get_object_temp_color.py b/scripts/examples/01-Camera/05-FLIR-Lepton/lepton_get_object_temp_color.py index a9c0cf554..bd2e55300 100644 --- a/scripts/examples/01-Camera/05-FLIR-Lepton/lepton_get_object_temp_color.py +++ b/scripts/examples/01-Camera/05-FLIR-Lepton/lepton_get_object_temp_color.py @@ -31,10 +31,20 @@ print("Resetting Lepton...") # These settings are applied on reset sensor.reset() sensor.ioctl(sensor.IOCTL_LEPTON_SET_MEASUREMENT_MODE, True) -sensor.ioctl(sensor.IOCTL_LEPTON_SET_MEASUREMENT_RANGE, min_temp_in_celsius, max_temp_in_celsius) -print("Lepton Res (%dx%d)" % (sensor.ioctl(sensor.IOCTL_LEPTON_GET_WIDTH), - sensor.ioctl(sensor.IOCTL_LEPTON_GET_HEIGHT))) -print("Radiometry Available: " + ("Yes" if sensor.ioctl(sensor.IOCTL_LEPTON_GET_RADIOMETRY) else "No")) +sensor.ioctl( + sensor.IOCTL_LEPTON_SET_MEASUREMENT_RANGE, min_temp_in_celsius, max_temp_in_celsius +) +print( + "Lepton Res (%dx%d)" + % ( + sensor.ioctl(sensor.IOCTL_LEPTON_GET_WIDTH), + sensor.ioctl(sensor.IOCTL_LEPTON_GET_HEIGHT), + ) +) +print( + "Radiometry Available: " + + ("Yes" if sensor.ioctl(sensor.IOCTL_LEPTON_GET_RADIOMETRY) else "No") +) sensor.set_pixformat(sensor.GRAYSCALE) sensor.set_framesize(sensor.QQVGA) @@ -45,23 +55,43 @@ clock = time.clock() # returned by "find_blobs" below. Change "pixels_threshold" and "area_threshold" if you change the # camera resolution. "merge=True" merges all overlapping blobs in the image. 
-def map_g_to_temp(g): - return ((g * (max_temp_in_celsius - min_temp_in_celsius)) / 255.0) + min_temp_in_celsius -while(True): +def map_g_to_temp(g): + return ( + (g * (max_temp_in_celsius - min_temp_in_celsius)) / 255.0 + ) + min_temp_in_celsius + + +while True: clock.tick() img = sensor.snapshot() blob_stats = [] - blobs = img.find_blobs(threshold_list, pixels_threshold=200, area_threshold=200, merge=True) + blobs = img.find_blobs( + threshold_list, pixels_threshold=200, area_threshold=200, merge=True + ) # Collect stats into a list of tuples for blob in blobs: - blob_stats.append((blob.x(), blob.y(), map_g_to_temp(img.get_statistics(thresholds=threshold_list, - roi=blob.rect()).mean()))) - img.to_rainbow(color_palette=sensor.PALETTE_IRONBOW) # color it + blob_stats.append( + ( + blob.x(), + blob.y(), + map_g_to_temp( + img.get_statistics( + thresholds=threshold_list, roi=blob.rect() + ).mean() + ), + ) + ) + img.to_rainbow(color_palette=sensor.PALETTE_IRONBOW) # color it # Draw stuff on the colored image for blob in blobs: img.draw_rectangle(blob.rect()) img.draw_cross(blob.cx(), blob.cy()) for blob_stat in blob_stats: - img.draw_string(blob_stat[0], blob_stat[1] - 10, "%.2f C" % blob_stat[2], mono_space=False) - print("FPS %f - Lepton Temp: %f C" % (clock.fps(), sensor.ioctl(sensor.IOCTL_LEPTON_GET_FPA_TEMPERATURE))) + img.draw_string( + blob_stat[0], blob_stat[1] - 10, "%.2f C" % blob_stat[2], mono_space=False + ) + print( + "FPS %f - Lepton Temp: %f C" + % (clock.fps(), sensor.ioctl(sensor.IOCTL_LEPTON_GET_FPA_TEMPERATURE)) + ) diff --git a/scripts/examples/01-Camera/05-FLIR-Lepton/lepton_get_object_temp_color_lcd.py b/scripts/examples/01-Camera/05-FLIR-Lepton/lepton_get_object_temp_color_lcd.py index 4e69da4c7..919bb2673 100644 --- a/scripts/examples/01-Camera/05-FLIR-Lepton/lepton_get_object_temp_color_lcd.py +++ b/scripts/examples/01-Camera/05-FLIR-Lepton/lepton_get_object_temp_color_lcd.py @@ -32,10 +32,20 @@ print("Resetting Lepton...") # These settings are applied on reset sensor.reset() sensor.ioctl(sensor.IOCTL_LEPTON_SET_MEASUREMENT_MODE, True) -sensor.ioctl(sensor.IOCTL_LEPTON_SET_MEASUREMENT_RANGE, min_temp_in_celsius, max_temp_in_celsius) -print("Lepton Res (%dx%d)" % (sensor.ioctl(sensor.IOCTL_LEPTON_GET_WIDTH), - sensor.ioctl(sensor.IOCTL_LEPTON_GET_HEIGHT))) -print("Radiometry Available: " + ("Yes" if sensor.ioctl(sensor.IOCTL_LEPTON_GET_RADIOMETRY) else "No")) +sensor.ioctl( + sensor.IOCTL_LEPTON_SET_MEASUREMENT_RANGE, min_temp_in_celsius, max_temp_in_celsius +) +print( + "Lepton Res (%dx%d)" + % ( + sensor.ioctl(sensor.IOCTL_LEPTON_GET_WIDTH), + sensor.ioctl(sensor.IOCTL_LEPTON_GET_HEIGHT), + ) +) +print( + "Radiometry Available: " + + ("Yes" if sensor.ioctl(sensor.IOCTL_LEPTON_GET_RADIOMETRY) else "No") +) sensor.set_pixformat(sensor.GRAYSCALE) sensor.set_framesize(sensor.LCD) @@ -47,24 +57,44 @@ lcd.init() # returned by "find_blobs" below. Change "pixels_threshold" and "area_threshold" if you change the # camera resolution. "merge=True" merges all overlapping blobs in the image. 
-def map_g_to_temp(g): - return ((g * (max_temp_in_celsius - min_temp_in_celsius)) / 255.0) + min_temp_in_celsius -while(True): +def map_g_to_temp(g): + return ( + (g * (max_temp_in_celsius - min_temp_in_celsius)) / 255.0 + ) + min_temp_in_celsius + + +while True: clock.tick() img = sensor.snapshot() blob_stats = [] - blobs = img.find_blobs(threshold_list, pixels_threshold=200, area_threshold=200, merge=True) + blobs = img.find_blobs( + threshold_list, pixels_threshold=200, area_threshold=200, merge=True + ) # Collect stats into a list of tuples for blob in blobs: - blob_stats.append((blob.x(), blob.y(), map_g_to_temp(img.get_statistics(thresholds=threshold_list, - roi=blob.rect()).mean()))) - img.to_rainbow(color_palette=sensor.PALETTE_IRONBOW) # color it + blob_stats.append( + ( + blob.x(), + blob.y(), + map_g_to_temp( + img.get_statistics( + thresholds=threshold_list, roi=blob.rect() + ).mean() + ), + ) + ) + img.to_rainbow(color_palette=sensor.PALETTE_IRONBOW) # color it # Draw stuff on the colored image for blob in blobs: img.draw_rectangle(blob.rect()) img.draw_cross(blob.cx(), blob.cy()) for blob_stat in blob_stats: - img.draw_string(blob_stat[0], blob_stat[1] - 10, "%.2f C" % blob_stat[2], mono_space=False) + img.draw_string( + blob_stat[0], blob_stat[1] - 10, "%.2f C" % blob_stat[2], mono_space=False + ) lcd.display(img) - print("FPS %f - Lepton Temp: %f C" % (clock.fps(), sensor.ioctl(sensor.IOCTL_LEPTON_GET_FPA_TEMPERATURE))) + print( + "FPS %f - Lepton Temp: %f C" + % (clock.fps(), sensor.ioctl(sensor.IOCTL_LEPTON_GET_FPA_TEMPERATURE)) + ) diff --git a/scripts/examples/01-Camera/05-FLIR-Lepton/lepton_get_object_temp_lcd.py b/scripts/examples/01-Camera/05-FLIR-Lepton/lepton_get_object_temp_lcd.py index 7064bfee2..736664a7a 100644 --- a/scripts/examples/01-Camera/05-FLIR-Lepton/lepton_get_object_temp_lcd.py +++ b/scripts/examples/01-Camera/05-FLIR-Lepton/lepton_get_object_temp_lcd.py @@ -32,10 +32,20 @@ print("Resetting Lepton...") # These settings are applied on reset sensor.reset() sensor.ioctl(sensor.IOCTL_LEPTON_SET_MEASUREMENT_MODE, True) -sensor.ioctl(sensor.IOCTL_LEPTON_SET_MEASUREMENT_RANGE, min_temp_in_celsius, max_temp_in_celsius) -print("Lepton Res (%dx%d)" % (sensor.ioctl(sensor.IOCTL_LEPTON_GET_WIDTH), - sensor.ioctl(sensor.IOCTL_LEPTON_GET_HEIGHT))) -print("Radiometry Available: " + ("Yes" if sensor.ioctl(sensor.IOCTL_LEPTON_GET_RADIOMETRY) else "No")) +sensor.ioctl( + sensor.IOCTL_LEPTON_SET_MEASUREMENT_RANGE, min_temp_in_celsius, max_temp_in_celsius +) +print( + "Lepton Res (%dx%d)" + % ( + sensor.ioctl(sensor.IOCTL_LEPTON_GET_WIDTH), + sensor.ioctl(sensor.IOCTL_LEPTON_GET_HEIGHT), + ) +) +print( + "Radiometry Available: " + + ("Yes" if sensor.ioctl(sensor.IOCTL_LEPTON_GET_RADIOMETRY) else "No") +) sensor.set_pixformat(sensor.GRAYSCALE) sensor.set_framesize(sensor.LCD) @@ -47,16 +57,30 @@ lcd.init() # returned by "find_blobs" below. Change "pixels_threshold" and "area_threshold" if you change the # camera resolution. "merge=True" merges all overlapping blobs in the image. 
-def map_g_to_temp(g): - return ((g * (max_temp_in_celsius - min_temp_in_celsius)) / 255.0) + min_temp_in_celsius -while(True): +def map_g_to_temp(g): + return ( + (g * (max_temp_in_celsius - min_temp_in_celsius)) / 255.0 + ) + min_temp_in_celsius + + +while True: clock.tick() img = sensor.snapshot() - for blob in img.find_blobs(threshold_list, pixels_threshold=200, area_threshold=200, merge=True): + for blob in img.find_blobs( + threshold_list, pixels_threshold=200, area_threshold=200, merge=True + ): stats = img.get_statistics(thresholds=threshold_list, roi=blob.rect()) img.draw_rectangle(blob.rect()) img.draw_cross(blob.cx(), blob.cy()) - img.draw_string(blob.x(), blob.y() - 10, "%.2f C" % map_g_to_temp(stats.mean()), mono_space=False) + img.draw_string( + blob.x(), + blob.y() - 10, + "%.2f C" % map_g_to_temp(stats.mean()), + mono_space=False, + ) lcd.display(img) - print("FPS %f - Lepton Temp: %f C" % (clock.fps(), sensor.ioctl(sensor.IOCTL_LEPTON_GET_FPA_TEMPERATURE))) + print( + "FPS %f - Lepton Temp: %f C" + % (clock.fps(), sensor.ioctl(sensor.IOCTL_LEPTON_GET_FPA_TEMPERATURE)) + ) diff --git a/scripts/examples/01-Camera/05-FLIR-Lepton/lepton_hotspot_grayscale_color_tracking.py b/scripts/examples/01-Camera/05-FLIR-Lepton/lepton_hotspot_grayscale_color_tracking.py index c007bc7e5..dceafd249 100644 --- a/scripts/examples/01-Camera/05-FLIR-Lepton/lepton_hotspot_grayscale_color_tracking.py +++ b/scripts/examples/01-Camera/05-FLIR-Lepton/lepton_hotspot_grayscale_color_tracking.py @@ -16,9 +16,17 @@ threshold_list = [(220, 255)] print("Resetting Lepton...") # These settings are applied on reset sensor.reset() -print("Lepton Res (%dx%d)" % (sensor.ioctl(sensor.IOCTL_LEPTON_GET_WIDTH), - sensor.ioctl(sensor.IOCTL_LEPTON_GET_HEIGHT))) -print("Radiometry Available: " + ("Yes" if sensor.ioctl(sensor.IOCTL_LEPTON_GET_RADIOMETRY) else "No")) +print( + "Lepton Res (%dx%d)" + % ( + sensor.ioctl(sensor.IOCTL_LEPTON_GET_WIDTH), + sensor.ioctl(sensor.IOCTL_LEPTON_GET_HEIGHT), + ) +) +print( + "Radiometry Available: " + + ("Yes" if sensor.ioctl(sensor.IOCTL_LEPTON_GET_RADIOMETRY) else "No") +) sensor.set_pixformat(sensor.GRAYSCALE) sensor.set_framesize(sensor.QQVGA) @@ -29,10 +37,12 @@ clock = time.clock() # returned by "find_blobs" below. Change "pixels_threshold" and "area_threshold" if you change the # camera resolution. "merge=True" merges all overlapping blobs in the image. 
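All of these hotspot trackers share one find_blobs() shape: a threshold list plus pixels_threshold/area_threshold floors, with merge=True collapsing overlapping detections. When several hotspots survive, one reasonable follow-up (the readout-control example later in this patch does the same) is to keep only the densest blob. A sketch built from calls that all appear in these scripts; the "hottest" name is illustrative:

import sensor
import time

threshold_list = [(220, 255)]  # Grayscale "very hot" band, as above.

sensor.reset()
sensor.set_pixformat(sensor.GRAYSCALE)
sensor.set_framesize(sensor.QQVGA)
sensor.skip_frames(time=2000)
clock = time.clock()

while True:
    clock.tick()
    img = sensor.snapshot()
    blobs = img.find_blobs(
        threshold_list, pixels_threshold=200, area_threshold=200, merge=True
    )
    if blobs:
        hottest = max(blobs, key=lambda b: b.density())  # Most solid detection wins.
        img.draw_rectangle(hottest.rect(), color=127)
        img.draw_cross(hottest.cx(), hottest.cy(), color=127)
    print(clock.fps())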
-while(True): +while True: clock.tick() img = sensor.snapshot() - for blob in img.find_blobs(threshold_list, pixels_threshold=200, area_threshold=200, merge=True): + for blob in img.find_blobs( + threshold_list, pixels_threshold=200, area_threshold=200, merge=True + ): img.draw_rectangle(blob.rect(), color=127) img.draw_cross(blob.cx(), blob.cy(), color=127) print(clock.fps()) diff --git a/scripts/examples/01-Camera/05-FLIR-Lepton/lepton_hotspot_grayscale_color_tracking_lcd.py b/scripts/examples/01-Camera/05-FLIR-Lepton/lepton_hotspot_grayscale_color_tracking_lcd.py index b3ed117fd..790d15e95 100644 --- a/scripts/examples/01-Camera/05-FLIR-Lepton/lepton_hotspot_grayscale_color_tracking_lcd.py +++ b/scripts/examples/01-Camera/05-FLIR-Lepton/lepton_hotspot_grayscale_color_tracking_lcd.py @@ -17,9 +17,17 @@ threshold_list = [(220, 255)] print("Resetting Lepton...") # These settings are applied on reset sensor.reset() -print("Lepton Res (%dx%d)" % (sensor.ioctl(sensor.IOCTL_LEPTON_GET_WIDTH), - sensor.ioctl(sensor.IOCTL_LEPTON_GET_HEIGHT))) -print("Radiometry Available: " + ("Yes" if sensor.ioctl(sensor.IOCTL_LEPTON_GET_RADIOMETRY) else "No")) +print( + "Lepton Res (%dx%d)" + % ( + sensor.ioctl(sensor.IOCTL_LEPTON_GET_WIDTH), + sensor.ioctl(sensor.IOCTL_LEPTON_GET_HEIGHT), + ) +) +print( + "Radiometry Available: " + + ("Yes" if sensor.ioctl(sensor.IOCTL_LEPTON_GET_RADIOMETRY) else "No") +) sensor.set_pixformat(sensor.GRAYSCALE) sensor.set_framesize(sensor.LCD) @@ -31,10 +39,12 @@ lcd.init() # returned by "find_blobs" below. Change "pixels_threshold" and "area_threshold" if you change the # camera resolution. "merge=True" merges all overlapping blobs in the image. -while(True): +while True: clock.tick() img = sensor.snapshot() - for blob in img.find_blobs(threshold_list, pixels_threshold=200, area_threshold=200, merge=True): + for blob in img.find_blobs( + threshold_list, pixels_threshold=200, area_threshold=200, merge=True + ): img.draw_rectangle(blob.rect(), color=127) img.draw_cross(blob.cx(), blob.cy(), color=127) lcd.display(img) diff --git a/scripts/examples/01-Camera/05-FLIR-Lepton/lepton_hotspot_rgb565_color_tracking.py b/scripts/examples/01-Camera/05-FLIR-Lepton/lepton_hotspot_rgb565_color_tracking.py index 438acdee8..75aa08068 100644 --- a/scripts/examples/01-Camera/05-FLIR-Lepton/lepton_hotspot_rgb565_color_tracking.py +++ b/scripts/examples/01-Camera/05-FLIR-Lepton/lepton_hotspot_rgb565_color_tracking.py @@ -11,14 +11,22 @@ import sensor import time # Color Tracking Thresholds (L Min, L Max, A Min, A Max, B Min, B Max) -threshold_list = [( 70, 100, -30, 40, 20, 100)] +threshold_list = [(70, 100, -30, 40, 20, 100)] print("Resetting Lepton...") # These settings are applied on reset sensor.reset() -print("Lepton Res (%dx%d)" % (sensor.ioctl(sensor.IOCTL_LEPTON_GET_WIDTH), - sensor.ioctl(sensor.IOCTL_LEPTON_GET_HEIGHT))) -print("Radiometry Available: " + ("Yes" if sensor.ioctl(sensor.IOCTL_LEPTON_GET_RADIOMETRY) else "No")) +print( + "Lepton Res (%dx%d)" + % ( + sensor.ioctl(sensor.IOCTL_LEPTON_GET_WIDTH), + sensor.ioctl(sensor.IOCTL_LEPTON_GET_HEIGHT), + ) +) +print( + "Radiometry Available: " + + ("Yes" if sensor.ioctl(sensor.IOCTL_LEPTON_GET_RADIOMETRY) else "No") +) # Make the color palette cool sensor.set_color_palette(sensor.PALETTE_IRONBOW) @@ -31,10 +39,12 @@ clock = time.clock() # returned by "find_blobs" below. Change "pixels_threshold" and "area_threshold" if you change the # camera resolution. "merge=True" merges all overlapping blobs in the image. 
-while(True): +while True: clock.tick() img = sensor.snapshot() - for blob in img.find_blobs(threshold_list, pixels_threshold=200, area_threshold=200, merge=True): + for blob in img.find_blobs( + threshold_list, pixels_threshold=200, area_threshold=200, merge=True + ): img.draw_rectangle(blob.rect()) img.draw_cross(blob.cx(), blob.cy()) print(clock.fps()) diff --git a/scripts/examples/01-Camera/05-FLIR-Lepton/lepton_hotspot_rgb565_color_tracking_lcd.py b/scripts/examples/01-Camera/05-FLIR-Lepton/lepton_hotspot_rgb565_color_tracking_lcd.py index 8a9cb8116..e51eae5f1 100644 --- a/scripts/examples/01-Camera/05-FLIR-Lepton/lepton_hotspot_rgb565_color_tracking_lcd.py +++ b/scripts/examples/01-Camera/05-FLIR-Lepton/lepton_hotspot_rgb565_color_tracking_lcd.py @@ -12,14 +12,22 @@ import time import lcd # Color Tracking Thresholds (L Min, L Max, A Min, A Max, B Min, B Max) -threshold_list = [( 70, 100, -30, 40, 20, 100)] +threshold_list = [(70, 100, -30, 40, 20, 100)] print("Resetting Lepton...") # These settings are applied on reset sensor.reset() -print("Lepton Res (%dx%d)" % (sensor.ioctl(sensor.IOCTL_LEPTON_GET_WIDTH), - sensor.ioctl(sensor.IOCTL_LEPTON_GET_HEIGHT))) -print("Radiometry Available: " + ("Yes" if sensor.ioctl(sensor.IOCTL_LEPTON_GET_RADIOMETRY) else "No")) +print( + "Lepton Res (%dx%d)" + % ( + sensor.ioctl(sensor.IOCTL_LEPTON_GET_WIDTH), + sensor.ioctl(sensor.IOCTL_LEPTON_GET_HEIGHT), + ) +) +print( + "Radiometry Available: " + + ("Yes" if sensor.ioctl(sensor.IOCTL_LEPTON_GET_RADIOMETRY) else "No") +) # Make the color palette cool sensor.set_color_palette(sensor.PALETTE_IRONBOW) @@ -33,10 +41,12 @@ lcd.init() # returned by "find_blobs" below. Change "pixels_threshold" and "area_threshold" if you change the # camera resolution. "merge=True" merges all overlapping blobs in the image. 
-while(True): +while True: clock.tick() img = sensor.snapshot() - for blob in img.find_blobs(threshold_list, pixels_threshold=200, area_threshold=200, merge=True): + for blob in img.find_blobs( + threshold_list, pixels_threshold=200, area_threshold=200, merge=True + ): img.draw_rectangle(blob.rect()) img.draw_cross(blob.cx(), blob.cy()) lcd.display(img) diff --git a/scripts/examples/01-Camera/05-FLIR-Lepton/lepton_target_temp_hotspot_grayscale_color_tracking.py b/scripts/examples/01-Camera/05-FLIR-Lepton/lepton_target_temp_hotspot_grayscale_color_tracking.py index 2b97f2714..7afcd764b 100644 --- a/scripts/examples/01-Camera/05-FLIR-Lepton/lepton_target_temp_hotspot_grayscale_color_tracking.py +++ b/scripts/examples/01-Camera/05-FLIR-Lepton/lepton_target_temp_hotspot_grayscale_color_tracking.py @@ -31,10 +31,20 @@ print("Resetting Lepton...") # These settings are applied on reset sensor.reset() sensor.ioctl(sensor.IOCTL_LEPTON_SET_MEASUREMENT_MODE, True) -sensor.ioctl(sensor.IOCTL_LEPTON_SET_MEASUREMENT_RANGE, min_temp_in_celsius, max_temp_in_celsius) -print("Lepton Res (%dx%d)" % (sensor.ioctl(sensor.IOCTL_LEPTON_GET_WIDTH), - sensor.ioctl(sensor.IOCTL_LEPTON_GET_HEIGHT))) -print("Radiometry Available: " + ("Yes" if sensor.ioctl(sensor.IOCTL_LEPTON_GET_RADIOMETRY) else "No")) +sensor.ioctl( + sensor.IOCTL_LEPTON_SET_MEASUREMENT_RANGE, min_temp_in_celsius, max_temp_in_celsius +) +print( + "Lepton Res (%dx%d)" + % ( + sensor.ioctl(sensor.IOCTL_LEPTON_GET_WIDTH), + sensor.ioctl(sensor.IOCTL_LEPTON_GET_HEIGHT), + ) +) +print( + "Radiometry Available: " + + ("Yes" if sensor.ioctl(sensor.IOCTL_LEPTON_GET_RADIOMETRY) else "No") +) sensor.set_pixformat(sensor.GRAYSCALE) sensor.set_framesize(sensor.QQVGA) @@ -45,10 +55,15 @@ clock = time.clock() # returned by "find_blobs" below. Change "pixels_threshold" and "area_threshold" if you change the # camera resolution. "merge=True" merges all overlapping blobs in the image. 
-while(True): +while True: clock.tick() img = sensor.snapshot() - for blob in img.find_blobs(threshold_list, pixels_threshold=200, area_threshold=200, merge=True): + for blob in img.find_blobs( + threshold_list, pixels_threshold=200, area_threshold=200, merge=True + ): img.draw_rectangle(blob.rect(), color=127) img.draw_cross(blob.cx(), blob.cy(), color=127) - print("FPS %f - Lepton Temp: %f C" % (clock.fps(), sensor.ioctl(sensor.IOCTL_LEPTON_GET_FPA_TEMPERATURE))) + print( + "FPS %f - Lepton Temp: %f C" + % (clock.fps(), sensor.ioctl(sensor.IOCTL_LEPTON_GET_FPA_TEMPERATURE)) + ) diff --git a/scripts/examples/01-Camera/05-FLIR-Lepton/lepton_target_temp_hotspot_rgb565_color_tracking.py b/scripts/examples/01-Camera/05-FLIR-Lepton/lepton_target_temp_hotspot_rgb565_color_tracking.py index 2797c6f73..f20d746b9 100644 --- a/scripts/examples/01-Camera/05-FLIR-Lepton/lepton_target_temp_hotspot_rgb565_color_tracking.py +++ b/scripts/examples/01-Camera/05-FLIR-Lepton/lepton_target_temp_hotspot_rgb565_color_tracking.py @@ -21,7 +21,7 @@ import sensor import time # Color Tracking Thresholds (L Min, L Max, A Min, A Max, B Min, B Max) -threshold_list = [( 70, 100, -30, 40, 20, 100)] +threshold_list = [(70, 100, -30, 40, 20, 100)] # Set the target temp range here min_temp_in_celsius = 20 @@ -31,10 +31,20 @@ print("Resetting Lepton...") # These settings are applied on reset sensor.reset() sensor.ioctl(sensor.IOCTL_LEPTON_SET_MEASUREMENT_MODE, True) -sensor.ioctl(sensor.IOCTL_LEPTON_SET_MEASUREMENT_RANGE, min_temp_in_celsius, max_temp_in_celsius) -print("Lepton Res (%dx%d)" % (sensor.ioctl(sensor.IOCTL_LEPTON_GET_WIDTH), - sensor.ioctl(sensor.IOCTL_LEPTON_GET_HEIGHT))) -print("Radiometry Available: " + ("Yes" if sensor.ioctl(sensor.IOCTL_LEPTON_GET_RADIOMETRY) else "No")) +sensor.ioctl( + sensor.IOCTL_LEPTON_SET_MEASUREMENT_RANGE, min_temp_in_celsius, max_temp_in_celsius +) +print( + "Lepton Res (%dx%d)" + % ( + sensor.ioctl(sensor.IOCTL_LEPTON_GET_WIDTH), + sensor.ioctl(sensor.IOCTL_LEPTON_GET_HEIGHT), + ) +) +print( + "Radiometry Available: " + + ("Yes" if sensor.ioctl(sensor.IOCTL_LEPTON_GET_RADIOMETRY) else "No") +) # Make the color palette cool sensor.set_color_palette(sensor.PALETTE_IRONBOW) @@ -47,10 +57,15 @@ clock = time.clock() # returned by "find_blobs" below. Change "pixels_threshold" and "area_threshold" if you change the # camera resolution. "merge=True" merges all overlapping blobs in the image. -while(True): +while True: clock.tick() img = sensor.snapshot() - for blob in img.find_blobs(threshold_list, pixels_threshold=200, area_threshold=200, merge=True): + for blob in img.find_blobs( + threshold_list, pixels_threshold=200, area_threshold=200, merge=True + ): img.draw_rectangle(blob.rect()) img.draw_cross(blob.cx(), blob.cy()) - print("FPS %f - Lepton Temp: %f C" % (clock.fps(), sensor.ioctl(sensor.IOCTL_LEPTON_GET_FPA_TEMPERATURE))) + print( + "FPS %f - Lepton Temp: %f C" + % (clock.fps(), sensor.ioctl(sensor.IOCTL_LEPTON_GET_FPA_TEMPERATURE)) + ) diff --git a/scripts/examples/01-Camera/06-Time-of-Flight/tof_camera.py b/scripts/examples/01-Camera/06-Time-of-Flight/tof_camera.py index dcc4b9ef8..347972885 100644 --- a/scripts/examples/01-Camera/06-Time-of-Flight/tof_camera.py +++ b/scripts/examples/01-Camera/06-Time-of-Flight/tof_camera.py @@ -7,24 +7,29 @@ import image import time import tof -IMAGE_SCALE = 10 # Higher scaling uses more memory. -drawing_hint = image.BILINEAR # or image.BILINEAR or 0 (nearest neighbor) +IMAGE_SCALE = 10 # Higher scaling uses more memory. 
+drawing_hint = image.BILINEAR  # or image.BICUBIC or 0 (nearest neighbor)
 # Initialize the ToF sensor
-tof.init() #Auto-detects the connected sensor.
-w = tof.width() * IMAGE_SCALE
+tof.init()  # Auto-detects the connected sensor.
+w = tof.width() * IMAGE_SCALE
 h = tof.height() * IMAGE_SCALE
 # FPS clock
 clock = time.clock()
-while (True):
+while True:
     clock.tick()
     try:
-        img = tof.snapshot(x_size=w, y_size=h,
-                           color_palette=tof.PALETTE_IRONBOW,
-                           hint=drawing_hint, copy_to_fb=True, scale=(0, 4000))
+        img = tof.snapshot(
+            x_size=w,
+            y_size=h,
+            color_palette=tof.PALETTE_IRONBOW,
+            hint=drawing_hint,
+            copy_to_fb=True,
+            scale=(0, 4000),
+        )
     except OSError:
         continue
diff --git a/scripts/examples/01-Camera/06-Time-of-Flight/tof_overlay.py b/scripts/examples/01-Camera/06-Time-of-Flight/tof_overlay.py
index dcb7d515e..61d1cfd8c 100644
--- a/scripts/examples/01-Camera/06-Time-of-Flight/tof_overlay.py
+++ b/scripts/examples/01-Camera/06-Time-of-Flight/tof_overlay.py
@@ -7,10 +7,10 @@ import image
 import time
 import tof
-sensor.reset()
-sensor.set_pixformat(sensor.RGB565)
-sensor.set_framesize(sensor.QVGA)
-sensor.set_windowing((0, 0, 240, 240))
+sensor.reset()  # Reset and initialize the sensor.
+sensor.set_pixformat(sensor.RGB565)  # Set pixel format to RGB565 (or GRAYSCALE)
+sensor.set_framesize(sensor.QVGA)  # Set frame size to QVGA (320x240)
+sensor.set_windowing((0, 0, 240, 240))  # Set window size to 240x240
 # Initialize the ToF sensor
 tof.init()
@@ -18,7 +18,7 @@ tof.init()
 # FPS clock
 clock = time.clock()
-while (True):
+while True:
     clock.tick()
     # Capture an image
@@ -31,12 +31,22 @@ while (True):
         continue
     # Scale the image and blend it with the framebuffer
-    tof.draw_depth(img, depth, hint=image.BILINEAR,
-                   alpha=200, scale=(0, 4000), color_palette=tof.PALETTE_IRONBOW)
+    tof.draw_depth(
+        img,
+        depth,
+        hint=image.BILINEAR,
+        alpha=200,
+        scale=(0, 4000),
+        color_palette=tof.PALETTE_IRONBOW,
+    )
     # Draw min and max distance.
-    img.draw_string(8, 0, "Min distance: %d mm" % dmin, color = (255, 0, 0), mono_space = False)
-    img.draw_string(8, 8, "Max distance: %d mm" % dmax, color = (255, 0, 0), mono_space = False)
+    img.draw_string(
+        8, 0, "Min distance: %d mm" % dmin, color=(255, 0, 0), mono_space=False
+    )
+    img.draw_string(
+        8, 8, "Max distance: %d mm" % dmax, color=(255, 0, 0), mono_space=False
+    )
     # Force high quality streaming
     img.compress(quality=90)
diff --git a/scripts/examples/01-Camera/07-Sensor-Control/sensor_auto_gain_control.py b/scripts/examples/01-Camera/07-Sensor-Control/sensor_auto_gain_control.py
index 90fee87ea..4da9a9fdf 100644
--- a/scripts/examples/01-Camera/07-Sensor-Control/sensor_auto_gain_control.py
+++ b/scripts/examples/01-Camera/07-Sensor-Control/sensor_auto_gain_control.py
@@ -25,22 +25,24 @@
 import sensor
 import time
-sensor.reset() # Reset and initialize the sensor.
-sensor.set_pixformat(sensor.RGB565) # Set pixel format to RGB565 (or GRAYSCALE)
-sensor.set_framesize(sensor.QVGA) # Set frame size to QVGA (320x240)
+sensor.reset()  # Reset and initialize the sensor.
+sensor.set_pixformat(sensor.RGB565)  # Set pixel format to RGB565 (or GRAYSCALE)
+sensor.set_framesize(sensor.QVGA)  # Set frame size to QVGA (320x240)
 # The gain db ceiling maxes out at about 24 db for the OV7725 sensor.
-sensor.set_auto_gain(True, gain_db_ceiling = 16.0) # Default gain.
+sensor.set_auto_gain(True, gain_db_ceiling=16.0)  # Default gain.
 # Note! If you set the gain ceiling too low without adjusting the exposure control
 # target value then you'll just get a lot of oscillation from the exposure
 # control if it's on.
-sensor.skip_frames(time = 2000) # Wait for settings take effect.
-clock = time.clock() # Create a clock object to track the FPS.
+sensor.skip_frames(time=2000)  # Wait for settings to take effect.
+clock = time.clock()  # Create a clock object to track the FPS.
-while(True):
-    clock.tick() # Update the FPS clock.
-    img = sensor.snapshot() # Take a picture and return the image.
-    print("FPS %f, Gain %f dB, Exposure %d us" % \
-        (clock.fps(), sensor.get_gain_db(), sensor.get_exposure_us()))
+while True:
+    clock.tick()  # Update the FPS clock.
+    img = sensor.snapshot()  # Take a picture and return the image.
+    print(
+        "FPS %f, Gain %f dB, Exposure %d us"
+        % (clock.fps(), sensor.get_gain_db(), sensor.get_exposure_us())
+    )
diff --git a/scripts/examples/01-Camera/07-Sensor-Control/sensor_exposure_control.py b/scripts/examples/01-Camera/07-Sensor-Control/sensor_exposure_control.py
index 9f7bc3f78..3b8d2c05a 100644
--- a/scripts/examples/01-Camera/07-Sensor-Control/sensor_exposure_control.py
+++ b/scripts/examples/01-Camera/07-Sensor-Control/sensor_exposure_control.py
@@ -21,15 +21,15 @@ import time
 # Change this value to adjust the exposure. Try 10.0/0.1/etc.
 EXPOSURE_TIME_SCALE = 1.0
-sensor.reset() # Reset and initialize the sensor.
-sensor.set_pixformat(sensor.RGB565) # Set pixel format to RGB565 (or GRAYSCALE)
-sensor.set_framesize(sensor.QVGA) # Set frame size to QVGA (320x240)
+sensor.reset()  # Reset and initialize the sensor.
+sensor.set_pixformat(sensor.RGB565)  # Set pixel format to RGB565 (or GRAYSCALE)
+sensor.set_framesize(sensor.QVGA)  # Set frame size to QVGA (320x240)
 # Print out the initial exposure time for comparison.
 print("Initial exposure == %d" % sensor.get_exposure_us())
-sensor.skip_frames(time = 2000) # Wait for settings take effect.
-clock = time.clock() # Create a clock object to track the FPS.
+sensor.skip_frames(time=2000)  # Wait for settings to take effect.
+clock = time.clock()  # Create a clock object to track the FPS.
 # You have to turn automatic gain control and automatic white balance off
 # otherwise they will change the image gains to undo any exposure settings
@@ -37,7 +37,7 @@ clock = time.clock() # Create a clock object to track the FPS.
 sensor.set_auto_gain(False)
 sensor.set_auto_whitebal(False)
 # Need to let the above settings get in...
-sensor.skip_frames(time = 500)
+sensor.skip_frames(time=500)
 current_exposure_time_in_microseconds = sensor.get_exposure_us()
 print("Current Exposure == %d" % current_exposure_time_in_microseconds)
@@ -45,8 +45,9 @@ print("Current Exposure == %d" % current_exposure_time_in_microseconds)
 # Auto exposure control (AEC) is enabled by default. Calling the below function
 # disables sensor auto exposure control. The additional "exposure_us"
 # argument then overrides the auto exposure value after AEC is disabled.
-sensor.set_auto_exposure(False, \
-    exposure_us = int(current_exposure_time_in_microseconds * EXPOSURE_TIME_SCALE))
+sensor.set_auto_exposure(
+    False, exposure_us=int(current_exposure_time_in_microseconds * EXPOSURE_TIME_SCALE)
+)
 print("New exposure == %d" % sensor.get_exposure_us())
 # sensor.get_exposure_us() returns the exact camera sensor exposure time
@@ -61,8 +62,8 @@ print("New exposure == %d" % sensor.get_exposure_us())
 # Just disables the exposure value update but does not change the exposure
 # value the camera sensor determined was good.
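sensor_exposure_control.py applies a single EXPOSURE_TIME_SCALE, and the comments above stress that disabling AEC only freezes the current value. Stepping through a few scale factors makes the effect easier to see; a sketch using only calls from that example (the factors themselves are illustrative):

import sensor

sensor.reset()
sensor.set_pixformat(sensor.RGB565)
sensor.set_framesize(sensor.QVGA)
sensor.skip_frames(time=2000)

sensor.set_auto_gain(False)  # As above: AGC/AWB would otherwise mask the change.
sensor.set_auto_whitebal(False)
sensor.skip_frames(time=500)

base_us = sensor.get_exposure_us()  # Whatever AEC settled on.

for scale in (0.5, 1.0, 2.0):  # Illustrative factors, like the 10.0/0.1 in the comment.
    sensor.set_auto_exposure(False, exposure_us=int(base_us * scale))
    sensor.skip_frames(time=500)  # Let the new exposure take effect.
    print("scale %.1f -> %d us" % (scale, sensor.get_exposure_us()))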
-while(True):
-    clock.tick() # Update the FPS clock.
-    img = sensor.snapshot() # Take a picture and return the image.
-    print(clock.fps()) # Note: OpenMV Cam runs about half as fast when connected
-                       # to the IDE. The FPS should increase once disconnected.
+while True:
+    clock.tick()  # Update the FPS clock.
+    img = sensor.snapshot()  # Take a picture and return the image.
+    print(clock.fps())  # Note: OpenMV Cam runs about half as fast when connected
+    # to the IDE. The FPS should increase once disconnected.
diff --git a/scripts/examples/01-Camera/07-Sensor-Control/sensor_horizontal_mirror.py b/scripts/examples/01-Camera/07-Sensor-Control/sensor_horizontal_mirror.py
index 6291dc5c3..208d242a2 100644
--- a/scripts/examples/01-Camera/07-Sensor-Control/sensor_horizontal_mirror.py
+++ b/scripts/examples/01-Camera/07-Sensor-Control/sensor_horizontal_mirror.py
@@ -6,17 +6,17 @@
 import sensor
 import time
-sensor.reset() # Reset and initialize the sensor.
-sensor.set_pixformat(sensor.RGB565) # Set pixel format to RGB565 (or GRAYSCALE)
-sensor.set_framesize(sensor.QVGA) # Set frame size to QVGA (320x240)
-sensor.skip_frames(time = 2000) # Wait for settings take effect.
-clock = time.clock() # Create a clock object to track the FPS.
+sensor.reset()  # Reset and initialize the sensor.
+sensor.set_pixformat(sensor.RGB565)  # Set pixel format to RGB565 (or GRAYSCALE)
+sensor.set_framesize(sensor.QVGA)  # Set frame size to QVGA (320x240)
+sensor.skip_frames(time=2000)  # Wait for settings to take effect.
+clock = time.clock()  # Create a clock object to track the FPS.
 # Change this to False to undo the mirror.
 sensor.set_hmirror(True)
-while(True):
-    clock.tick() # Update the FPS clock.
-    img = sensor.snapshot() # Take a picture and return the image.
-    print(clock.fps()) # Note: OpenMV Cam runs about half as fast when connected
-                       # to the IDE. The FPS should increase once disconnected.
+while True:
+    clock.tick()  # Update the FPS clock.
+    img = sensor.snapshot()  # Take a picture and return the image.
+    print(clock.fps())  # Note: OpenMV Cam runs about half as fast when connected
+    # to the IDE. The FPS should increase once disconnected.
diff --git a/scripts/examples/01-Camera/07-Sensor-Control/sensor_manual_whitebal_control.py b/scripts/examples/01-Camera/07-Sensor-Control/sensor_manual_whitebal_control.py
index 69ba02bb3..5f0ecf8ac 100644
--- a/scripts/examples/01-Camera/07-Sensor-Control/sensor_manual_whitebal_control.py
+++ b/scripts/examples/01-Camera/07-Sensor-Control/sensor_manual_whitebal_control.py
@@ -16,11 +16,11 @@
 import sensor
 import time
-sensor.reset() # Reset and initialize the sensor.
-sensor.set_pixformat(sensor.RGB565) # Set pixel format to RGB565 (or GRAYSCALE)
-sensor.set_framesize(sensor.QVGA) # Set frame size to QVGA (320x240)
-sensor.skip_frames(time = 2000) # Wait for settings take effect.
-clock = time.clock() # Create a clock object to track the FPS.
+sensor.reset()  # Reset and initialize the sensor.
+sensor.set_pixformat(sensor.RGB565)  # Set pixel format to RGB565 (or GRAYSCALE)
+sensor.set_framesize(sensor.QVGA)  # Set frame size to QVGA (320x240)
+sensor.skip_frames(time=2000)  # Wait for settings to take effect.
+clock = time.clock()  # Create a clock object to track the FPS.
 # You can control the white balance gains here. The first value is the
 # R gain in db, and then the G gain in db, followed by the B gain in db.
@@ -33,8 +33,7 @@ clock = time.clock() # Create a clock object to track the FPS.
 # coming out. Do not expect the exact value going in to be equal to the value
 # coming out.
-while(True):
-    clock.tick() # Update the FPS clock.
-    img = sensor.snapshot() # Take a picture and return the image.
-    print(clock.fps(), \
-        sensor.get_rgb_gain_db()) # Prints the AWB current RGB gains.
+while True:
+    clock.tick()  # Update the FPS clock.
+    img = sensor.snapshot()  # Take a picture and return the image.
+    print(clock.fps(), sensor.get_rgb_gain_db())  # Prints the AWB current RGB gains.
diff --git a/scripts/examples/01-Camera/07-Sensor-Control/sensor_vertical_flip.py b/scripts/examples/01-Camera/07-Sensor-Control/sensor_vertical_flip.py
index 453f4ab81..84c15b5a4 100644
--- a/scripts/examples/01-Camera/07-Sensor-Control/sensor_vertical_flip.py
+++ b/scripts/examples/01-Camera/07-Sensor-Control/sensor_vertical_flip.py
@@ -6,17 +6,17 @@
 import sensor
 import time
-sensor.reset() # Reset and initialize the sensor.
-sensor.set_pixformat(sensor.RGB565) # Set pixel format to RGB565 (or GRAYSCALE)
-sensor.set_framesize(sensor.QVGA) # Set frame size to QVGA (320x240)
-sensor.skip_frames(time = 2000) # Wait for settings take effect.
-clock = time.clock() # Create a clock object to track the FPS.
+sensor.reset()  # Reset and initialize the sensor.
+sensor.set_pixformat(sensor.RGB565)  # Set pixel format to RGB565 (or GRAYSCALE)
+sensor.set_framesize(sensor.QVGA)  # Set frame size to QVGA (320x240)
+sensor.skip_frames(time=2000)  # Wait for settings to take effect.
+clock = time.clock()  # Create a clock object to track the FPS.
 # Change this to False to undo the flip.
 sensor.set_vflip(True)
-while(True):
-    clock.tick() # Update the FPS clock.
-    img = sensor.snapshot() # Take a picture and return the image.
-    print(clock.fps()) # Note: OpenMV Cam runs about half as fast when connected
-                       # to the IDE. The FPS should increase once disconnected.
+while True:
+    clock.tick()  # Update the FPS clock.
+    img = sensor.snapshot()  # Take a picture and return the image.
+    print(clock.fps())  # Note: OpenMV Cam runs about half as fast when connected
+    # to the IDE. The FPS should increase once disconnected.
diff --git a/scripts/examples/01-Camera/07-Sensor-Control/sesnor_manual_gain_control.py b/scripts/examples/01-Camera/07-Sensor-Control/sesnor_manual_gain_control.py
index 3099c37a0..afd6f41b7 100644
--- a/scripts/examples/01-Camera/07-Sensor-Control/sesnor_manual_gain_control.py
+++ b/scripts/examples/01-Camera/07-Sensor-Control/sesnor_manual_gain_control.py
@@ -21,15 +21,15 @@ import time
 # Change this value to adjust the gain. Try 10.0/0/0.1/etc.
 GAIN_SCALE = 1.0
-sensor.reset() # Reset and initialize the sensor.
-sensor.set_pixformat(sensor.RGB565) # Set pixel format to RGB565 (or GRAYSCALE)
-sensor.set_framesize(sensor.QVGA) # Set frame size to QVGA (320x240)
+sensor.reset()  # Reset and initialize the sensor.
+sensor.set_pixformat(sensor.RGB565)  # Set pixel format to RGB565 (or GRAYSCALE)
+sensor.set_framesize(sensor.QVGA)  # Set frame size to QVGA (320x240)
 # Print out the initial gain for comparison.
 print("Initial gain == %f db" % sensor.get_gain_db())
-sensor.skip_frames(time = 2000) # Wait for settings take effect.
-clock = time.clock() # Create a clock object to track the FPS.
+sensor.skip_frames(time=2000)  # Wait for settings to take effect.
+clock = time.clock()  # Create a clock object to track the FPS.
 # You have to turn automatic exposure control and automatic white balance off
 # otherwise they will change the image exposure to undo any gain settings
 sensor.set_auto_exposure(False)
 sensor.set_auto_whitebal(False)
 
 # Need to let the above settings get in...
-sensor.skip_frames(time = 500)
+sensor.skip_frames(time=500)
 
 current_gain_in_decibels = sensor.get_gain_db()
 print("Current Gain == %f db" % current_gain_in_decibels)
 
@@ -45,8 +45,7 @@ print("Current Gain == %f db" % current_gain_in_decibels)
 # Auto gain control (AGC) is enabled by default. Calling the below function
 # disables sensor auto gain control. The additional "gain_db"
 # argument then overrides the auto gain value after AGC is disabled.
-sensor.set_auto_gain(False, \
-                     gain_db = current_gain_in_decibels * GAIN_SCALE)
+sensor.set_auto_gain(False, gain_db=current_gain_in_decibels * GAIN_SCALE)
 
 print("New gain == %f db" % sensor.get_gain_db())
 # sensor.get_gain_db() returns the exact camera sensor gain decibels.
@@ -61,8 +60,8 @@ print("New gain == %f db" % sensor.get_gain_db())
 # Just disables the gain value update but does not change the gain
 # value the camera sensor determined was good.
 
-while(True):
-    clock.tick() # Update the FPS clock.
-    img = sensor.snapshot() # Take a picture and return the image.
-    print(clock.fps()) # Note: OpenMV Cam runs about half as fast when connected
-                       # to the IDE. The FPS should increase once disconnected.
+while True:
+    clock.tick()  # Update the FPS clock.
+    img = sensor.snapshot()  # Take a picture and return the image.
+    print(clock.fps())  # Note: OpenMV Cam runs about half as fast when connected
+    # to the IDE. The FPS should increase once disconnected.
diff --git a/scripts/examples/01-Camera/08-Readout-Control/100_fps_ir_led_tracking.py b/scripts/examples/01-Camera/08-Readout-Control/100_fps_ir_led_tracking.py
index 5791d2f8d..5fdd5c37d 100644
--- a/scripts/examples/01-Camera/08-Readout-Control/100_fps_ir_led_tracking.py
+++ b/scripts/examples/01-Camera/08-Readout-Control/100_fps_ir_led_tracking.py
@@ -7,7 +7,7 @@ import sensor
 import time
 
 EXPOSURE_MICROSECONDS = 1000
-TRACKING_THRESHOLDS = [(128, 255)] # When you lower the exposure you darken everything.
+TRACKING_THRESHOLDS = [(128, 255)]  # When you lower the exposure you darken everything.
 
 SEARCHING_RESOLUTION = sensor.VGA
 SEARCHING_AREA_THRESHOLD = 16
@@ -17,32 +17,34 @@ TRACKING_RESOLUTION = sensor.QQVGA
 TRACKING_AREA_THRESHOLD = 256
 TRACKING_PIXEL_THRESHOLD = TRACKING_AREA_THRESHOLD
 
-TRACKING_EDGE_TOLERANCE = 0.05 # Blob can move 5% away from the center.
+TRACKING_EDGE_TOLERANCE = 0.05  # Blob can move 5% away from the center.
 
-sensor.reset() # Reset and initialize the sensor.
-sensor.set_pixformat(sensor.GRAYSCALE) # Set pixel format to GRAYSCALE
+sensor.reset()  # Reset and initialize the sensor.
+sensor.set_pixformat(sensor.GRAYSCALE)  # Set pixel format to GRAYSCALE
 sensor.set_framesize(SEARCHING_RESOLUTION)
-sensor.skip_frames(time = 1000) # Wait for settings take effect.
-clock = time.clock() # Create a clock object to track the FPS.
+sensor.skip_frames(time=1000)  # Wait for settings to take effect.
+clock = time.clock()  # Create a clock object to track the FPS.
 
-sensor.set_auto_gain(False) # Turn off as it will oscillate.
+sensor.set_auto_gain(False)  # Turn off as it will oscillate.
 sensor.set_auto_exposure(False, exposure_us=EXPOSURE_MICROSECONDS)
-sensor.skip_frames(time = 1000)
+sensor.skip_frames(time=1000)
 
 # sensor_w and sensor_h are the image sensor raw pixels w/h (x/y are 0 initially).
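# As a worked sketch of the mapping used below (the numbers are illustrative
# only): with a 2592x1944 raw readout and a VGA (640x480) frame, a blob
# centroid at (320, 240) sits at the frame center, so it maps to
# x + 320 * (2592 / 640) = x + 1296, the middle column of the sensor array.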
x, y, sensor_w, sensor_h = sensor.ioctl(sensor.IOCTL_GET_READOUT_WINDOW) -while(True): +while True: clock.tick() img = sensor.snapshot() # We need to find an IR object to track - it's likely to be really bright. - blobs = img.find_blobs(TRACKING_THRESHOLDS, - area_threshold=SEARCHING_AREA_THRESHOLD, - pixels_threshold=SEARCHING_PIXEL_THRESHOLD) + blobs = img.find_blobs( + TRACKING_THRESHOLDS, + area_threshold=SEARCHING_AREA_THRESHOLD, + pixels_threshold=SEARCHING_PIXEL_THRESHOLD, + ) if len(blobs): - most_dense_blob = max(blobs, key = lambda x: x.density()) + most_dense_blob = max(blobs, key=lambda x: x.density()) img.draw_rectangle(most_dense_blob.rect()) def get_mapped_centroid(b): @@ -69,7 +71,7 @@ while(True): # Add in our displacement from the sensor center mapped_cy += y + (sensor_h / 2.0) - return (mapped_cx, mapped_cy) # X/Y location on the sensor array. + return (mapped_cx, mapped_cy) # X/Y location on the sensor array. def center_on_blob(b, res): mapped_cx, mapped_cy = get_mapped_centroid(b) @@ -93,22 +95,28 @@ while(True): x_error = x - new_x y_error = y - new_y - if x_error < 0: print("-X Limit Reached ", end="") - if x_error > 0: print("+X Limit Reached ", end="") - if y_error < 0: print("-Y Limit Reached ", end="") - if y_error > 0: print("+Y Limit Reached ", end="") + if x_error < 0: + print("-X Limit Reached ", end="") + if x_error > 0: + print("+X Limit Reached ", end="") + if y_error < 0: + print("-Y Limit Reached ", end="") + if y_error > 0: + print("+Y Limit Reached ", end="") center_on_blob(most_dense_blob, TRACKING_RESOLUTION) # This loop will track the blob at a much higher readout speed and lower resolution. - while(True): + while True: clock.tick() img = sensor.snapshot() # Find the blob in the lower resolution image. - blobs = img.find_blobs(TRACKING_THRESHOLDS, - area_threshold=TRACKING_AREA_THRESHOLD, - pixels_threshold=TRACKING_PIXEL_THRESHOLD) + blobs = img.find_blobs( + TRACKING_THRESHOLDS, + area_threshold=TRACKING_AREA_THRESHOLD, + pixels_threshold=TRACKING_PIXEL_THRESHOLD, + ) # If we loose the blob then we need to find a new one. if not len(blobs): @@ -118,10 +126,12 @@ while(True): break # Narrow down the blob list and highlight the blob. - most_dense_blob = max(blobs, key = lambda x: x.density()) + most_dense_blob = max(blobs, key=lambda x: x.density()) img.draw_rectangle(most_dense_blob.rect()) - print(clock.fps(), "BLOB cx:%d, cy:%d" % get_mapped_centroid(most_dense_blob)) + print( + clock.fps(), "BLOB cx:%d, cy:%d" % get_mapped_centroid(most_dense_blob) + ) x_diff = most_dense_blob.cx() - (sensor.width() / 2.0) y_diff = most_dense_blob.cy() - (sensor.height() / 2.0) diff --git a/scripts/examples/01-Camera/08-Readout-Control/apriltag_tracking.py b/scripts/examples/01-Camera/08-Readout-Control/apriltag_tracking.py index ce5c6c9f8..a6fc52707 100644 --- a/scripts/examples/01-Camera/08-Readout-Control/apriltag_tracking.py +++ b/scripts/examples/01-Camera/08-Readout-Control/apriltag_tracking.py @@ -11,25 +11,29 @@ import time EXPOSURE_MICROSECONDS = 20000 SEARCHING_RESOLUTION = sensor.QVGA -TRACKING_RESOLUTION = sensor.QQVGA # or sensor.QQQVGA +TRACKING_RESOLUTION = sensor.QQVGA # or sensor.QQQVGA -TRACKING_LOW_RATIO_THRESHOLD = 0.2 # Go to a smaller readout window when tag side vs res is smaller. -TRACKING_HIGH_RATIO_THRESHOLD = 0.8 # Go to a larger readout window when tag side vs res is larger. +TRACKING_LOW_RATIO_THRESHOLD = ( + 0.2 # Go to a smaller readout window when tag side vs res is smaller. 
+)
+TRACKING_HIGH_RATIO_THRESHOLD = (
+    0.8  # Go to a larger readout window when tag side vs res is larger.
+)
 
-sensor.reset() # Reset and initialize the sensor.
-sensor.set_pixformat(sensor.GRAYSCALE) # Set pixel format to GRAYSCALE
+sensor.reset()  # Reset and initialize the sensor.
+sensor.set_pixformat(sensor.GRAYSCALE)  # Set pixel format to GRAYSCALE
 sensor.set_framesize(SEARCHING_RESOLUTION)
-sensor.skip_frames(time = 1000) # Wait for settings take effect.
-clock = time.clock() # Create a clock object to track the FPS.
+sensor.skip_frames(time=1000)  # Wait for settings to take effect.
+clock = time.clock()  # Create a clock object to track the FPS.
 
-sensor.set_auto_gain(False) # Turn off as it will oscillate.
+sensor.set_auto_gain(False)  # Turn off as it will oscillate.
 sensor.set_auto_exposure(False, exposure_us=EXPOSURE_MICROSECONDS)
-sensor.skip_frames(time = 1000)
+sensor.skip_frames(time=1000)
 
 # sensor_w and sensor_h are the image sensor raw pixels w/h (x/y are 0 initially).
 x, y, sensor_w, sensor_h = sensor.ioctl(sensor.IOCTL_GET_READOUT_WINDOW)
 
-while(True):
+while True:
     clock.tick()
     img = sensor.snapshot()
@@ -37,7 +41,7 @@ while(True):
     tags = img.find_apriltags()
 
     if len(tags):
-        best_tag = max(tags, key = lambda x: x.decision_margin())
+        best_tag = max(tags, key=lambda x: x.decision_margin())
         img.draw_rectangle(best_tag.rect())
 
         # This needs to be less than the sensor output at default so we can move it around.
@@ -68,7 +72,7 @@ while(True):
             # Add in our displacement from the sensor center
             mapped_cy += y + (sensor_h / 2.0)
 
-            return (mapped_cx, mapped_cy) # X/Y location on the sensor array.
+            return (mapped_cx, mapped_cy)  # X/Y location on the sensor array.
 
         def center_on_tag(t, res):
             global readout_window_w
@@ -94,17 +98,21 @@ while(True):
             x_error = x - new_x
             y_error = y - new_y
 
-            if x_error < 0: print("-X Limit Reached ", end="")
-            if x_error > 0: print("+X Limit Reached ", end="")
-            if y_error < 0: print("-Y Limit Reached ", end="")
-            if y_error > 0: print("+Y Limit Reached ", end="")
+            if x_error < 0:
+                print("-X Limit Reached ", end="")
+            if x_error > 0:
+                print("+X Limit Reached ", end="")
+            if y_error < 0:
+                print("-Y Limit Reached ", end="")
+            if y_error > 0:
+                print("+Y Limit Reached ", end="")
 
         center_on_tag(best_tag, TRACKING_RESOLUTION)
 
         loss_count = 0
 
         # This loop will track the tag at a much higher readout speed and lower resolution.
-        while(True):
+        while True:
             clock.tick()
             img = sensor.snapshot()
@@ -114,7 +122,7 @@ while(True):
             # If we lose the tag then we need to find a new one.
             if not len(tags):
                 # Handle a few bad frames due to tag flicker.
-                if (loss_count < 2):
+                if loss_count < 2:
                     loss_count += 1
                     continue
                 # Reset resolution.
@@ -125,7 +133,7 @@ while(True):
 
             loss_count = 0
 
             # Narrow down the blob list and highlight the blob.
-            best_tag = max(tags, key = lambda x: x.decision_margin())
+            best_tag = max(tags, key=lambda x: x.decision_margin())
             img.draw_rectangle(best_tag.rect())
 
             print(clock.fps(), "TAG cx:%d, cy:%d" % get_mapped_centroid(best_tag))
@@ -134,14 +142,18 @@ while(True):
             h_ratio = best_tag.h() / sensor.height()
 
             # Shrink the tracking window until the tag fits.
-            while (w_ratio < TRACKING_LOW_RATIO_THRESHOLD) or (h_ratio < TRACKING_LOW_RATIO_THRESHOLD):
+            while (w_ratio < TRACKING_LOW_RATIO_THRESHOLD) or (
+                h_ratio < TRACKING_LOW_RATIO_THRESHOLD
+            ):
                 readout_window_w /= 2
                 readout_window_h /= 2
                 w_ratio *= 2
                 h_ratio *= 2
 
             # Enlarge the tracking window until the tag fits.
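# Note that halving the readout window doubles the fraction of it that the
# tag covers, which is why each shrink step multiplies the ratios by 2 (e.g.
# a tag at w_ratio 0.1 passes the 0.2 threshold after one halving); the
# enlarge loop below applies the exact inverse.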
- while (TRACKING_HIGH_RATIO_THRESHOLD < w_ratio) or (TRACKING_HIGH_RATIO_THRESHOLD < h_ratio): + while (TRACKING_HIGH_RATIO_THRESHOLD < w_ratio) or ( + TRACKING_HIGH_RATIO_THRESHOLD < h_ratio + ): readout_window_w *= 2 readout_window_h *= 2 w_ratio /= 2 diff --git a/scripts/examples/02-Image-Processing/00-Drawing/arrow_drawing.py b/scripts/examples/02-Image-Processing/00-Drawing/arrow_drawing.py index 31f00be4b..693016074 100644 --- a/scripts/examples/02-Image-Processing/00-Drawing/arrow_drawing.py +++ b/scripts/examples/02-Image-Processing/00-Drawing/arrow_drawing.py @@ -7,27 +7,27 @@ import time import pyb sensor.reset() -sensor.set_pixformat(sensor.RGB565) # or GRAYSCALE... -sensor.set_framesize(sensor.QVGA) # or QQVGA... -sensor.skip_frames(time = 2000) +sensor.set_pixformat(sensor.RGB565) # or GRAYSCALE... +sensor.set_framesize(sensor.QVGA) # or QQVGA... +sensor.skip_frames(time=2000) clock = time.clock() -while(True): +while True: clock.tick() img = sensor.snapshot() for i in range(10): - x0 = (pyb.rng() % (2*img.width())) - (img.width()//2) - y0 = (pyb.rng() % (2*img.height())) - (img.height()//2) - x1 = (pyb.rng() % (2*img.width())) - (img.width()//2) - y1 = (pyb.rng() % (2*img.height())) - (img.height()//2) + x0 = (pyb.rng() % (2 * img.width())) - (img.width() // 2) + y0 = (pyb.rng() % (2 * img.height())) - (img.height() // 2) + x1 = (pyb.rng() % (2 * img.width())) - (img.width() // 2) + y1 = (pyb.rng() % (2 * img.height())) - (img.height() // 2) r = (pyb.rng() % 127) + 128 g = (pyb.rng() % 127) + 128 b = (pyb.rng() % 127) + 128 # If the first argument is a scaler then this method expects # to see x0, y0, x1, and y1. Otherwise, it expects a (x0,y0,x1,y1) tuple. - img.draw_arrow(x0, y0, x1, y1, color = (r, g, b), size = 30, thickness = 2) + img.draw_arrow(x0, y0, x1, y1, color=(r, g, b), size=30, thickness=2) print(clock.fps()) diff --git a/scripts/examples/02-Image-Processing/00-Drawing/circle_drawing.py b/scripts/examples/02-Image-Processing/00-Drawing/circle_drawing.py index 49756d30f..943bf1421 100644 --- a/scripts/examples/02-Image-Processing/00-Drawing/circle_drawing.py +++ b/scripts/examples/02-Image-Processing/00-Drawing/circle_drawing.py @@ -7,20 +7,20 @@ import time import pyb sensor.reset() -sensor.set_pixformat(sensor.RGB565) # or GRAYSCALE... -sensor.set_framesize(sensor.QVGA) # or QQVGA... -sensor.skip_frames(time = 2000) +sensor.set_pixformat(sensor.RGB565) # or GRAYSCALE... +sensor.set_framesize(sensor.QVGA) # or QQVGA... +sensor.skip_frames(time=2000) clock = time.clock() -while(True): +while True: clock.tick() img = sensor.snapshot() for i in range(10): - x = (pyb.rng() % (2*img.width())) - (img.width()//2) - y = (pyb.rng() % (2*img.height())) - (img.height()//2) - radius = pyb.rng() % (max(img.height(), img.width())//2) + x = (pyb.rng() % (2 * img.width())) - (img.width() // 2) + y = (pyb.rng() % (2 * img.height())) - (img.height() // 2) + radius = pyb.rng() % (max(img.height(), img.width()) // 2) r = (pyb.rng() % 127) + 128 g = (pyb.rng() % 127) + 128 @@ -28,6 +28,6 @@ while(True): # If the first argument is a scaler then this method expects # to see x, y, and radius. Otherwise, it expects a (x,y,radius) tuple. 
- img.draw_circle(x, y, radius, color = (r, g, b), thickness = 2, fill = False) + img.draw_circle(x, y, radius, color=(r, g, b), thickness=2, fill=False) print(clock.fps()) diff --git a/scripts/examples/02-Image-Processing/00-Drawing/copy2fb.py b/scripts/examples/02-Image-Processing/00-Drawing/copy2fb.py index 07dbedd13..b2f3c3b73 100644 --- a/scripts/examples/02-Image-Processing/00-Drawing/copy2fb.py +++ b/scripts/examples/02-Image-Processing/00-Drawing/copy2fb.py @@ -1,4 +1,4 @@ -# Copy image to framebuffer. +# Copy image to framebuffer. # # This example shows how to load and copy an image to framebuffer for testing. diff --git a/scripts/examples/02-Image-Processing/00-Drawing/cross_drawing.py b/scripts/examples/02-Image-Processing/00-Drawing/cross_drawing.py index 68b422bf4..a5bccb7e0 100644 --- a/scripts/examples/02-Image-Processing/00-Drawing/cross_drawing.py +++ b/scripts/examples/02-Image-Processing/00-Drawing/cross_drawing.py @@ -7,25 +7,25 @@ import time import pyb sensor.reset() -sensor.set_pixformat(sensor.RGB565) # or GRAYSCALE... -sensor.set_framesize(sensor.QVGA) # or QQVGA... -sensor.skip_frames(time = 2000) +sensor.set_pixformat(sensor.RGB565) # or GRAYSCALE... +sensor.set_framesize(sensor.QVGA) # or QQVGA... +sensor.skip_frames(time=2000) clock = time.clock() -while(True): +while True: clock.tick() img = sensor.snapshot() for i in range(10): - x = (pyb.rng() % (2*img.width())) - (img.width()//2) - y = (pyb.rng() % (2*img.height())) - (img.height()//2) + x = (pyb.rng() % (2 * img.width())) - (img.width() // 2) + y = (pyb.rng() % (2 * img.height())) - (img.height() // 2) r = (pyb.rng() % 127) + 128 g = (pyb.rng() % 127) + 128 b = (pyb.rng() % 127) + 128 # If the first argument is a scaler then this method expects # to see x and y. Otherwise, it expects a (x,y) tuple. - img.draw_cross(x, y, color = (r, g, b), size = 10, thickness = 2) + img.draw_cross(x, y, color=(r, g, b), size=10, thickness=2) print(clock.fps()) diff --git a/scripts/examples/02-Image-Processing/00-Drawing/ellipse_drawing.py b/scripts/examples/02-Image-Processing/00-Drawing/ellipse_drawing.py index d527ab3d0..51ded014a 100644 --- a/scripts/examples/02-Image-Processing/00-Drawing/ellipse_drawing.py +++ b/scripts/examples/02-Image-Processing/00-Drawing/ellipse_drawing.py @@ -7,21 +7,21 @@ import time import pyb sensor.reset() -sensor.set_pixformat(sensor.RGB565) # or GRAYSCALE... -sensor.set_framesize(sensor.QVGA) # or QQVGA... -sensor.skip_frames(time = 2000) +sensor.set_pixformat(sensor.RGB565) # or GRAYSCALE... +sensor.set_framesize(sensor.QVGA) # or QQVGA... +sensor.skip_frames(time=2000) clock = time.clock() -while(True): +while True: clock.tick() img = sensor.snapshot() for i in range(10): - x = (pyb.rng() % (2*img.width())) - (img.width()//2) - y = (pyb.rng() % (2*img.height())) - (img.height()//2) - radius_x = pyb.rng() % (max(img.height(), img.width())//2) - radius_y = pyb.rng() % (max(img.height(), img.width())//2) + x = (pyb.rng() % (2 * img.width())) - (img.width() // 2) + y = (pyb.rng() % (2 * img.height())) - (img.height() // 2) + radius_x = pyb.rng() % (max(img.height(), img.width()) // 2) + radius_y = pyb.rng() % (max(img.height(), img.width()) // 2) rot = pyb.rng() r = (pyb.rng() % 127) + 128 @@ -31,7 +31,8 @@ while(True): # If the first argument is a scaler then this method expects # to see x, y, radius x, and radius y. # Otherwise, it expects a (x,y,radius_x,radius_y) tuple. 
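# E.g., both of the following calls draw the same ellipse (the values are
# made up for illustration); the tuple form packs the same five scalars:
#
#     img.draw_ellipse(80, 60, 30, 20, 45, color=(255, 0, 0), thickness=2)
#     img.draw_ellipse((80, 60, 30, 20, 45), color=(255, 0, 0), thickness=2)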
- img.draw_ellipse(x, y, radius_x, radius_y, rot, - color = (r, g, b), thickness = 2, fill = False) + img.draw_ellipse( + x, y, radius_x, radius_y, rot, color=(r, g, b), thickness=2, fill=False + ) print(clock.fps()) diff --git a/scripts/examples/02-Image-Processing/00-Drawing/flood_fill.py b/scripts/examples/02-Image-Processing/00-Drawing/flood_fill.py index 49976dc05..50957bda2 100644 --- a/scripts/examples/02-Image-Processing/00-Drawing/flood_fill.py +++ b/scripts/examples/02-Image-Processing/00-Drawing/flood_fill.py @@ -6,12 +6,12 @@ import sensor import time sensor.reset() -sensor.set_pixformat(sensor.RGB565) # or GRAYSCALE... -sensor.set_framesize(sensor.QVGA) # or QQVGA... -sensor.skip_frames(time = 2000) +sensor.set_pixformat(sensor.RGB565) # or GRAYSCALE... +sensor.set_framesize(sensor.QVGA) # or QQVGA... +sensor.skip_frames(time=2000) clock = time.clock() -while(True): +while True: clock.tick() # seed_threshold controls the maximum allowed difference between @@ -29,8 +29,14 @@ while(True): x = sensor.width() // 2 y = sensor.height() // 2 - img = sensor.snapshot().flood_fill(x, y, \ - seed_threshold=0.05, floating_thresholds=0.05, \ - color=(255, 0, 0), invert=False, clear_background=False) + img = sensor.snapshot().flood_fill( + x, + y, + seed_threshold=0.05, + floating_thresholds=0.05, + color=(255, 0, 0), + invert=False, + clear_background=False, + ) print(clock.fps()) diff --git a/scripts/examples/02-Image-Processing/00-Drawing/image_drawing.py b/scripts/examples/02-Image-Processing/00-Drawing/image_drawing.py index 091d4ab0f..d3cc3a442 100644 --- a/scripts/examples/02-Image-Processing/00-Drawing/image_drawing.py +++ b/scripts/examples/02-Image-Processing/00-Drawing/image_drawing.py @@ -4,22 +4,21 @@ import sensor import time -import pyb sensor.reset() -sensor.set_pixformat(sensor.RGB565) # or GRAYSCALE... -sensor.set_framesize(sensor.QVGA) # or QQVGA... -sensor.skip_frames(time = 2000) +sensor.set_pixformat(sensor.RGB565) # or GRAYSCALE... +sensor.set_framesize(sensor.QVGA) # or QQVGA... +sensor.skip_frames(time=2000) clock = time.clock() -while(True): +while True: clock.tick() img = sensor.snapshot() - small_img = img.mean_pooled(4, 4) # Makes a copy. + small_img = img.mean_pooled(4, 4) # Makes a copy. - x = (img.width()//2)-(small_img.width()//2) - y = (img.height()//2)-(small_img.height()//2) + x = (img.width() // 2) - (small_img.width() // 2) + y = (img.height() // 2) - (small_img.height() // 2) # Draws an image in the frame buffer.Pass an optional # mask image to control what pixels are drawn. img.draw_image(small_img, x, y, x_scale=1, y_scale=1) diff --git a/scripts/examples/02-Image-Processing/00-Drawing/image_drawing_advanced.py b/scripts/examples/02-Image-Processing/00-Drawing/image_drawing_advanced.py index b4d2fa94e..321ce54d7 100644 --- a/scripts/examples/02-Image-Processing/00-Drawing/image_drawing_advanced.py +++ b/scripts/examples/02-Image-Processing/00-Drawing/image_drawing_advanced.py @@ -5,12 +5,11 @@ import sensor import image import time -import pyb sensor.reset() -sensor.set_pixformat(sensor.RGB565) # or GRAYSCALE... -sensor.set_framesize(sensor.QVGA) # or QQVGA... -sensor.skip_frames(time = 2000) +sensor.set_pixformat(sensor.RGB565) # or GRAYSCALE... +sensor.set_framesize(sensor.QVGA) # or QQVGA... 
+sensor.skip_frames(time=2000) clock = time.clock() BOUNCE = True @@ -44,7 +43,7 @@ ymin = -sensor.height() / SMALL_IMAGE_SCALE - 8 xmax = sensor.width() + 8 ymax = sensor.height() + 8 -while(True): +while True: clock.tick() status = "" @@ -54,43 +53,66 @@ while(True): # Makes a scaled copy of the sensor small_img = img.mean_pooled(SMALL_IMAGE_SCALE, SMALL_IMAGE_SCALE) - status = 'rgb565 ' + status = "rgb565 " if CYCLE_FORMATS: image_format = (value_mixer >> 8) & 3 # To test combining different formats - if (image_format==1): small_img = small_img.to_bitmap(copy=True); status = 'bitmap ' - if (image_format==2): small_img = small_img.to_grayscale(copy=True); status = 'grayscale ' - if (image_format==3): small_img = small_img.to_rgb565(copy=True); status = 'rgb565 ' + if image_format == 1: + small_img = small_img.to_bitmap(copy=True) + status = "bitmap " + if image_format == 2: + small_img = small_img.to_grayscale(copy=True) + status = "grayscale " + if image_format == 3: + small_img = small_img.to_rgb565(copy=True) + status = "rgb565 " # update small image location if BOUNCE: x = x + xd - if (xxmax): + if x < xmin or x > xmax: xd = -xd y = y + yd - if (yymax): + if y < ymin or y > ymax: yd = -yd # Update small image scale if RESCALE: rescale = rescale + rd - if (rescalemax_rescale): + if rescale < min_rescale or rescale > max_rescale: rd = -rd # Find the center of the image scaled_width = int(small_img.width() * abs(rescale)) - scaled_height= int(small_img.height() * abs(rescale)) + scaled_height = int(small_img.height() * abs(rescale)) apply_mask = CYCLE_MASK and ((value_mixer >> 9) & 1) if apply_mask: - img.draw_image(small_img, int(x), int(y), mask=small_img.to_bitmap(copy=True), x_scale=rescale, y_scale=rescale, alpha=240, hint=image.BILINEAR | image.CENTER) - status += 'alpha:240 ' - status += '+mask ' + img.draw_image( + small_img, + int(x), + int(y), + mask=small_img.to_bitmap(copy=True), + x_scale=rescale, + y_scale=rescale, + alpha=240, + hint=image.BILINEAR | image.CENTER, + ) + status += "alpha:240 " + status += "+mask " else: - img.draw_image(small_img, int(x), int(y), x_scale=rescale, y_scale=rescale, alpha=128, hint=image.BILINEAR | image.CENTER) - status += 'alpha:128 ' + img.draw_image( + small_img, + int(x), + int(y), + x_scale=rescale, + y_scale=rescale, + alpha=128, + hint=image.BILINEAR | image.CENTER, + ) + status += "alpha:128 " - img.draw_string(8, 0, status, mono_space = False) + img.draw_string(8, 0, status, mono_space=False) print(clock.fps()) diff --git a/scripts/examples/02-Image-Processing/00-Drawing/image_drawing_alpha_blending_test.py b/scripts/examples/02-Image-Processing/00-Drawing/image_drawing_alpha_blending_test.py index a77e9a819..da610805e 100644 --- a/scripts/examples/02-Image-Processing/00-Drawing/image_drawing_alpha_blending_test.py +++ b/scripts/examples/02-Image-Processing/00-Drawing/image_drawing_alpha_blending_test.py @@ -13,61 +13,69 @@ sensor.reset() sensor.set_pixformat(sensor.RGB565) sensor.set_framesize(sensor.QVGA) -hint = image.BICUBIC # image.BILINEAR image.BICUBIC +hint = image.BICUBIC # image.BILINEAR image.BICUBIC small_img = image.Image(4, 4, sensor.RGB565) -small_img.set_pixel(0, 0, (0, 0, 127)) -small_img.set_pixel(1, 0, (47, 255, 199)) -small_img.set_pixel(2, 0, (0, 188, 255)) -small_img.set_pixel(3, 0, (0, 0, 127)) -small_img.set_pixel(0, 1, (0, 176, 255)) -small_img.set_pixel(1, 1, (222, 0, 0 )) -small_img.set_pixel(2, 1, (50, 255, 195)) -small_img.set_pixel(3, 1, (86, 255, 160)) -small_img.set_pixel(0, 2, (255, 211, 0 )) 
-small_img.set_pixel(1, 2, (83, 255, 163)) +small_img.set_pixel(0, 0, (0, 0, 127)) +small_img.set_pixel(1, 0, (47, 255, 199)) +small_img.set_pixel(2, 0, (0, 188, 255)) +small_img.set_pixel(3, 0, (0, 0, 127)) +small_img.set_pixel(0, 1, (0, 176, 255)) +small_img.set_pixel(1, 1, (222, 0, 0)) +small_img.set_pixel(2, 1, (50, 255, 195)) +small_img.set_pixel(3, 1, (86, 255, 160)) +small_img.set_pixel(0, 2, (255, 211, 0)) +small_img.set_pixel(1, 2, (83, 255, 163)) small_img.set_pixel(2, 2, (255, 211, 0)) -small_img.set_pixel(3, 2, (0, 80, 255)) -small_img.set_pixel(0, 3, (255, 118, 0 )) -small_img.set_pixel(1, 3, (127, 0, 0 )) -small_img.set_pixel(2, 3, (0, 144, 255)) -small_img.set_pixel(3, 3, (50, 255, 195)) -#small_img.to_grayscale() -#small_img.to_bitmap() +small_img.set_pixel(3, 2, (0, 80, 255)) +small_img.set_pixel(0, 3, (255, 118, 0)) +small_img.set_pixel(1, 3, (127, 0, 0)) +small_img.set_pixel(2, 3, (0, 144, 255)) +small_img.set_pixel(3, 3, (50, 255, 195)) +# small_img.to_grayscale() +# small_img.to_bitmap() big_img = image.Image(128, 128, sensor.RGB565) big_img.draw_image(small_img, 0, 0, x_scale=32, y_scale=32, hint=hint) -#big_img.to_grayscale() -#big_img.to_bitmap() +# big_img.to_grayscale() +# big_img.to_bitmap() alpha_div = 1 alpha_value = 0 alpha_step = 2 -x_bounce = sensor.width()//2 +x_bounce = sensor.width() // 2 x_bounce_toggle = 1 -y_bounce = sensor.height()//2 +y_bounce = sensor.height() // 2 y_bounce_toggle = 1 clock = time.clock() -while(True): +while True: clock.tick() img = sensor.snapshot() - #img.to_grayscale() - #img.to_bitmap() - img.draw_image(big_img, x_bounce, y_bounce, - rgb_channel=-1, alpha=alpha_value//alpha_div, - hint=hint|image.CENTER) + # img.to_grayscale() + # img.to_bitmap() + img.draw_image( + big_img, + x_bounce, + y_bounce, + rgb_channel=-1, + alpha=alpha_value // alpha_div, + hint=hint | image.CENTER, + ) x_bounce += x_bounce_toggle - if abs(x_bounce-(img.width()//2)) >= (img.width()//2): x_bounce_toggle = -x_bounce_toggle + if abs(x_bounce - (img.width() // 2)) >= (img.width() // 2): + x_bounce_toggle = -x_bounce_toggle y_bounce += y_bounce_toggle - if abs(y_bounce-(img.height()//2)) >= (img.height()//2): y_bounce_toggle = -y_bounce_toggle + if abs(y_bounce - (img.height() // 2)) >= (img.height() // 2): + y_bounce_toggle = -y_bounce_toggle alpha_value += alpha_step - if not alpha_value or alpha_value//alpha_div == 256: alpha_step = -alpha_step + if not alpha_value or alpha_value // alpha_div == 256: + alpha_step = -alpha_step print(clock.fps()) diff --git a/scripts/examples/02-Image-Processing/00-Drawing/image_drawing_alpha_blending_with_color_table_test.py b/scripts/examples/02-Image-Processing/00-Drawing/image_drawing_alpha_blending_with_color_table_test.py index bb27e926c..6a3afdb43 100644 --- a/scripts/examples/02-Image-Processing/00-Drawing/image_drawing_alpha_blending_with_color_table_test.py +++ b/scripts/examples/02-Image-Processing/00-Drawing/image_drawing_alpha_blending_with_color_table_test.py @@ -13,71 +13,80 @@ sensor.reset() sensor.set_pixformat(sensor.RGB565) sensor.set_framesize(sensor.QVGA) -hint = image.BICUBIC # image.BILINEAR image.BICUBIC +hint = image.BICUBIC # image.BILINEAR image.BICUBIC # RGB channel extraction is done after scaling normally, this # may produce false colors. Set this flag to do it before. # -hint |= 0 # image.EXTRACT_RGB_CHANNEL_FIRST +hint |= 0 # image.EXTRACT_RGB_CHANNEL_FIRST # Color table application is done after scaling normally, this # may produce false colors. Set this flag to do it before. 
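# Since the hints are a bitmask, options combine with bitwise OR, e.g. (an
# illustrative combination, not the one this script uses):
#
#     hint = image.BICUBIC | image.CENTER | image.APPLY_COLOR_PALETTE_FIRST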
# -hint |= 0 # image.APPLY_COLOR_PALETTE_FIRST +hint |= 0 # image.APPLY_COLOR_PALETTE_FIRST small_img = image.Image(4, 4, sensor.RGB565) -small_img.set_pixel(0, 0, (0, 0, 127)) -small_img.set_pixel(1, 0, (47, 255, 199)) -small_img.set_pixel(2, 0, (0, 188, 255)) -small_img.set_pixel(3, 0, (0, 0, 127)) -small_img.set_pixel(0, 1, (0, 176, 255)) -small_img.set_pixel(1, 1, (222, 0, 0 )) -small_img.set_pixel(2, 1, (50, 255, 195)) -small_img.set_pixel(3, 1, (86, 255, 160)) -small_img.set_pixel(0, 2, (255, 211, 0 )) -small_img.set_pixel(1, 2, (83, 255, 163)) +small_img.set_pixel(0, 0, (0, 0, 127)) +small_img.set_pixel(1, 0, (47, 255, 199)) +small_img.set_pixel(2, 0, (0, 188, 255)) +small_img.set_pixel(3, 0, (0, 0, 127)) +small_img.set_pixel(0, 1, (0, 176, 255)) +small_img.set_pixel(1, 1, (222, 0, 0)) +small_img.set_pixel(2, 1, (50, 255, 195)) +small_img.set_pixel(3, 1, (86, 255, 160)) +small_img.set_pixel(0, 2, (255, 211, 0)) +small_img.set_pixel(1, 2, (83, 255, 163)) small_img.set_pixel(2, 2, (255, 211, 0)) -small_img.set_pixel(3, 2, (0, 80, 255)) -small_img.set_pixel(0, 3, (255, 118, 0 )) -small_img.set_pixel(1, 3, (127, 0, 0 )) -small_img.set_pixel(2, 3, (0, 144, 255)) -small_img.set_pixel(3, 3, (50, 255, 195)) -#small_img.to_grayscale() -#small_img.to_bitmap() +small_img.set_pixel(3, 2, (0, 80, 255)) +small_img.set_pixel(0, 3, (255, 118, 0)) +small_img.set_pixel(1, 3, (127, 0, 0)) +small_img.set_pixel(2, 3, (0, 144, 255)) +small_img.set_pixel(3, 3, (50, 255, 195)) +# small_img.to_grayscale() +# small_img.to_bitmap() big_img = image.Image(128, 128, sensor.RGB565) big_img.draw_image(small_img, 0, 0, x_scale=32, y_scale=32, hint=hint) -#big_img.to_grayscale() -#big_img.to_bitmap() +# big_img.to_grayscale() +# big_img.to_bitmap() alpha_div = 1 alpha_value = 0 alpha_step = 2 -x_bounce = sensor.width()//2 +x_bounce = sensor.width() // 2 x_bounce_toggle = 1 -y_bounce = sensor.height()//2 +y_bounce = sensor.height() // 2 y_bounce_toggle = 1 clock = time.clock() -while(True): +while True: clock.tick() img = sensor.snapshot() - #img.to_grayscale() - #img.to_bitmap() - img.draw_image(big_img, x_bounce, y_bounce, - rgb_channel=-1, alpha=alpha_value//alpha_div, - color_palette=sensor.PALETTE_IRONBOW, hint=hint|image.CENTER) + # img.to_grayscale() + # img.to_bitmap() + img.draw_image( + big_img, + x_bounce, + y_bounce, + rgb_channel=-1, + alpha=alpha_value // alpha_div, + color_palette=sensor.PALETTE_IRONBOW, + hint=hint | image.CENTER, + ) x_bounce += x_bounce_toggle - if abs(x_bounce-(img.width()//2)) >= (img.width()//2): x_bounce_toggle = -x_bounce_toggle + if abs(x_bounce - (img.width() // 2)) >= (img.width() // 2): + x_bounce_toggle = -x_bounce_toggle y_bounce += y_bounce_toggle - if abs(y_bounce-(img.height()//2)) >= (img.height()//2): y_bounce_toggle = -y_bounce_toggle + if abs(y_bounce - (img.height() // 2)) >= (img.height() // 2): + y_bounce_toggle = -y_bounce_toggle alpha_value += alpha_step - if not alpha_value or alpha_value//alpha_div == 256: alpha_step = -alpha_step + if not alpha_value or alpha_value // alpha_div == 256: + alpha_step = -alpha_step print(clock.fps()) diff --git a/scripts/examples/02-Image-Processing/00-Drawing/image_drawing_alpha_table_test.py b/scripts/examples/02-Image-Processing/00-Drawing/image_drawing_alpha_table_test.py index 58937de34..55a8bbc54 100644 --- a/scripts/examples/02-Image-Processing/00-Drawing/image_drawing_alpha_table_test.py +++ b/scripts/examples/02-Image-Processing/00-Drawing/image_drawing_alpha_table_test.py @@ -13,32 +13,32 @@ sensor.reset() 
sensor.set_pixformat(sensor.RGB565) sensor.set_framesize(sensor.QVGA) -hint = image.BICUBIC # image.BILINEAR image.BICUBIC +hint = image.BICUBIC # image.BILINEAR image.BICUBIC small_img = image.Image(4, 4, sensor.RGB565) -small_img.set_pixel(0, 0, (0, 0, 127)) -small_img.set_pixel(1, 0, (47, 255, 199)) -small_img.set_pixel(2, 0, (0, 188, 255)) -small_img.set_pixel(3, 0, (0, 0, 127)) -small_img.set_pixel(0, 1, (0, 176, 255)) -small_img.set_pixel(1, 1, (222, 0, 0 )) -small_img.set_pixel(2, 1, (50, 255, 195)) -small_img.set_pixel(3, 1, (86, 255, 160)) -small_img.set_pixel(0, 2, (255, 211, 0 )) -small_img.set_pixel(1, 2, (83, 255, 163)) +small_img.set_pixel(0, 0, (0, 0, 127)) +small_img.set_pixel(1, 0, (47, 255, 199)) +small_img.set_pixel(2, 0, (0, 188, 255)) +small_img.set_pixel(3, 0, (0, 0, 127)) +small_img.set_pixel(0, 1, (0, 176, 255)) +small_img.set_pixel(1, 1, (222, 0, 0)) +small_img.set_pixel(2, 1, (50, 255, 195)) +small_img.set_pixel(3, 1, (86, 255, 160)) +small_img.set_pixel(0, 2, (255, 211, 0)) +small_img.set_pixel(1, 2, (83, 255, 163)) small_img.set_pixel(2, 2, (255, 211, 0)) -small_img.set_pixel(3, 2, (0, 80, 255)) -small_img.set_pixel(0, 3, (255, 118, 0 )) -small_img.set_pixel(1, 3, (127, 0, 0 )) -small_img.set_pixel(2, 3, (0, 144, 255)) -small_img.set_pixel(3, 3, (50, 255, 195)) -#small_img.to_grayscale() -#small_img.to_bitmap() +small_img.set_pixel(3, 2, (0, 80, 255)) +small_img.set_pixel(0, 3, (255, 118, 0)) +small_img.set_pixel(1, 3, (127, 0, 0)) +small_img.set_pixel(2, 3, (0, 144, 255)) +small_img.set_pixel(3, 3, (50, 255, 195)) +# small_img.to_grayscale() +# small_img.to_bitmap() big_img = image.Image(128, 128, sensor.RGB565) big_img.draw_image(small_img, 0, 0, x_scale=32, y_scale=32, hint=hint) -#big_img.to_grayscale() -#big_img.to_bitmap() +# big_img.to_grayscale() +# big_img.to_bitmap() alpha_lut = image.Image(256, 1, sensor.GRAYSCALE) for i in range(256): @@ -48,30 +48,39 @@ alpha_div = 1 alpha_value = 0 alpha_step = 2 -x_bounce = sensor.width()//2 +x_bounce = sensor.width() // 2 x_bounce_toggle = 1 -y_bounce = sensor.height()//2 +y_bounce = sensor.height() // 2 y_bounce_toggle = 1 clock = time.clock() -while(True): +while True: clock.tick() img = sensor.snapshot() - #img.to_grayscale() - #img.to_bitmap() - img.draw_image(big_img, x_bounce, y_bounce, - rgb_channel=-1, alpha=alpha_value//alpha_div, - alpha_palette=alpha_lut, hint=hint|image.CENTER) + # img.to_grayscale() + # img.to_bitmap() + img.draw_image( + big_img, + x_bounce, + y_bounce, + rgb_channel=-1, + alpha=alpha_value // alpha_div, + alpha_palette=alpha_lut, + hint=hint | image.CENTER, + ) x_bounce += x_bounce_toggle - if abs(x_bounce-(img.width()//2)) >= (img.width()//2): x_bounce_toggle = -x_bounce_toggle + if abs(x_bounce - (img.width() // 2)) >= (img.width() // 2): + x_bounce_toggle = -x_bounce_toggle y_bounce += y_bounce_toggle - if abs(y_bounce-(img.height()//2)) >= (img.height()//2): y_bounce_toggle = -y_bounce_toggle + if abs(y_bounce - (img.height() // 2)) >= (img.height() // 2): + y_bounce_toggle = -y_bounce_toggle alpha_value += alpha_step - if not alpha_value or alpha_value//alpha_div == 256: alpha_step = -alpha_step + if not alpha_value or alpha_value // alpha_div == 256: + alpha_step = -alpha_step print(clock.fps()) diff --git a/scripts/examples/02-Image-Processing/00-Drawing/image_drawing_alpha_table_with_color_table_test.py b/scripts/examples/02-Image-Processing/00-Drawing/image_drawing_alpha_table_with_color_table_test.py index f1e7129de..d04d224dc 100644 --- 
a/scripts/examples/02-Image-Processing/00-Drawing/image_drawing_alpha_table_with_color_table_test.py +++ b/scripts/examples/02-Image-Processing/00-Drawing/image_drawing_alpha_table_with_color_table_test.py @@ -13,42 +13,42 @@ sensor.reset() sensor.set_pixformat(sensor.RGB565) sensor.set_framesize(sensor.QVGA) -hint = image.BICUBIC # image.BILINEAR image.BICUBIC +hint = image.BICUBIC # image.BILINEAR image.BICUBIC # RGB channel extraction is done after scaling normally, this # may produce false colors. Set this flag to do it before. # -hint |= 0 # image.EXTRACT_RGB_CHANNEL_FIRST +hint |= 0 # image.EXTRACT_RGB_CHANNEL_FIRST # Color table application is done after scaling normally, this # may produce false colors. Set this flag to do it before. # -hint |= 0 # image.APPLY_COLOR_PALETTE_FIRST +hint |= 0 # image.APPLY_COLOR_PALETTE_FIRST small_img = image.Image(4, 4, sensor.RGB565) -small_img.set_pixel(0, 0, (0, 0, 127)) -small_img.set_pixel(1, 0, (47, 255, 199)) -small_img.set_pixel(2, 0, (0, 188, 255)) -small_img.set_pixel(3, 0, (0, 0, 127)) -small_img.set_pixel(0, 1, (0, 176, 255)) -small_img.set_pixel(1, 1, (222, 0, 0 )) -small_img.set_pixel(2, 1, (50, 255, 195)) -small_img.set_pixel(3, 1, (86, 255, 160)) -small_img.set_pixel(0, 2, (255, 211, 0 )) -small_img.set_pixel(1, 2, (83, 255, 163)) +small_img.set_pixel(0, 0, (0, 0, 127)) +small_img.set_pixel(1, 0, (47, 255, 199)) +small_img.set_pixel(2, 0, (0, 188, 255)) +small_img.set_pixel(3, 0, (0, 0, 127)) +small_img.set_pixel(0, 1, (0, 176, 255)) +small_img.set_pixel(1, 1, (222, 0, 0)) +small_img.set_pixel(2, 1, (50, 255, 195)) +small_img.set_pixel(3, 1, (86, 255, 160)) +small_img.set_pixel(0, 2, (255, 211, 0)) +small_img.set_pixel(1, 2, (83, 255, 163)) small_img.set_pixel(2, 2, (255, 211, 0)) -small_img.set_pixel(3, 2, (0, 80, 255)) -small_img.set_pixel(0, 3, (255, 118, 0 )) -small_img.set_pixel(1, 3, (127, 0, 0 )) -small_img.set_pixel(2, 3, (0, 144, 255)) -small_img.set_pixel(3, 3, (50, 255, 195)) -#small_img.to_grayscale() -#small_img.to_bitmap() +small_img.set_pixel(3, 2, (0, 80, 255)) +small_img.set_pixel(0, 3, (255, 118, 0)) +small_img.set_pixel(1, 3, (127, 0, 0)) +small_img.set_pixel(2, 3, (0, 144, 255)) +small_img.set_pixel(3, 3, (50, 255, 195)) +# small_img.to_grayscale() +# small_img.to_bitmap() big_img = image.Image(128, 128, sensor.RGB565) big_img.draw_image(small_img, 0, 0, x_scale=32, y_scale=32, hint=hint) -#big_img.to_grayscale() -#big_img.to_bitmap() +# big_img.to_grayscale() +# big_img.to_bitmap() alpha_lut = image.Image(256, 1, sensor.GRAYSCALE) for i in range(256): @@ -58,30 +58,40 @@ alpha_div = 1 alpha_value = 0 alpha_step = 2 -x_bounce = sensor.width()//2 +x_bounce = sensor.width() // 2 x_bounce_toggle = 1 -y_bounce = sensor.height()//2 +y_bounce = sensor.height() // 2 y_bounce_toggle = 1 clock = time.clock() -while(True): +while True: clock.tick() img = sensor.snapshot() - #img.to_grayscale() - #img.to_bitmap() - img.draw_image(big_img, x_bounce, y_bounce, - rgb_channel=-1, alpha=alpha_value//alpha_div, - color_palette=sensor.PALETTE_IRONBOW, alpha_palette=alpha_lut, hint=hint|image.CENTER) + # img.to_grayscale() + # img.to_bitmap() + img.draw_image( + big_img, + x_bounce, + y_bounce, + rgb_channel=-1, + alpha=alpha_value // alpha_div, + color_palette=sensor.PALETTE_IRONBOW, + alpha_palette=alpha_lut, + hint=hint | image.CENTER, + ) x_bounce += x_bounce_toggle - if abs(x_bounce-(img.width()//2)) >= (img.width()//2): x_bounce_toggle = -x_bounce_toggle + if abs(x_bounce - (img.width() // 2)) >= (img.width() // 2): + 
x_bounce_toggle = -x_bounce_toggle y_bounce += y_bounce_toggle - if abs(y_bounce-(img.height()//2)) >= (img.height()//2): y_bounce_toggle = -y_bounce_toggle + if abs(y_bounce - (img.height() // 2)) >= (img.height() // 2): + y_bounce_toggle = -y_bounce_toggle alpha_value += alpha_step - if not alpha_value or alpha_value//alpha_div == 256: alpha_step = -alpha_step + if not alpha_value or alpha_value // alpha_div == 256: + alpha_step = -alpha_step print(clock.fps()) diff --git a/scripts/examples/02-Image-Processing/00-Drawing/image_drawing_scale_down_test.py b/scripts/examples/02-Image-Processing/00-Drawing/image_drawing_scale_down_test.py index ed9c10d98..28e453c1e 100644 --- a/scripts/examples/02-Image-Processing/00-Drawing/image_drawing_scale_down_test.py +++ b/scripts/examples/02-Image-Processing/00-Drawing/image_drawing_scale_down_test.py @@ -11,39 +11,39 @@ import sensor import image import time -up_hint = 0 # image.BILINEAR image.BICUBIC -down_hint = image.AREA # image.BILINEAR image.BICUBIC image.AREA +up_hint = 0 # image.BILINEAR image.BICUBIC +down_hint = image.AREA # image.BILINEAR image.BICUBIC image.AREA bounce_div = 128 medium_img = image.Image(32, 32, sensor.RGB565, copy_to_fb=True) -#medium_img.to_grayscale() -#medium_img.to_bitmap() +# medium_img.to_grayscale() +# medium_img.to_bitmap() small_img = image.Image(4, 4, sensor.RGB565) -small_img.set_pixel(0, 0, (0, 0, 127)) -small_img.set_pixel(1, 0, (47, 255, 199)) -small_img.set_pixel(2, 0, (0, 188, 255)) -small_img.set_pixel(3, 0, (0, 0, 127)) -small_img.set_pixel(0, 1, (0, 176, 255)) -small_img.set_pixel(1, 1, (222, 0, 0 )) -small_img.set_pixel(2, 1, (50, 255, 195)) -small_img.set_pixel(3, 1, (86, 255, 160)) -small_img.set_pixel(0, 2, (255, 211, 0 )) -small_img.set_pixel(1, 2, (83, 255, 163)) +small_img.set_pixel(0, 0, (0, 0, 127)) +small_img.set_pixel(1, 0, (47, 255, 199)) +small_img.set_pixel(2, 0, (0, 188, 255)) +small_img.set_pixel(3, 0, (0, 0, 127)) +small_img.set_pixel(0, 1, (0, 176, 255)) +small_img.set_pixel(1, 1, (222, 0, 0)) +small_img.set_pixel(2, 1, (50, 255, 195)) +small_img.set_pixel(3, 1, (86, 255, 160)) +small_img.set_pixel(0, 2, (255, 211, 0)) +small_img.set_pixel(1, 2, (83, 255, 163)) small_img.set_pixel(2, 2, (255, 211, 0)) -small_img.set_pixel(3, 2, (0, 80, 255)) -small_img.set_pixel(0, 3, (255, 118, 0 )) -small_img.set_pixel(1, 3, (127, 0, 0 )) -small_img.set_pixel(2, 3, (0, 144, 255)) -small_img.set_pixel(3, 3, (50, 255, 195)) -#small_img.to_grayscale() -#small_img.to_bitmap() +small_img.set_pixel(3, 2, (0, 80, 255)) +small_img.set_pixel(0, 3, (255, 118, 0)) +small_img.set_pixel(1, 3, (127, 0, 0)) +small_img.set_pixel(2, 3, (0, 144, 255)) +small_img.set_pixel(3, 3, (50, 255, 195)) +# small_img.to_grayscale() +# small_img.to_bitmap() big_img = image.Image(128, 128, sensor.RGB565) big_img.draw_image(small_img, 0, 0, x_scale=32, y_scale=32, hint=up_hint) -#big_img.to_grayscale() -#big_img.to_bitmap() +# big_img.to_grayscale() +# big_img.to_bitmap() x_bounce = 0 x_bounce_toggle = 0 @@ -52,20 +52,26 @@ y_bounce = 0 y_bounce_toggle = 0 clock = time.clock() -while(True): +while True: clock.tick() medium_img.clear() - medium_img.draw_image(big_img, - x_bounce // bounce_div, y_bounce // bounce_div, - x_scale=0.25, y_scale=0.25, - hint=down_hint) + medium_img.draw_image( + big_img, + x_bounce // bounce_div, + y_bounce // bounce_div, + x_scale=0.25, + y_scale=0.25, + hint=down_hint, + ) sensor.flush() x_bounce += x_bounce_toggle - if abs(x_bounce // bounce_div) >= (medium_img.width()*1.1): x_bounce_toggle = 
-x_bounce_toggle + if abs(x_bounce // bounce_div) >= (medium_img.width() * 1.1): + x_bounce_toggle = -x_bounce_toggle y_bounce += y_bounce_toggle - if abs(y_bounce // bounce_div) >= (medium_img.height()*1.1): y_bounce_toggle = -y_bounce_toggle + if abs(y_bounce // bounce_div) >= (medium_img.height() * 1.1): + y_bounce_toggle = -y_bounce_toggle print(clock.fps()) diff --git a/scripts/examples/02-Image-Processing/00-Drawing/image_drawing_scale_up_test.py b/scripts/examples/02-Image-Processing/00-Drawing/image_drawing_scale_up_test.py index bc43d230a..21686e05d 100644 --- a/scripts/examples/02-Image-Processing/00-Drawing/image_drawing_scale_up_test.py +++ b/scripts/examples/02-Image-Processing/00-Drawing/image_drawing_scale_up_test.py @@ -11,33 +11,33 @@ import sensor import image import time -hint = 0 # image.BILINEAR image.BICUBIC +hint = 0 # image.BILINEAR image.BICUBIC bounce_div = 32 big_img = image.Image(128, 128, sensor.RGB565, copy_to_fb=True) -#big_img.to_grayscale() -#big_img.to_bitmap() +# big_img.to_grayscale() +# big_img.to_bitmap() small_img = image.Image(4, 4, sensor.RGB565) -small_img.set_pixel(0, 0, (0, 0, 127)) -small_img.set_pixel(1, 0, (47, 255, 199)) -small_img.set_pixel(2, 0, (0, 188, 255)) -small_img.set_pixel(3, 0, (0, 0, 127)) -small_img.set_pixel(0, 1, (0, 176, 255)) -small_img.set_pixel(1, 1, (222, 0, 0 )) -small_img.set_pixel(2, 1, (50, 255, 195)) -small_img.set_pixel(3, 1, (86, 255, 160)) -small_img.set_pixel(0, 2, (255, 211, 0 )) -small_img.set_pixel(1, 2, (83, 255, 163)) +small_img.set_pixel(0, 0, (0, 0, 127)) +small_img.set_pixel(1, 0, (47, 255, 199)) +small_img.set_pixel(2, 0, (0, 188, 255)) +small_img.set_pixel(3, 0, (0, 0, 127)) +small_img.set_pixel(0, 1, (0, 176, 255)) +small_img.set_pixel(1, 1, (222, 0, 0)) +small_img.set_pixel(2, 1, (50, 255, 195)) +small_img.set_pixel(3, 1, (86, 255, 160)) +small_img.set_pixel(0, 2, (255, 211, 0)) +small_img.set_pixel(1, 2, (83, 255, 163)) small_img.set_pixel(2, 2, (255, 211, 0)) -small_img.set_pixel(3, 2, (0, 80, 255)) -small_img.set_pixel(0, 3, (255, 118, 0 )) -small_img.set_pixel(1, 3, (127, 0, 0 )) -small_img.set_pixel(2, 3, (0, 144, 255)) -small_img.set_pixel(3, 3, (50, 255, 195)) -#small_img.to_grayscale() -#small_img.to_bitmap() +small_img.set_pixel(3, 2, (0, 80, 255)) +small_img.set_pixel(0, 3, (255, 118, 0)) +small_img.set_pixel(1, 3, (127, 0, 0)) +small_img.set_pixel(2, 3, (0, 144, 255)) +small_img.set_pixel(3, 3, (50, 255, 195)) +# small_img.to_grayscale() +# small_img.to_bitmap() x_bounce = 0 x_bounce_toggle = 0 @@ -46,20 +46,26 @@ y_bounce = 0 y_bounce_toggle = 0 clock = time.clock() -while(True): +while True: clock.tick() big_img.clear() - big_img.draw_image(small_img, - x_bounce // bounce_div, y_bounce // bounce_div, - x_scale=32, y_scale=32, - hint=hint) + big_img.draw_image( + small_img, + x_bounce // bounce_div, + y_bounce // bounce_div, + x_scale=32, + y_scale=32, + hint=hint, + ) sensor.flush() x_bounce += x_bounce_toggle - if abs(x_bounce // bounce_div) >= (big_img.width()*1.1): x_bounce_toggle = -x_bounce_toggle + if abs(x_bounce // bounce_div) >= (big_img.width() * 1.1): + x_bounce_toggle = -x_bounce_toggle y_bounce += y_bounce_toggle - if abs(y_bounce // bounce_div) >= (big_img.height()*1.1): y_bounce_toggle = -y_bounce_toggle + if abs(y_bounce // bounce_div) >= (big_img.height() * 1.1): + y_bounce_toggle = -y_bounce_toggle print(clock.fps()) diff --git a/scripts/examples/02-Image-Processing/00-Drawing/image_drawing_with_custom_palette.py 
b/scripts/examples/02-Image-Processing/00-Drawing/image_drawing_with_custom_palette.py
index b9de5099d..c0be29819 100644
--- a/scripts/examples/02-Image-Processing/00-Drawing/image_drawing_with_custom_palette.py
+++ b/scripts/examples/02-Image-Processing/00-Drawing/image_drawing_with_custom_palette.py
@@ -5,12 +5,11 @@
 import sensor
 import image
 import time
-import pyb
 
 sensor.reset()
-sensor.set_pixformat(sensor.GRAYSCALE) # or GRAYSCALE...
-sensor.set_framesize(sensor.QQVGA) # or QQVGA...
-sensor.skip_frames(time = 2000)
+sensor.set_pixformat(sensor.GRAYSCALE)  # or RGB565...
+sensor.set_framesize(sensor.QQVGA)  # or QQVGA...
+sensor.skip_frames(time=2000)
 clock = time.clock()
 
 # the color palette is actually an image, this allows you to use image ops to create palettes
@@ -23,11 +22,16 @@ for i, color in enumerate(palette_source_colors):
     palette_source_color_image[i] = color
 
 # Scale the image to palette width and smooth them
-palette = image.Image(256,1, sensor.RGB565)
-palette.draw_image(palette_source_color_image, 0, 0, x_scale=palette.width() / palette_source_color_image.width())
-palette.mean(int(palette.width() / palette_source_color_image.width()/2))
+palette = image.Image(256, 1, sensor.RGB565)
+palette.draw_image(
+    palette_source_color_image,
+    0,
+    0,
+    x_scale=palette.width() / palette_source_color_image.width(),
+)
+palette.mean(int(palette.width() / palette_source_color_image.width() / 2))
 
-while(True):
+while True:
     clock.tick()
 
     img = sensor.snapshot()
@@ -40,7 +44,20 @@ while(True):
     palette_scale_x = (sensor.width() - palette_boundary_inset * 2) / palette.width()
 
     img.draw_image(img_copy, 0, 0, color_palette=palette)
-    img.draw_image(palette, palette_boundary_inset, palette_boundary_inset, x_scale=palette_scale_x, y_scale=8)
-    img.draw_rectangle(palette_boundary_inset, palette_boundary_inset, int(palette.width()*palette_scale_x), 8, color=(255,255,255), thickness=1)
+    img.draw_image(
+        palette,
+        palette_boundary_inset,
+        palette_boundary_inset,
+        x_scale=palette_scale_x,
+        y_scale=8,
+    )
+    img.draw_rectangle(
+        palette_boundary_inset,
+        palette_boundary_inset,
+        int(palette.width() * palette_scale_x),
+        8,
+        color=(255, 255, 255),
+        thickness=1,
+    )
 
     print(clock.fps())
diff --git a/scripts/examples/02-Image-Processing/00-Drawing/keypoints_drawing.py b/scripts/examples/02-Image-Processing/00-Drawing/keypoints_drawing.py
index db2bc8b1e..f5909bc48 100644
--- a/scripts/examples/02-Image-Processing/00-Drawing/keypoints_drawing.py
+++ b/scripts/examples/02-Image-Processing/00-Drawing/keypoints_drawing.py
@@ -8,19 +8,19 @@ import time
 import pyb
 
 sensor.reset()
-sensor.set_pixformat(sensor.RGB565) # or GRAYSCALE...
-sensor.set_framesize(sensor.QVGA) # or QQVGA...
-sensor.skip_frames(time = 2000)
+sensor.set_pixformat(sensor.RGB565)  # or GRAYSCALE...
+sensor.set_framesize(sensor.QVGA)  # or QQVGA...
+sensor.skip_frames(time=2000)
 clock = time.clock()
 
-while(True):
+while True:
     clock.tick()
 
     img = sensor.snapshot()
 
     for i in range(20):
-        x = (pyb.rng() % (2*img.width())) - (img.width()//2)
-        y = (pyb.rng() % (2*img.height())) - (img.height()//2)
+        x = (pyb.rng() % (2 * img.width())) - (img.width() // 2)
+        y = (pyb.rng() % (2 * img.height())) - (img.height() // 2)
         rot = pyb.rng() % 360
 
         r = (pyb.rng() % 127) + 128
@@ -28,6 +28,8 @@ while(True):
         b = (pyb.rng() % 127) + 128
 
         # This method draws a keypoints object or a list of (x, y, rot) tuples...
- img.draw_keypoints([(x, y, rot)], color = (r, g, b), size = 20, thickness = 2, fill = False) + img.draw_keypoints( + [(x, y, rot)], color=(r, g, b), size=20, thickness=2, fill=False + ) print(clock.fps()) diff --git a/scripts/examples/02-Image-Processing/00-Drawing/line_drawing.py b/scripts/examples/02-Image-Processing/00-Drawing/line_drawing.py index c84ee6c83..0b48ef2ad 100644 --- a/scripts/examples/02-Image-Processing/00-Drawing/line_drawing.py +++ b/scripts/examples/02-Image-Processing/00-Drawing/line_drawing.py @@ -7,27 +7,27 @@ import time import pyb sensor.reset() -sensor.set_pixformat(sensor.RGB565) # or GRAYSCALE... -sensor.set_framesize(sensor.QVGA) # or QQVGA... -sensor.skip_frames(time = 2000) +sensor.set_pixformat(sensor.RGB565) # or GRAYSCALE... +sensor.set_framesize(sensor.QVGA) # or QQVGA... +sensor.skip_frames(time=2000) clock = time.clock() -while(True): +while True: clock.tick() img = sensor.snapshot() for i in range(10): - x0 = (pyb.rng() % (2*img.width())) - (img.width()//2) - y0 = (pyb.rng() % (2*img.height())) - (img.height()//2) - x1 = (pyb.rng() % (2*img.width())) - (img.width()//2) - y1 = (pyb.rng() % (2*img.height())) - (img.height()//2) + x0 = (pyb.rng() % (2 * img.width())) - (img.width() // 2) + y0 = (pyb.rng() % (2 * img.height())) - (img.height() // 2) + x1 = (pyb.rng() % (2 * img.width())) - (img.width() // 2) + y1 = (pyb.rng() % (2 * img.height())) - (img.height() // 2) r = (pyb.rng() % 127) + 128 g = (pyb.rng() % 127) + 128 b = (pyb.rng() % 127) + 128 # If the first argument is a scaler then this method expects # to see x0, y0, x1, and y1. Otherwise, it expects a (x0,y0,x1,y1) tuple. - img.draw_line(x0, y0, x1, y1, color = (r, g, b), thickness = 2) + img.draw_line(x0, y0, x1, y1, color=(r, g, b), thickness=2) print(clock.fps()) diff --git a/scripts/examples/02-Image-Processing/00-Drawing/rectangle_drawing.py b/scripts/examples/02-Image-Processing/00-Drawing/rectangle_drawing.py index ce73fcc60..9bd07c4e4 100644 --- a/scripts/examples/02-Image-Processing/00-Drawing/rectangle_drawing.py +++ b/scripts/examples/02-Image-Processing/00-Drawing/rectangle_drawing.py @@ -7,27 +7,27 @@ import time import pyb sensor.reset() -sensor.set_pixformat(sensor.RGB565) # or GRAYSCALE... -sensor.set_framesize(sensor.QVGA) # or QQVGA... -sensor.skip_frames(time = 2000) +sensor.set_pixformat(sensor.RGB565) # or GRAYSCALE... +sensor.set_framesize(sensor.QVGA) # or QQVGA... +sensor.skip_frames(time=2000) clock = time.clock() -while(True): +while True: clock.tick() img = sensor.snapshot() for i in range(10): - x = (pyb.rng() % (2*img.width())) - (img.width()//2) - y = (pyb.rng() % (2*img.height())) - (img.height()//2) - w = (pyb.rng() % (img.width()//2)) - h = (pyb.rng() % (img.height()//2)) + x = (pyb.rng() % (2 * img.width())) - (img.width() // 2) + y = (pyb.rng() % (2 * img.height())) - (img.height() // 2) + w = pyb.rng() % (img.width() // 2) + h = pyb.rng() % (img.height() // 2) r = (pyb.rng() % 127) + 128 g = (pyb.rng() % 127) + 128 b = (pyb.rng() % 127) + 128 # If the first argument is a scaler then this method expects # to see x, y, w, and h. Otherwise, it expects a (x,y,w,h) tuple. 
-        img.draw_rectangle(x, y, w, h, color = (r, g, b), thickness = 2, fill = False)
+        img.draw_rectangle(x, y, w, h, color=(r, g, b), thickness=2, fill=False)
 
     print(clock.fps())
diff --git a/scripts/examples/02-Image-Processing/00-Drawing/text_drawing.py b/scripts/examples/02-Image-Processing/00-Drawing/text_drawing.py
index 806f63787..b0fc602b1 100644
--- a/scripts/examples/02-Image-Processing/00-Drawing/text_drawing.py
+++ b/scripts/examples/02-Image-Processing/00-Drawing/text_drawing.py
@@ -7,19 +7,19 @@ import time
 import pyb
 
 sensor.reset()
-sensor.set_pixformat(sensor.RGB565) # or GRAYSCALE...
-sensor.set_framesize(sensor.QVGA) # or QQVGA...
-sensor.skip_frames(time = 2000)
+sensor.set_pixformat(sensor.RGB565)  # or GRAYSCALE...
+sensor.set_framesize(sensor.QVGA)  # or QQVGA...
+sensor.skip_frames(time=2000)
 clock = time.clock()
 
-while(True):
+while True:
     clock.tick()
 
     img = sensor.snapshot()
 
     for i in range(10):
-        x = (pyb.rng() % (2*img.width())) - (img.width()//2)
-        y = (pyb.rng() % (2*img.height())) - (img.height()//2)
+        x = (pyb.rng() % (2 * img.width())) - (img.width() // 2)
+        y = (pyb.rng() % (2 * img.height())) - (img.height() // 2)
         r = (pyb.rng() % 127) + 128
         g = (pyb.rng() % 127) + 128
         b = (pyb.rng() % 127) + 128
@@ -28,8 +28,19 @@ while(True):
         # to see x, y, and text. Otherwise, it expects a (x,y,text) tuple.
 
         # Character and string rotation can be done at 0, 90, 180, 270, etc. degrees.
-        img.draw_string(x, y, "Hello World!", color = (r, g, b), scale = 2, mono_space = False,
-                        char_rotation = 0, char_hmirror = False, char_vflip = False,
-                        string_rotation = 0, string_hmirror = False, string_vflip = False)
+        img.draw_string(
+            x,
+            y,
+            "Hello World!",
+            color=(r, g, b),
+            scale=2,
+            mono_space=False,
+            char_rotation=0,
+            char_hmirror=False,
+            char_vflip=False,
+            string_rotation=0,
+            string_hmirror=False,
+            string_vflip=False,
+        )
 
     print(clock.fps())
diff --git a/scripts/examples/02-Image-Processing/01-Image-Filters/adaptive_histogram_equalization.py b/scripts/examples/02-Image-Processing/01-Image-Filters/adaptive_histogram_equalization.py
index 180d30018..055fabdcb 100644
--- a/scripts/examples/02-Image-Processing/01-Image-Filters/adaptive_histogram_equalization.py
+++ b/scripts/examples/02-Image-Processing/01-Image-Filters/adaptive_histogram_equalization.py
@@ -12,10 +12,10 @@ import time
 sensor.reset()
 sensor.set_pixformat(sensor.RGB565)
 sensor.set_framesize(sensor.QQVGA)
-sensor.skip_frames(time = 2000)
+sensor.skip_frames(time=2000)
 clock = time.clock()
 
-while(True):
+while True:
     clock.tick()
 
     # A clip_limit of < 0 gives you normal adaptive histogram equalization
diff --git a/scripts/examples/02-Image-Processing/01-Image-Filters/blur_filter.py b/scripts/examples/02-Image-Processing/01-Image-Filters/blur_filter.py
index c5a39d9de..80057a036 100644
--- a/scripts/examples/02-Image-Processing/01-Image-Filters/blur_filter.py
+++ b/scripts/examples/02-Image-Processing/01-Image-Filters/blur_filter.py
@@ -5,18 +5,18 @@
 import sensor
 import time
 
-sensor.reset() # Initialize the camera sensor.
-sensor.set_pixformat(sensor.GRAYSCALE) # or sensor.RGB565
-sensor.set_framesize(sensor.QQVGA) # or sensor.QVGA (or others)
-sensor.skip_frames(time = 2000) # Let new settings take affect.
-clock = time.clock() # Tracks FPS.
+sensor.reset()  # Initialize the camera sensor.
+sensor.set_pixformat(sensor.GRAYSCALE)  # or sensor.RGB565
+sensor.set_framesize(sensor.QQVGA)  # or sensor.QVGA (or others)
+sensor.skip_frames(time=2000)  # Let new settings take effect.
+clock = time.clock()  # Tracks FPS.
-while(True):
-    clock.tick() # Track elapsed milliseconds between snapshots().
-    img = sensor.snapshot() # Take a picture and return the image.
+while True:
+    clock.tick()  # Track elapsed milliseconds between snapshots().
+    img = sensor.snapshot()  # Take a picture and return the image.
 
     # Run the kernel on every pixel of the image.
     img.gaussian(1)
 
-    print(clock.fps()) # Note: Your OpenMV Cam runs about half as fast while
+    print(clock.fps())  # Note: Your OpenMV Cam runs about half as fast while
     # connected to your computer. The FPS should increase once disconnected.
diff --git a/scripts/examples/02-Image-Processing/01-Image-Filters/cartoon_filter.py b/scripts/examples/02-Image-Processing/01-Image-Filters/cartoon_filter.py
index b5193ef52..665fabd7c 100644
--- a/scripts/examples/02-Image-Processing/01-Image-Filters/cartoon_filter.py
+++ b/scripts/examples/02-Image-Processing/01-Image-Filters/cartoon_filter.py
@@ -8,12 +8,12 @@
 import sensor
 import time
 
 sensor.reset()
-sensor.set_pixformat(sensor.RGB565) # or GRAYSCALE...
-sensor.set_framesize(sensor.QVGA) # or QQVGA...
-sensor.skip_frames(time = 2000)
+sensor.set_pixformat(sensor.RGB565)  # or GRAYSCALE...
+sensor.set_framesize(sensor.QVGA)  # or QQVGA...
+sensor.skip_frames(time=2000)
 clock = time.clock()
 
-while(True):
+while True:
     clock.tick()
 
     # seed_threshold controls the maximum area growth of a colored
diff --git a/scripts/examples/02-Image-Processing/01-Image-Filters/color_bilateral_filter.py b/scripts/examples/02-Image-Processing/01-Image-Filters/color_bilateral_filter.py
index 7e860678a..4298719f1 100644
--- a/scripts/examples/02-Image-Processing/01-Image-Filters/color_bilateral_filter.py
+++ b/scripts/examples/02-Image-Processing/01-Image-Filters/color_bilateral_filter.py
@@ -5,15 +5,15 @@
 import sensor
 import time
 
-sensor.reset() # Initialize the camera sensor.
-sensor.set_pixformat(sensor.RGB565) # or sensor.RGB565
-sensor.set_framesize(sensor.QQVGA) # or sensor.QVGA (or others)
-sensor.skip_frames(time = 2000) # Let new settings take affect.
-clock = time.clock() # Tracks FPS.
+sensor.reset()  # Initialize the camera sensor.
+sensor.set_pixformat(sensor.RGB565)  # or sensor.GRAYSCALE
+sensor.set_framesize(sensor.QQVGA)  # or sensor.QVGA (or others)
+sensor.skip_frames(time=2000)  # Let new settings take effect.
+clock = time.clock()  # Tracks FPS.
 
-while(True):
-    clock.tick() # Track elapsed milliseconds between snapshots().
-    img = sensor.snapshot() # Take a picture and return the image.
+while True:
+    clock.tick()  # Track elapsed milliseconds between snapshots().
+    img = sensor.snapshot()  # Take a picture and return the image.
 
     # color_sigma controls how close color-wise pixels have to be to each other to be
     # blurred together. A smaller value means they have to be closer.
@@ -30,5 +30,5 @@ while(True):
     # color_sigma/space_sigma too aggressively. Increase the sigma values until
     # the defects go away if you see them.
 
-    print(clock.fps()) # Note: Your OpenMV Cam runs about half as fast while
+    print(clock.fps())  # Note: Your OpenMV Cam runs about half as fast while
     # connected to your computer. The FPS should increase once disconnected.
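As a quick illustration of the two sigmas described above (a minimal sketch;
the kernel size and sigma values are made up), a gentler color filter that
still preserves edges might look like:

    img = sensor.snapshot()
    # A larger color_sigma blurs across bigger color differences; a larger
    # space_sigma blurs across more distant pixels.
    img.bilateral(3, color_sigma=0.2, space_sigma=1)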
diff --git a/scripts/examples/02-Image-Processing/01-Image-Filters/color_binary_filter.py b/scripts/examples/02-Image-Processing/01-Image-Filters/color_binary_filter.py index 425d1bbc3..00c92877a 100644 --- a/scripts/examples/02-Image-Processing/01-Image-Filters/color_binary_filter.py +++ b/scripts/examples/02-Image-Processing/01-Image-Filters/color_binary_filter.py @@ -9,16 +9,15 @@ import time sensor.reset() sensor.set_framesize(sensor.QVGA) sensor.set_pixformat(sensor.RGB565) -sensor.skip_frames(time = 2000) +sensor.skip_frames(time=2000) clock = time.clock() # Use the Tools -> Machine Vision -> Threshold Edtor to pick better thresholds. -red_threshold = (0,100, 0,127, 0,127) # L A B -green_threshold = (0,100, -128,0, 0,127) # L A B -blue_threshold = (0,100, -128,127, -128,0) # L A B - -while(True): +red_threshold = (0, 100, 0, 127, 0, 127) # L A B +green_threshold = (0, 100, -128, 0, 0, 127) # L A B +blue_threshold = (0, 100, -128, 127, -128, 0) # L A B +while True: # Test red threshold for i in range(100): clock.tick() @@ -44,19 +43,19 @@ while(True): for i in range(100): clock.tick() img = sensor.snapshot() - img.binary([red_threshold], invert = 1) + img.binary([red_threshold], invert=1) print(clock.fps()) # Test not green threshold for i in range(100): clock.tick() img = sensor.snapshot() - img.binary([green_threshold], invert = 1) + img.binary([green_threshold], invert=1) print(clock.fps()) # Test not blue threshold for i in range(100): clock.tick() img = sensor.snapshot() - img.binary([blue_threshold], invert = 1) + img.binary([blue_threshold], invert=1) print(clock.fps()) diff --git a/scripts/examples/02-Image-Processing/01-Image-Filters/color_light_removal.py b/scripts/examples/02-Image-Processing/01-Image-Filters/color_light_removal.py index 3855a6b3a..b24a6d3e6 100644 --- a/scripts/examples/02-Image-Processing/01-Image-Filters/color_light_removal.py +++ b/scripts/examples/02-Image-Processing/01-Image-Filters/color_light_removal.py @@ -10,17 +10,17 @@ import sensor import time -sensor.reset() # Initialize the camera sensor. -sensor.set_pixformat(sensor.RGB565) # or sensor.GRAYSCALE -sensor.set_framesize(sensor.QQVGA) # or sensor.QVGA (or others) -sensor.skip_frames(time = 2000) # Let new settings take affect. -clock = time.clock() # Tracks FPS. +sensor.reset() # Initialize the camera sensor. +sensor.set_pixformat(sensor.RGB565) # or sensor.GRAYSCALE +sensor.set_framesize(sensor.QQVGA) # or sensor.QVGA (or others) +sensor.skip_frames(time=2000) # Let new settings take affect. +clock = time.clock() # Tracks FPS. thresholds = (90, 100, -128, 127, -128, 127) -while(True): - clock.tick() # Track elapsed milliseconds between snapshots(). +while True: + clock.tick() # Track elapsed milliseconds between snapshots(). img = sensor.snapshot().binary([thresholds], invert=False, zero=True) - print(clock.fps()) # Note: Your OpenMV Cam runs about half as fast while + print(clock.fps()) # Note: Your OpenMV Cam runs about half as fast while # connected to your computer. The FPS should increase once disconnected. diff --git a/scripts/examples/02-Image-Processing/01-Image-Filters/edge_filter.py b/scripts/examples/02-Image-Processing/01-Image-Filters/edge_filter.py index 111f22471..03bd8367e 100644 --- a/scripts/examples/02-Image-Processing/01-Image-Filters/edge_filter.py +++ b/scripts/examples/02-Image-Processing/01-Image-Filters/edge_filter.py @@ -5,18 +5,18 @@ import sensor import time -sensor.reset() # Initialize the camera sensor. 
-sensor.set_pixformat(sensor.GRAYSCALE) # or sensor.RGB565 -sensor.set_framesize(sensor.QQVGA) # or sensor.QVGA (or others) -sensor.skip_frames(time = 2000) # Let new settings take affect. -clock = time.clock() # Tracks FPS. +sensor.reset() # Initialize the camera sensor. +sensor.set_pixformat(sensor.GRAYSCALE) # or sensor.RGB565 +sensor.set_framesize(sensor.QQVGA) # or sensor.QVGA (or others) +sensor.skip_frames(time=2000) # Let new settings take affect. +clock = time.clock() # Tracks FPS. -while(True): - clock.tick() # Track elapsed milliseconds between snapshots(). - img = sensor.snapshot() # Take a picture and return the image. +while True: + clock.tick() # Track elapsed milliseconds between snapshots(). + img = sensor.snapshot() # Take a picture and return the image. # Run the kernel on every pixel of the image. img.laplacian(1) - print(clock.fps()) # Note: Your OpenMV Cam runs about half as fast while + print(clock.fps()) # Note: Your OpenMV Cam runs about half as fast while # connected to your computer. The FPS should increase once disconnected. diff --git a/scripts/examples/02-Image-Processing/01-Image-Filters/erode_and_dilate.py b/scripts/examples/02-Image-Processing/01-Image-Filters/erode_and_dilate.py index 2b66a50bb..69b9b5f28 100644 --- a/scripts/examples/02-Image-Processing/01-Image-Filters/erode_and_dilate.py +++ b/scripts/examples/02-Image-Processing/01-Image-Filters/erode_and_dilate.py @@ -4,7 +4,6 @@ # a binary image to remove noise. This example was originally a test but its # useful for showing off how these functions work. -import pyb import sensor sensor.reset() @@ -13,8 +12,7 @@ sensor.set_framesize(sensor.QVGA) grayscale_thres = (170, 255) rgb565_thres = (70, 100, -128, 127, -128, 127) -while(True): - +while True: sensor.set_pixformat(sensor.GRAYSCALE) for i in range(20): img = sensor.snapshot() diff --git a/scripts/examples/02-Image-Processing/01-Image-Filters/gamma_correction.py b/scripts/examples/02-Image-Processing/01-Image-Filters/gamma_correction.py index 03c876d91..bfb8e6427 100644 --- a/scripts/examples/02-Image-Processing/01-Image-Filters/gamma_correction.py +++ b/scripts/examples/02-Image-Processing/01-Image-Filters/gamma_correction.py @@ -9,14 +9,14 @@ import time sensor.reset() sensor.set_pixformat(sensor.RGB565) sensor.set_framesize(sensor.QVGA) -sensor.skip_frames(time = 2000) +sensor.skip_frames(time=2000) clock = time.clock() -while(True): +while True: clock.tick() # Gamma, contrast, and brightness correction are applied to each color channel. The # values are scaled to the range per color channel per image type... - img = sensor.snapshot().gamma_corr(gamma = 0.5, contrast = 1.0, brightness = 0.0) + img = sensor.snapshot().gamma_corr(gamma=0.5, contrast=1.0, brightness=0.0) print(clock.fps()) diff --git a/scripts/examples/02-Image-Processing/01-Image-Filters/grayscale_bilateral_filter.py b/scripts/examples/02-Image-Processing/01-Image-Filters/grayscale_bilateral_filter.py index 88ace3880..0a9eafb6f 100644 --- a/scripts/examples/02-Image-Processing/01-Image-Filters/grayscale_bilateral_filter.py +++ b/scripts/examples/02-Image-Processing/01-Image-Filters/grayscale_bilateral_filter.py @@ -5,15 +5,15 @@ import sensor import time -sensor.reset() # Initialize the camera sensor. -sensor.set_pixformat(sensor.GRAYSCALE) # or sensor.RGB565 -sensor.set_framesize(sensor.QQVGA) # or sensor.QVGA (or others) -sensor.skip_frames(time = 2000) # Let new settings take affect. -clock = time.clock() # Tracks FPS. +sensor.reset() # Initialize the camera sensor. 
+sensor.set_pixformat(sensor.GRAYSCALE) # or sensor.RGB565 +sensor.set_framesize(sensor.QQVGA) # or sensor.QVGA (or others) +sensor.skip_frames(time=2000) # Let new settings take affect. +clock = time.clock() # Tracks FPS. -while(True): - clock.tick() # Track elapsed milliseconds between snapshots(). - img = sensor.snapshot() # Take a picture and return the image. +while True: + clock.tick() # Track elapsed milliseconds between snapshots(). + img = sensor.snapshot() # Take a picture and return the image. # color_sigma controls how close color wise pixels have to be to each other to be # blured togheter. A smaller value means they have to be closer. @@ -30,5 +30,5 @@ while(True): # color_sigma/space_sigma to aggresively. Increase the sigma values until # the defects go away if you see them. - print(clock.fps()) # Note: Your OpenMV Cam runs about half as fast while + print(clock.fps()) # Note: Your OpenMV Cam runs about half as fast while # connected to your computer. The FPS should increase once disconnected. diff --git a/scripts/examples/02-Image-Processing/01-Image-Filters/grayscale_binary_filter.py b/scripts/examples/02-Image-Processing/01-Image-Filters/grayscale_binary_filter.py index 7e02cbe20..230f0fd2a 100644 --- a/scripts/examples/02-Image-Processing/01-Image-Filters/grayscale_binary_filter.py +++ b/scripts/examples/02-Image-Processing/01-Image-Filters/grayscale_binary_filter.py @@ -9,14 +9,13 @@ import time sensor.reset() sensor.set_framesize(sensor.QVGA) sensor.set_pixformat(sensor.GRAYSCALE) -sensor.skip_frames(time = 2000) +sensor.skip_frames(time=2000) clock = time.clock() low_threshold = (0, 50) high_threshold = (205, 255) -while(True): - +while True: # Test low threshold for i in range(100): clock.tick() @@ -35,12 +34,12 @@ while(True): for i in range(100): clock.tick() img = sensor.snapshot() - img.binary([low_threshold], invert = 1) + img.binary([low_threshold], invert=1) print(clock.fps()) # Test not high threshold for i in range(100): clock.tick() img = sensor.snapshot() - img.binary([high_threshold], invert = 1) + img.binary([high_threshold], invert=1) print(clock.fps()) diff --git a/scripts/examples/02-Image-Processing/01-Image-Filters/grayscale_light_removal.py b/scripts/examples/02-Image-Processing/01-Image-Filters/grayscale_light_removal.py index 3748ccd91..8f58f46ae 100644 --- a/scripts/examples/02-Image-Processing/01-Image-Filters/grayscale_light_removal.py +++ b/scripts/examples/02-Image-Processing/01-Image-Filters/grayscale_light_removal.py @@ -10,17 +10,17 @@ import sensor import time -sensor.reset() # Initialize the camera sensor. -sensor.set_pixformat(sensor.GRAYSCALE) # or sensor.RGB565 -sensor.set_framesize(sensor.QQVGA) # or sensor.QVGA (or others) -sensor.skip_frames(time = 2000) # Let new settings take affect. -clock = time.clock() # Tracks FPS. +sensor.reset() # Initialize the camera sensor. +sensor.set_pixformat(sensor.GRAYSCALE) # or sensor.RGB565 +sensor.set_framesize(sensor.QQVGA) # or sensor.QVGA (or others) +sensor.skip_frames(time=2000) # Let new settings take affect. +clock = time.clock() # Tracks FPS. thresholds = (220, 255) -while(True): - clock.tick() # Track elapsed milliseconds between snapshots(). +while True: + clock.tick() # Track elapsed milliseconds between snapshots(). img = sensor.snapshot().binary([thresholds], invert=False, zero=True) - print(clock.fps()) # Note: Your OpenMV Cam runs about half as fast while + print(clock.fps()) # Note: Your OpenMV Cam runs about half as fast while # connected to your computer. 
The FPS should increase once disconnected. diff --git a/scripts/examples/02-Image-Processing/01-Image-Filters/histogram_equalization.py b/scripts/examples/02-Image-Processing/01-Image-Filters/histogram_equalization.py index 137a00957..171ed4659 100644 --- a/scripts/examples/02-Image-Processing/01-Image-Filters/histogram_equalization.py +++ b/scripts/examples/02-Image-Processing/01-Image-Filters/histogram_equalization.py @@ -9,10 +9,10 @@ import time sensor.reset() sensor.set_pixformat(sensor.RGB565) sensor.set_framesize(sensor.QQVGA) -sensor.skip_frames(time = 2000) +sensor.skip_frames(time=2000) clock = time.clock() -while(True): +while True: clock.tick() img = sensor.snapshot().histeq() diff --git a/scripts/examples/02-Image-Processing/01-Image-Filters/kernel_filters.py b/scripts/examples/02-Image-Processing/01-Image-Filters/kernel_filters.py index 508635d85..44e6747f5 100644 --- a/scripts/examples/02-Image-Processing/01-Image-Filters/kernel_filters.py +++ b/scripts/examples/02-Image-Processing/01-Image-Filters/kernel_filters.py @@ -5,24 +5,22 @@ import sensor import time -sensor.reset() # Initialize the camera sensor. -sensor.set_pixformat(sensor.GRAYSCALE) # or sensor.RGB565 -sensor.set_framesize(sensor.QVGA) # or sensor.QQVGA (or others) -sensor.skip_frames(time = 2000) # Let new settings take affect. -clock = time.clock() # Tracks FPS. +sensor.reset() # Initialize the camera sensor. +sensor.set_pixformat(sensor.GRAYSCALE) # or sensor.RGB565 +sensor.set_framesize(sensor.QVGA) # or sensor.QQVGA (or others) +sensor.skip_frames(time=2000) # Let new settings take affect. +clock = time.clock() # Tracks FPS. -kernel_size = 1 # 3x3==1, 5x5==2, 7x7==3, etc. +kernel_size = 1 # 3x3==1, 5x5==2, 7x7==3, etc. -kernel = [-2, -1, 0, \ - -1, 1, 1, \ - 0, 1, 2] +kernel = [-2, -1, 0, -1, 1, 1, 0, 1, 2] -while(True): - clock.tick() # Track elapsed milliseconds between snapshots(). - img = sensor.snapshot() # Take a picture and return the image. +while True: + clock.tick() # Track elapsed milliseconds between snapshots(). + img = sensor.snapshot() # Take a picture and return the image. # Run the kernel on every pixel of the image. img.morph(kernel_size, kernel) - print(clock.fps()) # Note: Your OpenMV Cam runs about half as fast while + print(clock.fps()) # Note: Your OpenMV Cam runs about half as fast while # connected to your computer. The FPS should increase once disconnected. 
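kernel_filters.py flattens its 3x3 convolution kernel into a single list for img.morph(). For comparison, here is a minimal sketch with the classic sharpen kernel plugged into the same call (the kernel values are illustrative and not part of this patch):

    import sensor
    import time

    sensor.reset()
    sensor.set_pixformat(sensor.GRAYSCALE)
    sensor.set_framesize(sensor.QVGA)
    sensor.skip_frames(time=2000)
    clock = time.clock()

    # size=1 selects a 3x3 kernel, listed row by row. This one boosts the
    # center pixel against its eight neighbors, which sharpens edges.
    sharpen_kernel = [-1, -1, -1, -1, 9, -1, -1, -1, -1]

    while True:
        clock.tick()
        img = sensor.snapshot()
        img.morph(1, sharpen_kernel)
        print(clock.fps())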
diff --git a/scripts/examples/02-Image-Processing/01-Image-Filters/lens_correction.py b/scripts/examples/02-Image-Processing/01-Image-Filters/lens_correction.py index 91b6dadc1..6be918c3f 100644 --- a/scripts/examples/02-Image-Processing/01-Image-Filters/lens_correction.py +++ b/scripts/examples/02-Image-Processing/01-Image-Filters/lens_correction.py @@ -11,12 +11,12 @@ import time sensor.reset() sensor.set_pixformat(sensor.RGB565) sensor.set_framesize(sensor.QVGA) -sensor.skip_frames(time = 2000) +sensor.skip_frames(time=2000) clock = time.clock() -while(True): +while True: clock.tick() - img = sensor.snapshot().lens_corr(strength = 1.8, zoom = 1.0) + img = sensor.snapshot().lens_corr(strength=1.8, zoom=1.0) print(clock.fps()) diff --git a/scripts/examples/02-Image-Processing/01-Image-Filters/linear_polar.py b/scripts/examples/02-Image-Processing/01-Image-Filters/linear_polar.py index cd481617f..4a290742a 100644 --- a/scripts/examples/02-Image-Processing/01-Image-Filters/linear_polar.py +++ b/scripts/examples/02-Image-Processing/01-Image-Filters/linear_polar.py @@ -8,15 +8,15 @@ import sensor import time -sensor.reset() # Initialize the camera sensor. -sensor.set_pixformat(sensor.RGB565) # or sensor.GRAYSCALE -sensor.set_framesize(sensor.QQVGA) # or sensor.QVGA (or others) -sensor.skip_frames(time = 2000) # Let new settings take affect. -clock = time.clock() # Tracks FPS. +sensor.reset() # Initialize the camera sensor. +sensor.set_pixformat(sensor.RGB565) # or sensor.GRAYSCALE +sensor.set_framesize(sensor.QQVGA) # or sensor.QVGA (or others) +sensor.skip_frames(time=2000) # Let new settings take affect. +clock = time.clock() # Tracks FPS. -while(True): - clock.tick() # Track elapsed milliseconds between snapshots(). +while True: + clock.tick() # Track elapsed milliseconds between snapshots(). img = sensor.snapshot().linpolar(reverse=False) - print(clock.fps()) # Note: Your OpenMV Cam runs about half as fast while + print(clock.fps()) # Note: Your OpenMV Cam runs about half as fast while # connected to your computer. The FPS should increase once disconnected. diff --git a/scripts/examples/02-Image-Processing/01-Image-Filters/log_polar.py b/scripts/examples/02-Image-Processing/01-Image-Filters/log_polar.py index 99479b3e1..1321614d8 100644 --- a/scripts/examples/02-Image-Processing/01-Image-Filters/log_polar.py +++ b/scripts/examples/02-Image-Processing/01-Image-Filters/log_polar.py @@ -8,15 +8,15 @@ import sensor import time -sensor.reset() # Initialize the camera sensor. -sensor.set_pixformat(sensor.RGB565) # or sensor.GRAYSCALE -sensor.set_framesize(sensor.QQVGA) # or sensor.QVGA (or others) -sensor.skip_frames(time = 2000) # Let new settings take affect. -clock = time.clock() # Tracks FPS. +sensor.reset() # Initialize the camera sensor. +sensor.set_pixformat(sensor.RGB565) # or sensor.GRAYSCALE +sensor.set_framesize(sensor.QQVGA) # or sensor.QVGA (or others) +sensor.skip_frames(time=2000) # Let new settings take affect. +clock = time.clock() # Tracks FPS. -while(True): - clock.tick() # Track elapsed milliseconds between snapshots(). +while True: + clock.tick() # Track elapsed milliseconds between snapshots(). img = sensor.snapshot().logpolar(reverse=False) - print(clock.fps()) # Note: Your OpenMV Cam runs about half as fast while + print(clock.fps()) # Note: Your OpenMV Cam runs about half as fast while # connected to your computer. The FPS should increase once disconnected. 
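linear_polar.py and log_polar.py only display the re-mapped image, but the point of these mappings is that rotation about the image center becomes a shift along one axis, and, in the log-polar case, scaling becomes a shift along the other. A sketch of how that property is typically used, assuming the find_displacement() phase-correlation method that the optical-flow examples in this patch rely on:

    import sensor
    import time

    sensor.reset()
    sensor.set_pixformat(sensor.GRAYSCALE)
    sensor.set_framesize(sensor.QQVGA)
    sensor.skip_frames(time=2000)
    clock = time.clock()

    # Keep the previous frame in an extra frame buffer to compare against.
    extra_fb = sensor.alloc_extra_fb(sensor.width(), sensor.height(), sensor.GRAYSCALE)
    extra_fb.replace(sensor.snapshot())

    while True:
        clock.tick()
        img = sensor.snapshot()
        # logpolar=True makes phase correlation report rotation/scale
        # instead of x/y translation.
        d = img.find_displacement(extra_fb, logpolar=True)
        extra_fb.replace(img)
        print("rotation: %f scale: %f fps: %f" % (d.rotation(), d.scale(), clock.fps()))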
diff --git a/scripts/examples/02-Image-Processing/01-Image-Filters/mean_adaptive_threshold_filter.py b/scripts/examples/02-Image-Processing/01-Image-Filters/mean_adaptive_threshold_filter.py
index b3435d1bd..d9dea5334 100644
--- a/scripts/examples/02-Image-Processing/01-Image-Filters/mean_adaptive_threshold_filter.py
+++ b/scripts/examples/02-Image-Processing/01-Image-Filters/mean_adaptive_threshold_filter.py
@@ -7,20 +7,20 @@
 import sensor
 import time
 
-sensor.reset() # Initialize the camera sensor.
-sensor.set_pixformat(sensor.RGB565) # or sensor.GRAYSCALE
-sensor.set_framesize(sensor.QQVGA) # or sensor.QVGA (or others)
-sensor.skip_frames(time = 2000) # Let new settings take affect.
-clock = time.clock() # Tracks FPS.
+sensor.reset()  # Initialize the camera sensor.
+sensor.set_pixformat(sensor.RGB565)  # or sensor.GRAYSCALE
+sensor.set_framesize(sensor.QQVGA)  # or sensor.QVGA (or others)
+sensor.skip_frames(time=2000)  # Let new settings take effect.
+clock = time.clock()  # Tracks FPS.
 
-while(True):
-    clock.tick() # Track elapsed milliseconds between snapshots().
-    img = sensor.snapshot() # Take a picture and return the image.
+while True:
+    clock.tick()  # Track elapsed milliseconds between snapshots().
+    img = sensor.snapshot()  # Take a picture and return the image.
 
     # The first argument is the kernel size. N corresponds to a ((N*2)+1)^2
     # kernel size. E.g. 1 == 3x3 kernel, 2 == 5x5 kernel, etc. Note: You
     # shouldn't ever need to use a value bigger than 2.
     img.mean(1, threshold=True, offset=5, invert=True)
 
-    print(clock.fps()) # Note: Your OpenMV Cam runs about half as fast while
+    print(clock.fps())  # Note: Your OpenMV Cam runs about half as fast while
     # connected to your computer. The FPS should increase once disconnected.
diff --git a/scripts/examples/02-Image-Processing/01-Image-Filters/mean_filter.py b/scripts/examples/02-Image-Processing/01-Image-Filters/mean_filter.py
index 132a594d2..b040cc2a8 100644
--- a/scripts/examples/02-Image-Processing/01-Image-Filters/mean_filter.py
+++ b/scripts/examples/02-Image-Processing/01-Image-Filters/mean_filter.py
@@ -7,20 +7,20 @@
 import sensor
 import time
 
-sensor.reset() # Initialize the camera sensor.
-sensor.set_pixformat(sensor.RGB565) # or sensor.GRAYSCALE
-sensor.set_framesize(sensor.QQVGA) # or sensor.QVGA (or others)
-sensor.skip_frames(time = 2000) # Let new settings take affect.
-clock = time.clock() # Tracks FPS.
+sensor.reset()  # Initialize the camera sensor.
+sensor.set_pixformat(sensor.RGB565)  # or sensor.GRAYSCALE
+sensor.set_framesize(sensor.QQVGA)  # or sensor.QVGA (or others)
+sensor.skip_frames(time=2000)  # Let new settings take effect.
+clock = time.clock()  # Tracks FPS.
 
-while(True):
-    clock.tick() # Track elapsed milliseconds between snapshots().
-    img = sensor.snapshot() # Take a picture and return the image.
+while True:
+    clock.tick()  # Track elapsed milliseconds between snapshots().
+    img = sensor.snapshot()  # Take a picture and return the image.
 
     # The only argument is the kernel size. N corresponds to a ((N*2)+1)^2
     # kernel size. E.g. 1 == 3x3 kernel, 2 == 5x5 kernel, etc. Note: You
     # shouldn't ever need to use a value bigger than 2.
     img.mean(1)
 
-    print(clock.fps()) # Note: Your OpenMV Cam runs about half as fast while
+    print(clock.fps())  # Note: Your OpenMV Cam runs about half as fast while
     # connected to your computer. The FPS should increase once disconnected.
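The ((N*2)+1)^2 note that both mean filter examples carry is easier to see worked out: N is a radius, and the window is a square of side 2N+1 centered on each pixel.

    # The kernel-size argument N taken by img.mean(N) and the other
    # statistics filters is a radius:
    for n in (1, 2, 3):
        side = (n * 2) + 1
        print("N=%d -> %dx%d window, %d pixels" % (n, side, side, side * side))
    # N=1 -> 3x3 window, 9 pixels
    # N=2 -> 5x5 window, 25 pixels
    # N=3 -> 7x7 window, 49 pixels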
diff --git a/scripts/examples/02-Image-Processing/01-Image-Filters/median_adaptive_threshold_filter.py b/scripts/examples/02-Image-Processing/01-Image-Filters/median_adaptive_threshold_filter.py
index a81b3b9ac..ff24cfb2f 100644
--- a/scripts/examples/02-Image-Processing/01-Image-Filters/median_adaptive_threshold_filter.py
+++ b/scripts/examples/02-Image-Processing/01-Image-Filters/median_adaptive_threshold_filter.py
@@ -7,15 +7,15 @@
 import sensor
 import time
 
-sensor.reset() # Initialize the camera sensor.
-sensor.set_pixformat(sensor.RGB565) # or sensor.GRAYSCALE
-sensor.set_framesize(sensor.QQVGA) # or sensor.QVGA (or others)
-sensor.skip_frames(time = 2000) # Let new settings take affect.
-clock = time.clock() # Tracks FPS.
+sensor.reset()  # Initialize the camera sensor.
+sensor.set_pixformat(sensor.RGB565)  # or sensor.GRAYSCALE
+sensor.set_framesize(sensor.QQVGA)  # or sensor.QVGA (or others)
+sensor.skip_frames(time=2000)  # Let new settings take effect.
+clock = time.clock()  # Tracks FPS.
 
-while(True):
-    clock.tick() # Track elapsed milliseconds between snapshots().
-    img = sensor.snapshot() # Take a picture and return the image.
+while True:
+    clock.tick()  # Track elapsed milliseconds between snapshots().
+    img = sensor.snapshot()  # Take a picture and return the image.
 
     # The first argument to the median filter is the kernel size; it can be
     # either 0, 1, or 2 for a 1x1, 3x3, or 5x5 kernel respectively. The second
@@ -24,5 +24,5 @@ while(True):
     # would be the upper quartile.
     img.median(1, percentile=0.5, threshold=True, offset=5, invert=True)
 
-    print(clock.fps()) # Note: Your OpenMV Cam runs about half as fast while
+    print(clock.fps())  # Note: Your OpenMV Cam runs about half as fast while
     # connected to your computer. The FPS should increase once disconnected.
diff --git a/scripts/examples/02-Image-Processing/01-Image-Filters/median_filter.py b/scripts/examples/02-Image-Processing/01-Image-Filters/median_filter.py
index 5a2193194..4c91f5c09 100644
--- a/scripts/examples/02-Image-Processing/01-Image-Filters/median_filter.py
+++ b/scripts/examples/02-Image-Processing/01-Image-Filters/median_filter.py
@@ -7,15 +7,15 @@
 import sensor
 import time
 
-sensor.reset() # Initialize the camera sensor.
-sensor.set_pixformat(sensor.RGB565) # or sensor.GRAYSCALE
-sensor.set_framesize(sensor.QQVGA) # or sensor.QVGA (or others)
-sensor.skip_frames(time = 2000) # Let new settings take affect.
-clock = time.clock() # Tracks FPS.
+sensor.reset()  # Initialize the camera sensor.
+sensor.set_pixformat(sensor.RGB565)  # or sensor.GRAYSCALE
+sensor.set_framesize(sensor.QQVGA)  # or sensor.QVGA (or others)
+sensor.skip_frames(time=2000)  # Let new settings take effect.
+clock = time.clock()  # Tracks FPS.
 
-while(True):
-    clock.tick() # Track elapsed milliseconds between snapshots().
-    img = sensor.snapshot() # Take a picture and return the image.
+while True:
+    clock.tick()  # Track elapsed milliseconds between snapshots().
+    img = sensor.snapshot()  # Take a picture and return the image.
 
     # The first argument to the median filter is the kernel size; it can be
     # either 0, 1, or 2 for a 1x1, 3x3, or 5x5 kernel respectively. The second
@@ -24,5 +24,5 @@ while(True):
     # would be the upper quartile.
     img.median(1, percentile=0.5)
 
-    print(clock.fps()) # Note: Your OpenMV Cam runs about half as fast while
+    print(clock.fps())  # Note: Your OpenMV Cam runs about half as fast while
     # connected to your computer. The FPS should increase once disconnected.
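The percentile argument in both median examples generalizes the filter beyond the true median. A minimal sketch of the endpoints (the values are illustrative):

    import sensor
    import time

    sensor.reset()
    sensor.set_pixformat(sensor.GRAYSCALE)
    sensor.set_framesize(sensor.QQVGA)
    sensor.skip_frames(time=2000)

    while True:
        img = sensor.snapshot()
        # percentile picks which ranked neighbor wins in each 3x3 window:
        # 0.0 approximates a minimum (erode-like) filter, 1.0 a maximum
        # (dilate-like) filter, and 0.5 is the true median.
        img.median(1, percentile=0.5)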
diff --git a/scripts/examples/02-Image-Processing/01-Image-Filters/midpoint_adaptive_threshold_filter.py b/scripts/examples/02-Image-Processing/01-Image-Filters/midpoint_adaptive_threshold_filter.py
index 39549205a..509fde993 100644
--- a/scripts/examples/02-Image-Processing/01-Image-Filters/midpoint_adaptive_threshold_filter.py
+++ b/scripts/examples/02-Image-Processing/01-Image-Filters/midpoint_adaptive_threshold_filter.py
@@ -7,15 +7,15 @@
 import sensor
 import time
 
-sensor.reset() # Initialize the camera sensor.
-sensor.set_pixformat(sensor.RGB565) # or sensor.GRAYSCALE
-sensor.set_framesize(sensor.QQVGA) # or sensor.QVGA (or others)
-sensor.skip_frames(time = 2000) # Let new settings take affect.
-clock = time.clock() # Tracks FPS.
+sensor.reset()  # Initialize the camera sensor.
+sensor.set_pixformat(sensor.RGB565)  # or sensor.GRAYSCALE
+sensor.set_framesize(sensor.QQVGA)  # or sensor.QVGA (or others)
+sensor.skip_frames(time=2000)  # Let new settings take effect.
+clock = time.clock()  # Tracks FPS.
 
-while(True):
-    clock.tick() # Track elapsed milliseconds between snapshots().
-    img = sensor.snapshot() # Take a picture and return the image.
+while True:
+    clock.tick()  # Track elapsed milliseconds between snapshots().
+    img = sensor.snapshot()  # Take a picture and return the image.
 
     # The first argument is the kernel size. N corresponds to a ((N*2)+1)^2
     # kernel size. E.g. 1 == 3x3 kernel, 2 == 5x5 kernel, etc. Note: You
@@ -25,5 +25,5 @@ while(True):
     # makes images darker while the max filter makes images lighter.
     img.midpoint(1, bias=0.5, threshold=True, offset=5, invert=True)
 
-    print(clock.fps()) # Note: Your OpenMV Cam runs about half as fast while
+    print(clock.fps())  # Note: Your OpenMV Cam runs about half as fast while
     # connected to your computer. The FPS should increase once disconnected.
diff --git a/scripts/examples/02-Image-Processing/01-Image-Filters/midpoint_filter.py b/scripts/examples/02-Image-Processing/01-Image-Filters/midpoint_filter.py
index 340f47cad..f6a0ebcf7 100644
--- a/scripts/examples/02-Image-Processing/01-Image-Filters/midpoint_filter.py
+++ b/scripts/examples/02-Image-Processing/01-Image-Filters/midpoint_filter.py
@@ -6,15 +6,15 @@
 import sensor
 import time
 
-sensor.reset() # Initialize the camera sensor.
-sensor.set_pixformat(sensor.RGB565) # or sensor.GRAYSCALE
-sensor.set_framesize(sensor.QQVGA) # or sensor.QVGA (or others)
-sensor.skip_frames(time = 2000) # Let new settings take affect.
-clock = time.clock() # Tracks FPS.
+sensor.reset()  # Initialize the camera sensor.
+sensor.set_pixformat(sensor.RGB565)  # or sensor.GRAYSCALE
+sensor.set_framesize(sensor.QQVGA)  # or sensor.QVGA (or others)
+sensor.skip_frames(time=2000)  # Let new settings take effect.
+clock = time.clock()  # Tracks FPS.
 
-while(True):
-    clock.tick() # Track elapsed milliseconds between snapshots().
-    img = sensor.snapshot() # Take a picture and return the image.
+while True:
+    clock.tick()  # Track elapsed milliseconds between snapshots().
+    img = sensor.snapshot()  # Take a picture and return the image.
 
     # The first argument is the kernel size. N corresponds to a ((N*2)+1)^2
     # kernel size. E.g. 1 == 3x3 kernel, 2 == 5x5 kernel, etc. Note: You
@@ -24,5 +24,5 @@ while(True):
     # makes images darker while the max filter makes images lighter.
     img.midpoint(1, bias=0.5)
 
-    print(clock.fps()) # Note: Your OpenMV Cam runs about half as fast while
+    print(clock.fps())  # Note: Your OpenMV Cam runs about half as fast while
     # connected to your computer. The FPS should increase once disconnected.
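The bias argument in the midpoint examples blends between the window extremes, effectively min * (1 - bias) + max * bias. A small sketch making the endpoints explicit (values illustrative):

    import sensor
    import time

    sensor.reset()
    sensor.set_pixformat(sensor.GRAYSCALE)
    sensor.set_framesize(sensor.QQVGA)
    sensor.skip_frames(time=2000)

    while True:
        img = sensor.snapshot()
        # bias=0.0 is a pure min filter (darkens the image), bias=1.0 a pure
        # max filter (lightens it), and bias=0.5 is the classic midpoint.
        img.midpoint(1, bias=0.5)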
diff --git a/scripts/examples/02-Image-Processing/01-Image-Filters/mode_adaptive_threshold_filter.py b/scripts/examples/02-Image-Processing/01-Image-Filters/mode_adaptive_threshold_filter.py
index 29ae76b52..ae07d8484 100644
--- a/scripts/examples/02-Image-Processing/01-Image-Filters/mode_adaptive_threshold_filter.py
+++ b/scripts/examples/02-Image-Processing/01-Image-Filters/mode_adaptive_threshold_filter.py
@@ -8,19 +8,19 @@
 import sensor
 import time
 
-sensor.reset() # Initialize the camera sensor.
-sensor.set_pixformat(sensor.GRAYSCALE) # or sensor.RGB565
-sensor.set_framesize(sensor.QQVGA) # or sensor.QVGA (or others)
-sensor.skip_frames(time = 2000) # Let new settings take affect.
-clock = time.clock() # Tracks FPS.
+sensor.reset()  # Initialize the camera sensor.
+sensor.set_pixformat(sensor.GRAYSCALE)  # or sensor.RGB565
+sensor.set_framesize(sensor.QQVGA)  # or sensor.QVGA (or others)
+sensor.skip_frames(time=2000)  # Let new settings take effect.
+clock = time.clock()  # Tracks FPS.
 
-while(True):
-    clock.tick() # Track elapsed milliseconds between snapshots().
-    img = sensor.snapshot() # Take a picture and return the image.
+while True:
+    clock.tick()  # Track elapsed milliseconds between snapshots().
+    img = sensor.snapshot()  # Take a picture and return the image.
 
     # The first argument to the mode filter is the kernel size; it can be
     # either 0, 1, or 2 for a 1x1, 3x3, or 5x5 kernel respectively.
     img.mode(1, threshold=True, offset=5, invert=True)
 
-    print(clock.fps()) # Note: Your OpenMV Cam runs about half as fast while
+    print(clock.fps())  # Note: Your OpenMV Cam runs about half as fast while
     # connected to your computer. The FPS should increase once disconnected.
diff --git a/scripts/examples/02-Image-Processing/01-Image-Filters/mode_filter.py b/scripts/examples/02-Image-Processing/01-Image-Filters/mode_filter.py
index cc0135bf7..6280764e1 100644
--- a/scripts/examples/02-Image-Processing/01-Image-Filters/mode_filter.py
+++ b/scripts/examples/02-Image-Processing/01-Image-Filters/mode_filter.py
@@ -8,19 +8,19 @@
 import sensor
 import time
 
-sensor.reset() # Initialize the camera sensor.
-sensor.set_pixformat(sensor.GRAYSCALE) # or sensor.RGB565
-sensor.set_framesize(sensor.QQVGA) # or sensor.QVGA (or others)
-sensor.skip_frames(time = 2000) # Let new settings take affect.
-clock = time.clock() # Tracks FPS.
+sensor.reset()  # Initialize the camera sensor.
+sensor.set_pixformat(sensor.GRAYSCALE)  # or sensor.RGB565
+sensor.set_framesize(sensor.QQVGA)  # or sensor.QVGA (or others)
+sensor.skip_frames(time=2000)  # Let new settings take effect.
+clock = time.clock()  # Tracks FPS.
 
-while(True):
-    clock.tick() # Track elapsed milliseconds between snapshots().
-    img = sensor.snapshot() # Take a picture and return the image.
+while True:
+    clock.tick()  # Track elapsed milliseconds between snapshots().
+    img = sensor.snapshot()  # Take a picture and return the image.
 
     # The only argument to the mode filter is the kernel size; it can be
     # either 0, 1, or 2 for a 1x1, 3x3, or 5x5 kernel respectively.
     img.mode(1)
 
-    print(clock.fps()) # Note: Your OpenMV Cam runs about half as fast while
+    print(clock.fps())  # Note: Your OpenMV Cam runs about half as fast while
     # connected to your computer. The FPS should increase once disconnected.
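All four *_adaptive_threshold_filter examples above share one recipe: each statistics filter (mean, median, midpoint, mode) doubles as an adaptive thresholder when threshold=True is passed. A minimal sketch of the shared pattern (the offset and invert values are illustrative):

    import sensor
    import time

    sensor.reset()
    sensor.set_pixformat(sensor.GRAYSCALE)
    sensor.set_framesize(sensor.QQVGA)
    sensor.skip_frames(time=2000)

    while True:
        img = sensor.snapshot()
        # With threshold=True the pixel is compared against its local
        # statistic instead of being replaced by it: offset shifts the cut
        # point and invert flips which side of the cut becomes white.
        img.mean(1, threshold=True, offset=5, invert=True)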
diff --git a/scripts/examples/02-Image-Processing/01-Image-Filters/negative.py b/scripts/examples/02-Image-Processing/01-Image-Filters/negative.py index ff89a26ef..1156f3d02 100644 --- a/scripts/examples/02-Image-Processing/01-Image-Filters/negative.py +++ b/scripts/examples/02-Image-Processing/01-Image-Filters/negative.py @@ -6,15 +6,15 @@ import sensor import time -sensor.reset() # Initialize the camera sensor. -sensor.set_pixformat(sensor.RGB565) # or sensor.GRAYSCALE -sensor.set_framesize(sensor.QVGA) # or sensor.QQVGA (or others) -sensor.skip_frames(time = 2000) # Let new settings take affect. -clock = time.clock() # Tracks FPS. +sensor.reset() # Initialize the camera sensor. +sensor.set_pixformat(sensor.RGB565) # or sensor.GRAYSCALE +sensor.set_framesize(sensor.QVGA) # or sensor.QQVGA (or others) +sensor.skip_frames(time=2000) # Let new settings take affect. +clock = time.clock() # Tracks FPS. -while(True): - clock.tick() # Track elapsed milliseconds between snapshots(). +while True: + clock.tick() # Track elapsed milliseconds between snapshots(). img = sensor.snapshot().negate() - print(clock.fps()) # Note: Your OpenMV Cam runs about half as fast while + print(clock.fps()) # Note: Your OpenMV Cam runs about half as fast while # connected to your computer. The FPS should increase once disconnected. diff --git a/scripts/examples/02-Image-Processing/01-Image-Filters/perspective_and_rotation_correction.py b/scripts/examples/02-Image-Processing/01-Image-Filters/perspective_and_rotation_correction.py index 77c3f8df6..728dce67d 100644 --- a/scripts/examples/02-Image-Processing/01-Image-Filters/perspective_and_rotation_correction.py +++ b/scripts/examples/02-Image-Processing/01-Image-Filters/perspective_and_rotation_correction.py @@ -10,7 +10,7 @@ import time sensor.reset() sensor.set_pixformat(sensor.RGB565) sensor.set_framesize(sensor.QVGA) -sensor.skip_frames(time = 2000) +sensor.skip_frames(time=2000) clock = time.clock() # The image will be warped such that the following points become the new: @@ -28,10 +28,12 @@ clock = time.clock() w = sensor.width() h = sensor.height() -TARGET_POINTS = [(0, 0), # (x, y) CHANGE ME! - (w-1, 0), # (x, y) CHANGE ME! - (w-1, h-1), # (x, y) CHANGE ME! - (0, h-1)] # (x, y) CHANGE ME! +TARGET_POINTS = [ + (0, 0), # (x, y) CHANGE ME! + (w - 1, 0), # (x, y) CHANGE ME! + (w - 1, h - 1), # (x, y) CHANGE ME! + (0, h - 1), +] # (x, y) CHANGE ME! # Degrees per frame to rotation by... X_ROTATION_DEGREE_RATE = 5 @@ -40,30 +42,32 @@ Z_ROTATION_DEGREE_RATE = 0 X_OFFSET = 0 Y_OFFSET = 0 -ZOOM_AMOUNT = 1 # Lower zooms out - Higher zooms in. -FOV_WINDOW = 25 # Between 0 and 180. Represents the field-of-view of the scene - # window when rotating the image in 3D space. When closer to - # zero results in lines becoming straighter as the window - # moves away from the image being rotated in 3D space. A large - # value moves the window closer to the image in 3D space which - # results in the more perspective distortion and sometimes - # the image in 3D intersecting the scene window. +ZOOM_AMOUNT = 1 # Lower zooms out - Higher zooms in. +FOV_WINDOW = 25 # Between 0 and 180. Represents the field-of-view of the scene +# window when rotating the image in 3D space. When closer to +# zero results in lines becoming straighter as the window +# moves away from the image being rotated in 3D space. A large +# value moves the window closer to the image in 3D space which +# results in the more perspective distortion and sometimes +# the image in 3D intersecting the scene window. 
x_rotation_counter = 0 y_rotation_counter = 0 z_rotation_counter = 0 -while(True): +while True: clock.tick() - img = sensor.snapshot().rotation_corr(x_rotation = x_rotation_counter, \ - y_rotation = y_rotation_counter, \ - z_rotation = z_rotation_counter, \ - x_translation = X_OFFSET, \ - y_translation = Y_OFFSET, \ - zoom = ZOOM_AMOUNT, \ - fov = FOV_WINDOW, \ - corners = TARGET_POINTS) + img = sensor.snapshot().rotation_corr( + x_rotation=x_rotation_counter, + y_rotation=y_rotation_counter, + z_rotation=z_rotation_counter, + x_translation=X_OFFSET, + y_translation=Y_OFFSET, + zoom=ZOOM_AMOUNT, + fov=FOV_WINDOW, + corners=TARGET_POINTS, + ) x_rotation_counter += X_ROTATION_DEGREE_RATE y_rotation_counter += Y_ROTATION_DEGREE_RATE diff --git a/scripts/examples/02-Image-Processing/01-Image-Filters/perspective_correction.py b/scripts/examples/02-Image-Processing/01-Image-Filters/perspective_correction.py index ba8ec3849..d20760267 100644 --- a/scripts/examples/02-Image-Processing/01-Image-Filters/perspective_correction.py +++ b/scripts/examples/02-Image-Processing/01-Image-Filters/perspective_correction.py @@ -9,7 +9,7 @@ import time sensor.reset() sensor.set_pixformat(sensor.RGB565) sensor.set_framesize(sensor.QVGA) -sensor.skip_frames(time = 2000) +sensor.skip_frames(time=2000) clock = time.clock() # The image will be warped such that the following points become the new: @@ -27,14 +27,16 @@ clock = time.clock() w = sensor.width() h = sensor.height() -TARGET_POINTS = [(0, 0), # (x, y) CHANGE ME! - (w-1, 0), # (x, y) CHANGE ME! - (w-1, h-1), # (x, y) CHANGE ME! - (0, h-1)] # (x, y) CHANGE ME! +TARGET_POINTS = [ + (0, 0), # (x, y) CHANGE ME! + (w - 1, 0), # (x, y) CHANGE ME! + (w - 1, h - 1), # (x, y) CHANGE ME! + (0, h - 1), +] # (x, y) CHANGE ME! -while(True): +while True: clock.tick() - img = sensor.snapshot().rotation_corr(corners = TARGET_POINTS) + img = sensor.snapshot().rotation_corr(corners=TARGET_POINTS) print(clock.fps()) diff --git a/scripts/examples/02-Image-Processing/01-Image-Filters/rotation_correction.py b/scripts/examples/02-Image-Processing/01-Image-Filters/rotation_correction.py index 0eef3421c..1bd7a52ff 100644 --- a/scripts/examples/02-Image-Processing/01-Image-Filters/rotation_correction.py +++ b/scripts/examples/02-Image-Processing/01-Image-Filters/rotation_correction.py @@ -13,35 +13,37 @@ Z_ROTATION_DEGREE_RATE = 0 X_OFFSET = 0 Y_OFFSET = 0 -ZOOM_AMOUNT = 1 # Lower zooms out - Higher zooms in. -FOV_WINDOW = 60 # Between 0 and 180. Represents the field-of-view of the scene - # window when rotating the image in 3D space. When closer to - # zero results in lines becoming straighter as the window - # moves away from the image being rotated in 3D space. A large - # value moves the window closer to the image in 3D space which - # results in the more perspective distortion and sometimes - # the image in 3D intersecting the scene window. +ZOOM_AMOUNT = 1 # Lower zooms out - Higher zooms in. +FOV_WINDOW = 60 # Between 0 and 180. Represents the field-of-view of the scene +# window when rotating the image in 3D space. When closer to +# zero results in lines becoming straighter as the window +# moves away from the image being rotated in 3D space. A large +# value moves the window closer to the image in 3D space which +# results in the more perspective distortion and sometimes +# the image in 3D intersecting the scene window. 
 sensor.reset()
 sensor.set_pixformat(sensor.RGB565)
 sensor.set_framesize(sensor.QVGA)
-sensor.skip_frames(time = 2000)
+sensor.skip_frames(time=2000)
 clock = time.clock()
 
 x_rotation_counter = 0
 y_rotation_counter = 0
 z_rotation_counter = 0
 
-while(True):
+while True:
     clock.tick()
 
-    img = sensor.snapshot().rotation_corr(x_rotation = x_rotation_counter, \
-                                          y_rotation = y_rotation_counter, \
-                                          z_rotation = z_rotation_counter, \
-                                          x_translation = X_OFFSET, \
-                                          y_translation = Y_OFFSET, \
-                                          zoom = ZOOM_AMOUNT, \
-                                          fov = FOV_WINDOW)
+    img = sensor.snapshot().rotation_corr(
+        x_rotation=x_rotation_counter,
+        y_rotation=y_rotation_counter,
+        z_rotation=z_rotation_counter,
+        x_translation=X_OFFSET,
+        y_translation=Y_OFFSET,
+        zoom=ZOOM_AMOUNT,
+        fov=FOV_WINDOW,
+    )
 
     x_rotation_counter += X_ROTATION_DEGREE_RATE
     y_rotation_counter += Y_ROTATION_DEGREE_RATE
diff --git a/scripts/examples/02-Image-Processing/01-Image-Filters/sharpen_filter.py b/scripts/examples/02-Image-Processing/01-Image-Filters/sharpen_filter.py
index e6165c431..36300f5ba 100644
--- a/scripts/examples/02-Image-Processing/01-Image-Filters/sharpen_filter.py
+++ b/scripts/examples/02-Image-Processing/01-Image-Filters/sharpen_filter.py
@@ -5,18 +5,18 @@
 import sensor
 import time
 
-sensor.reset() # Initialize the camera sensor.
-sensor.set_pixformat(sensor.GRAYSCALE) # or sensor.RGB565
-sensor.set_framesize(sensor.QQVGA) # or sensor.QVGA (or others)
-sensor.skip_frames(time = 2000) # Let new settings take affect.
-clock = time.clock() # Tracks FPS.
+sensor.reset()  # Initialize the camera sensor.
+sensor.set_pixformat(sensor.GRAYSCALE)  # or sensor.RGB565
+sensor.set_framesize(sensor.QQVGA)  # or sensor.QVGA (or others)
+sensor.skip_frames(time=2000)  # Let new settings take effect.
+clock = time.clock()  # Tracks FPS.
 
-while(True):
-    clock.tick() # Track elapsed milliseconds between snapshots().
-    img = sensor.snapshot() # Take a picture and return the image.
+while True:
+    clock.tick()  # Track elapsed milliseconds between snapshots().
+    img = sensor.snapshot()  # Take a picture and return the image.
 
     # Run the kernel on every pixel of the image.
     img.laplacian(1, sharpen=True)
 
-    print(clock.fps()) # Note: Your OpenMV Cam runs about half as fast while
+    print(clock.fps())  # Note: Your OpenMV Cam runs about half as fast while
     # connected to your computer. The FPS should increase once disconnected.
diff --git a/scripts/examples/02-Image-Processing/01-Image-Filters/ulab.py b/scripts/examples/02-Image-Processing/01-Image-Filters/ulab.py
index f16c68798..cc02c19ab 100644
--- a/scripts/examples/02-Image-Processing/01-Image-Filters/ulab.py
+++ b/scripts/examples/02-Image-Processing/01-Image-Filters/ulab.py
@@ -8,13 +8,12 @@
 import sensor
 import time
 from ulab import numpy as np
 
-sensor.reset() # Reset and initialize the sensor.
+sensor.reset()  # Reset and initialize the sensor.
 sensor.set_pixformat(sensor.GRAYSCALE)  # Set pixel format to GRAYSCALE (or RGB565)
-sensor.set_framesize(sensor.QQVGA) # Set frame size to QVGA (320x240)
-clock = time.clock() # Create a clock object to track the FPS.
+sensor.set_framesize(sensor.QQVGA)  # Set frame size to QQVGA (160x120)
+clock = time.clock()  # Create a clock object to track the FPS.
 
-while (True):
-    img = sensor.snapshot() # Take a picture and return the image.
+while True:
+    img = sensor.snapshot()  # Take a picture and return the image.
a = np.array(img, dtype=np.uint8) - print("mean: %d std:%d"%(np.mean(a), np.std(a))) - + print("mean: %d std:%d" % (np.mean(a), np.std(a))) diff --git a/scripts/examples/02-Image-Processing/01-Image-Filters/unsharp_filter.py b/scripts/examples/02-Image-Processing/01-Image-Filters/unsharp_filter.py index 89bd423ee..410d7abad 100644 --- a/scripts/examples/02-Image-Processing/01-Image-Filters/unsharp_filter.py +++ b/scripts/examples/02-Image-Processing/01-Image-Filters/unsharp_filter.py @@ -5,18 +5,18 @@ import sensor import time -sensor.reset() # Initialize the camera sensor. -sensor.set_pixformat(sensor.GRAYSCALE) # or sensor.RGB565 -sensor.set_framesize(sensor.QQVGA) # or sensor.QVGA (or others) -sensor.skip_frames(time = 2000) # Let new settings take affect. -clock = time.clock() # Tracks FPS. +sensor.reset() # Initialize the camera sensor. +sensor.set_pixformat(sensor.GRAYSCALE) # or sensor.RGB565 +sensor.set_framesize(sensor.QQVGA) # or sensor.QVGA (or others) +sensor.skip_frames(time=2000) # Let new settings take affect. +clock = time.clock() # Tracks FPS. -while(True): - clock.tick() # Track elapsed milliseconds between snapshots(). - img = sensor.snapshot() # Take a picture and return the image. +while True: + clock.tick() # Track elapsed milliseconds between snapshots(). + img = sensor.snapshot() # Take a picture and return the image. # Run the kernel on every pixel of the image. img.gaussian(1, unsharp=True) - print(clock.fps()) # Note: Your OpenMV Cam runs about half as fast while + print(clock.fps()) # Note: Your OpenMV Cam runs about half as fast while # connected to your computer. The FPS should increase once disconnected. diff --git a/scripts/examples/02-Image-Processing/01-Image-Filters/vflip_hmirror_transpose.py b/scripts/examples/02-Image-Processing/01-Image-Filters/vflip_hmirror_transpose.py index 3d8b225f8..0fde158e3 100644 --- a/scripts/examples/02-Image-Processing/01-Image-Filters/vflip_hmirror_transpose.py +++ b/scripts/examples/02-Image-Processing/01-Image-Filters/vflip_hmirror_transpose.py @@ -15,20 +15,22 @@ import pyb sensor.reset() sensor.set_pixformat(sensor.RGB565) sensor.set_framesize(sensor.QVGA) -sensor.skip_frames(time = 2000) +sensor.skip_frames(time=2000) clock = time.clock() mills = pyb.millis() counter = 0 -while(True): +while True: clock.tick() - img = sensor.snapshot().replace(vflip=(counter//2)%2, - hmirror=(counter//4)%2, - transpose=(counter//8)%2) + img = sensor.snapshot().replace( + vflip=(counter // 2) % 2, + hmirror=(counter // 4) % 2, + transpose=(counter // 8) % 2, + ) - if (pyb.millis() > (mills + 1000)): + if pyb.millis() > (mills + 1000): mills = pyb.millis() counter += 1 diff --git a/scripts/examples/02-Image-Processing/02-Color-Tracking/automatic_grayscale_color_tracking.py b/scripts/examples/02-Image-Processing/02-Color-Tracking/automatic_grayscale_color_tracking.py index e52bde80c..a9ec34a66 100644 --- a/scripts/examples/02-Image-Processing/02-Color-Tracking/automatic_grayscale_color_tracking.py +++ b/scripts/examples/02-Image-Processing/02-Color-Tracking/automatic_grayscale_color_tracking.py @@ -4,36 +4,47 @@ import sensor import time + print("Letting auto algorithms run. 
Don't put anything in front of the camera!") sensor.reset() sensor.set_pixformat(sensor.GRAYSCALE) sensor.set_framesize(sensor.QVGA) -sensor.skip_frames(time = 2000) -sensor.set_auto_gain(False) # must be turned off for color tracking -sensor.set_auto_whitebal(False) # must be turned off for color tracking +sensor.skip_frames(time=2000) +sensor.set_auto_gain(False) # must be turned off for color tracking +sensor.set_auto_whitebal(False) # must be turned off for color tracking clock = time.clock() # Capture the color thresholds for whatever was in the center of the image. -r = [(320//2)-(50//2), (240//2)-(50//2), 50, 50] # 50x50 center of QVGA. +r = [(320 // 2) - (50 // 2), (240 // 2) - (50 // 2), 50, 50] # 50x50 center of QVGA. -print("Auto algorithms done. Hold the object you want to track in front of the camera in the box.") -print("MAKE SURE THE COLOR OF THE OBJECT YOU WANT TO TRACK IS FULLY ENCLOSED BY THE BOX!") +print( + "Auto algorithms done. Hold the object you want to track in front of the camera in the box." +) +print( + "MAKE SURE THE COLOR OF THE OBJECT YOU WANT TO TRACK IS FULLY ENCLOSED BY THE BOX!" +) for i in range(60): img = sensor.snapshot() img.draw_rectangle(r) print("Learning thresholds...") -threshold = [128, 128] # Middle grayscale values. +threshold = [128, 128] # Middle grayscale values. for i in range(60): img = sensor.snapshot() hist = img.get_histogram(roi=r) - lo = hist.get_percentile(0.01) # Get the CDF of the histogram at the 1% range (ADJUST AS NECESSARY)! - hi = hist.get_percentile(0.99) # Get the CDF of the histogram at the 99% range (ADJUST AS NECESSARY)! + lo = hist.get_percentile( + 0.01 + ) # Get the CDF of the histogram at the 1% range (ADJUST AS NECESSARY)! + hi = hist.get_percentile( + 0.99 + ) # Get the CDF of the histogram at the 99% range (ADJUST AS NECESSARY)! # Average in percentile values. threshold[0] = (threshold[0] + lo.value()) // 2 threshold[1] = (threshold[1] + hi.value()) // 2 - for blob in img.find_blobs([threshold], pixels_threshold=100, area_threshold=100, merge=True, margin=10): + for blob in img.find_blobs( + [threshold], pixels_threshold=100, area_threshold=100, merge=True, margin=10 + ): img.draw_rectangle(blob.rect()) img.draw_cross(blob.cx(), blob.cy()) img.draw_rectangle(r) @@ -41,10 +52,12 @@ for i in range(60): print("Thresholds learned...") print("Tracking colors...") -while(True): +while True: clock.tick() img = sensor.snapshot() - for blob in img.find_blobs([threshold], pixels_threshold=100, area_threshold=100, merge=True, margin=10): + for blob in img.find_blobs( + [threshold], pixels_threshold=100, area_threshold=100, merge=True, margin=10 + ): img.draw_rectangle(blob.rect()) img.draw_cross(blob.cx(), blob.cy()) print(clock.fps()) diff --git a/scripts/examples/02-Image-Processing/02-Color-Tracking/automatic_rgb565_color_tracking.py b/scripts/examples/02-Image-Processing/02-Color-Tracking/automatic_rgb565_color_tracking.py index af46fc50e..1b1b2da12 100644 --- a/scripts/examples/02-Image-Processing/02-Color-Tracking/automatic_rgb565_color_tracking.py +++ b/scripts/examples/02-Image-Processing/02-Color-Tracking/automatic_rgb565_color_tracking.py @@ -4,32 +4,41 @@ import sensor import time + print("Letting auto algorithms run. 
Don't put anything in front of the camera!") sensor.reset() sensor.set_pixformat(sensor.RGB565) sensor.set_framesize(sensor.QVGA) -sensor.skip_frames(time = 2000) -sensor.set_auto_gain(False) # must be turned off for color tracking -sensor.set_auto_whitebal(False) # must be turned off for color tracking +sensor.skip_frames(time=2000) +sensor.set_auto_gain(False) # must be turned off for color tracking +sensor.set_auto_whitebal(False) # must be turned off for color tracking clock = time.clock() # Capture the color thresholds for whatever was in the center of the image. -r = [(320//2)-(50//2), (240//2)-(50//2), 50, 50] # 50x50 center of QVGA. +r = [(320 // 2) - (50 // 2), (240 // 2) - (50 // 2), 50, 50] # 50x50 center of QVGA. -print("Auto algorithms done. Hold the object you want to track in front of the camera in the box.") -print("MAKE SURE THE COLOR OF THE OBJECT YOU WANT TO TRACK IS FULLY ENCLOSED BY THE BOX!") +print( + "Auto algorithms done. Hold the object you want to track in front of the camera in the box." +) +print( + "MAKE SURE THE COLOR OF THE OBJECT YOU WANT TO TRACK IS FULLY ENCLOSED BY THE BOX!" +) for i in range(60): img = sensor.snapshot() img.draw_rectangle(r) print("Learning thresholds...") -threshold = [50, 50, 0, 0, 0, 0] # Middle L, A, B values. +threshold = [50, 50, 0, 0, 0, 0] # Middle L, A, B values. for i in range(60): img = sensor.snapshot() hist = img.get_histogram(roi=r) - lo = hist.get_percentile(0.01) # Get the CDF of the histogram at the 1% range (ADJUST AS NECESSARY)! - hi = hist.get_percentile(0.99) # Get the CDF of the histogram at the 99% range (ADJUST AS NECESSARY)! + lo = hist.get_percentile( + 0.01 + ) # Get the CDF of the histogram at the 1% range (ADJUST AS NECESSARY)! + hi = hist.get_percentile( + 0.99 + ) # Get the CDF of the histogram at the 99% range (ADJUST AS NECESSARY)! # Average in percentile values. threshold[0] = (threshold[0] + lo.l_value()) // 2 threshold[1] = (threshold[1] + hi.l_value()) // 2 @@ -37,7 +46,9 @@ for i in range(60): threshold[3] = (threshold[3] + hi.a_value()) // 2 threshold[4] = (threshold[4] + lo.b_value()) // 2 threshold[5] = (threshold[5] + hi.b_value()) // 2 - for blob in img.find_blobs([threshold], pixels_threshold=100, area_threshold=100, merge=True, margin=10): + for blob in img.find_blobs( + [threshold], pixels_threshold=100, area_threshold=100, merge=True, margin=10 + ): img.draw_rectangle(blob.rect()) img.draw_cross(blob.cx(), blob.cy()) img.draw_rectangle(r) @@ -45,10 +56,12 @@ for i in range(60): print("Thresholds learned...") print("Tracking colors...") -while(True): +while True: clock.tick() img = sensor.snapshot() - for blob in img.find_blobs([threshold], pixels_threshold=100, area_threshold=100, merge=True, margin=10): + for blob in img.find_blobs( + [threshold], pixels_threshold=100, area_threshold=100, merge=True, margin=10 + ): img.draw_rectangle(blob.rect()) img.draw_cross(blob.cx(), blob.cy()) print(clock.fps()) diff --git a/scripts/examples/02-Image-Processing/02-Color-Tracking/black_grayscale_line_following.py b/scripts/examples/02-Image-Processing/02-Color-Tracking/black_grayscale_line_following.py index 706340e40..706354b02 100644 --- a/scripts/examples/02-Image-Processing/02-Color-Tracking/black_grayscale_line_following.py +++ b/scripts/examples/02-Image-Processing/02-Color-Tracking/black_grayscale_line_following.py @@ -21,33 +21,36 @@ GRAYSCALE_THRESHOLD = [(0, 64)] # centroid of the largest blob in each roi. 
The x position of the centroids # will then be averaged with different weights where the most weight is assigned # to the roi near the bottom of the image and less to the next roi and so on. -ROIS = [ # [ROI, weight] - (0, 100, 160, 20, 0.7), # You'll need to tweak the weights for your app - (0, 50, 160, 20, 0.3), # depending on how your robot is setup. - (0, 0, 160, 20, 0.1) - ] +ROIS = [ # [ROI, weight] + (0, 100, 160, 20, 0.7), # You'll need to tweak the weights for your app + (0, 50, 160, 20, 0.3), # depending on how your robot is setup. + (0, 0, 160, 20, 0.1), +] # Compute the weight divisor (we're computing this so you don't have to make weights add to 1). weight_sum = 0 -for r in ROIS: weight_sum += r[4] # r[4] is the roi weight. +for r in ROIS: + weight_sum += r[4] # r[4] is the roi weight. # Camera setup... -sensor.reset() # Initialize the camera sensor. -sensor.set_pixformat(sensor.GRAYSCALE) # use grayscale. -sensor.set_framesize(sensor.QQVGA) # use QQVGA for speed. -sensor.skip_frames(time = 2000) # Let new settings take affect. -sensor.set_auto_gain(False) # must be turned off for color tracking -sensor.set_auto_whitebal(False) # must be turned off for color tracking -clock = time.clock() # Tracks FPS. +sensor.reset() # Initialize the camera sensor. +sensor.set_pixformat(sensor.GRAYSCALE) # use grayscale. +sensor.set_framesize(sensor.QQVGA) # use QQVGA for speed. +sensor.skip_frames(time=2000) # Let new settings take affect. +sensor.set_auto_gain(False) # must be turned off for color tracking +sensor.set_auto_whitebal(False) # must be turned off for color tracking +clock = time.clock() # Tracks FPS. -while(True): - clock.tick() # Track elapsed milliseconds between snapshots(). - img = sensor.snapshot() # Take a picture and return the image. +while True: + clock.tick() # Track elapsed milliseconds between snapshots(). + img = sensor.snapshot() # Take a picture and return the image. centroid_sum = 0 for r in ROIS: - blobs = img.find_blobs(GRAYSCALE_THRESHOLD, roi=r[0:4], merge=True) # r[0:4] is roi tuple. + blobs = img.find_blobs( + GRAYSCALE_THRESHOLD, roi=r[0:4], merge=True + ) # r[0:4] is roi tuple. if blobs: # Find the blob with the most pixels. @@ -55,12 +58,11 @@ while(True): # Draw a rect around the blob. img.draw_rectangle(largest_blob.rect()) - img.draw_cross(largest_blob.cx(), - largest_blob.cy()) + img.draw_cross(largest_blob.cx(), largest_blob.cy()) - centroid_sum += largest_blob.cx() * r[4] # r[4] is the roi weight. + centroid_sum += largest_blob.cx() * r[4] # r[4] is the roi weight. - center_pos = (centroid_sum / weight_sum) # Determine center of line. + center_pos = centroid_sum / weight_sum # Determine center of line. # Convert the center_pos to a deflection angle. We're using a non-linear # operation so that the response gets stronger the farther off the line we @@ -73,7 +75,7 @@ while(True): # opposite side of the triangle is the deviation of the center position # from the center and the adjacent side is half the Y res. This limits # the angle output to around -45 to 45. (It's not quite -45 and 45). - deflection_angle = -math.atan((center_pos-80)/60) + deflection_angle = -math.atan((center_pos - 80) / 60) # Convert angle in radians to degrees. deflection_angle = math.degrees(deflection_angle) @@ -83,5 +85,5 @@ while(True): # the line farther away from the robot for a better prediction. 
print("Turn Angle: %f" % deflection_angle) - print(clock.fps()) # Note: Your OpenMV Cam runs about half as fast while + print(clock.fps()) # Note: Your OpenMV Cam runs about half as fast while # connected to your computer. The FPS should increase once disconnected. diff --git a/scripts/examples/02-Image-Processing/02-Color-Tracking/image_histogram_info.py b/scripts/examples/02-Image-Processing/02-Color-Tracking/image_histogram_info.py index ac669b70c..257b7d43c 100644 --- a/scripts/examples/02-Image-Processing/02-Color-Tracking/image_histogram_info.py +++ b/scripts/examples/02-Image-Processing/02-Color-Tracking/image_histogram_info.py @@ -6,14 +6,14 @@ import sensor import time sensor.reset() -sensor.set_pixformat(sensor.GRAYSCALE) # or RGB565. +sensor.set_pixformat(sensor.GRAYSCALE) # or RGB565. sensor.set_framesize(sensor.QVGA) -sensor.skip_frames(time = 2000) -sensor.set_auto_gain(False) # must be turned off for color tracking -sensor.set_auto_whitebal(False) # must be turned off for color tracking +sensor.skip_frames(time=2000) +sensor.set_auto_gain(False) # must be turned off for color tracking +sensor.set_auto_whitebal(False) # must be turned off for color tracking clock = time.clock() -while(True): +while True: clock.tick() img = sensor.snapshot() # Gets the grayscale histogram for the image into 8 bins. diff --git a/scripts/examples/02-Image-Processing/02-Color-Tracking/image_statistics_info.py b/scripts/examples/02-Image-Processing/02-Color-Tracking/image_statistics_info.py index 93c9459ab..c914f279c 100644 --- a/scripts/examples/02-Image-Processing/02-Color-Tracking/image_statistics_info.py +++ b/scripts/examples/02-Image-Processing/02-Color-Tracking/image_statistics_info.py @@ -6,14 +6,14 @@ import sensor import time sensor.reset() -sensor.set_pixformat(sensor.GRAYSCALE) # or RGB565. +sensor.set_pixformat(sensor.GRAYSCALE) # or RGB565. sensor.set_framesize(sensor.QVGA) -sensor.skip_frames(time = 2000) -sensor.set_auto_gain(False) # must be turned off for color tracking -sensor.set_auto_whitebal(False) # must be turned off for color tracking +sensor.skip_frames(time=2000) +sensor.set_auto_gain(False) # must be turned off for color tracking +sensor.set_auto_whitebal(False) # must be turned off for color tracking clock = time.clock() -while(True): +while True: clock.tick() img = sensor.snapshot() print(img.get_statistics()) diff --git a/scripts/examples/02-Image-Processing/02-Color-Tracking/ir_beacon_grayscale_tracking.py b/scripts/examples/02-Image-Processing/02-Color-Tracking/ir_beacon_grayscale_tracking.py index 4f95a2eb5..d30c0cb00 100644 --- a/scripts/examples/02-Image-Processing/02-Color-Tracking/ir_beacon_grayscale_tracking.py +++ b/scripts/examples/02-Image-Processing/02-Color-Tracking/ir_beacon_grayscale_tracking.py @@ -5,27 +5,29 @@ import sensor import time -thresholds = (255, 255) # thresholds for bright white light from IR. +thresholds = (255, 255) # thresholds for bright white light from IR. 
sensor.reset() sensor.set_pixformat(sensor.GRAYSCALE) sensor.set_framesize(sensor.VGA) -sensor.set_windowing((240, 240)) # 240x240 center pixels of VGA -sensor.skip_frames(time = 2000) -sensor.set_auto_gain(False) # must be turned off for color tracking -sensor.set_auto_whitebal(False) # must be turned off for color tracking +sensor.set_windowing((240, 240)) # 240x240 center pixels of VGA +sensor.skip_frames(time=2000) +sensor.set_auto_gain(False) # must be turned off for color tracking +sensor.set_auto_whitebal(False) # must be turned off for color tracking clock = time.clock() # Only blobs that with more pixels than "pixel_threshold" and more area than "area_threshold" are # returned by "find_blobs" below. Change "pixels_threshold" and "area_threshold" if you change the # camera resolution. "merge=True" merges all overlapping blobs in the image. -while(True): +while True: clock.tick() img = sensor.snapshot() - for blob in img.find_blobs([thresholds], pixels_threshold=200, area_threshold=200, merge=True): + for blob in img.find_blobs( + [thresholds], pixels_threshold=200, area_threshold=200, merge=True + ): ratio = blob.w() / blob.h() - if (ratio >= 0.5) and (ratio <= 1.5): # filter out non-squarish blobs + if (ratio >= 0.5) and (ratio <= 1.5): # filter out non-squarish blobs img.draw_rectangle(blob.rect()) img.draw_cross(blob.cx(), blob.cy()) print(clock.fps()) diff --git a/scripts/examples/02-Image-Processing/02-Color-Tracking/ir_beacon_rgb565_tracking.py b/scripts/examples/02-Image-Processing/02-Color-Tracking/ir_beacon_rgb565_tracking.py index 75d668750..79a48eadf 100644 --- a/scripts/examples/02-Image-Processing/02-Color-Tracking/ir_beacon_rgb565_tracking.py +++ b/scripts/examples/02-Image-Processing/02-Color-Tracking/ir_beacon_rgb565_tracking.py @@ -5,27 +5,29 @@ import sensor import time -thresholds = (100, 100, 0, 0, 0, 0) # thresholds for bright white light from IR. +thresholds = (100, 100, 0, 0, 0, 0) # thresholds for bright white light from IR. sensor.reset() sensor.set_pixformat(sensor.RGB565) sensor.set_framesize(sensor.VGA) -sensor.set_windowing((240, 240)) # 240x240 center pixels of VGA -sensor.skip_frames(time = 2000) -sensor.set_auto_gain(False) # must be turned off for color tracking -sensor.set_auto_whitebal(False) # must be turned off for color tracking +sensor.set_windowing((240, 240)) # 240x240 center pixels of VGA +sensor.skip_frames(time=2000) +sensor.set_auto_gain(False) # must be turned off for color tracking +sensor.set_auto_whitebal(False) # must be turned off for color tracking clock = time.clock() # Only blobs that with more pixels than "pixel_threshold" and more area than "area_threshold" are # returned by "find_blobs" below. Change "pixels_threshold" and "area_threshold" if you change the # camera resolution. "merge=True" merges all overlapping blobs in the image. 
-while(True): +while True: clock.tick() img = sensor.snapshot() - for blob in img.find_blobs([thresholds], pixels_threshold=200, area_threshold=200, merge=True): + for blob in img.find_blobs( + [thresholds], pixels_threshold=200, area_threshold=200, merge=True + ): ratio = blob.w() / blob.h() - if (ratio >= 0.5) and (ratio <= 1.5): # filter out non-squarish blobs + if (ratio >= 0.5) and (ratio <= 1.5): # filter out non-squarish blobs img.draw_rectangle(blob.rect()) img.draw_cross(blob.cx(), blob.cy()) print(clock.fps()) diff --git a/scripts/examples/02-Image-Processing/02-Color-Tracking/multi_color_blob_tracking.py b/scripts/examples/02-Image-Processing/02-Color-Tracking/multi_color_blob_tracking.py index f14cd83ac..144820eb0 100644 --- a/scripts/examples/02-Image-Processing/02-Color-Tracking/multi_color_blob_tracking.py +++ b/scripts/examples/02-Image-Processing/02-Color-Tracking/multi_color_blob_tracking.py @@ -8,36 +8,40 @@ import math # Color Tracking Thresholds (L Min, L Max, A Min, A Max, B Min, B Max) # The below thresholds track in general red/green things. You may wish to tune them... -thresholds = [(30, 100, 15, 127, 15, 127), # generic_red_thresholds - (30, 100, -64, -8, -32, 32), # generic_green_thresholds - (0, 15, 0, 40, -80, -20)] # generic_blue_thresholds +thresholds = [ + (30, 100, 15, 127, 15, 127), # generic_red_thresholds + (30, 100, -64, -8, -32, 32), # generic_green_thresholds + (0, 15, 0, 40, -80, -20), +] # generic_blue_thresholds # You may pass up to 16 thresholds above. However, it's not really possible to segment any # scene with 16 thresholds before color thresholds start to overlap heavily. sensor.reset() sensor.set_pixformat(sensor.RGB565) sensor.set_framesize(sensor.QVGA) -sensor.skip_frames(time = 2000) -sensor.set_auto_gain(False) # must be turned off for color tracking -sensor.set_auto_whitebal(False) # must be turned off for color tracking +sensor.skip_frames(time=2000) +sensor.set_auto_gain(False) # must be turned off for color tracking +sensor.set_auto_whitebal(False) # must be turned off for color tracking clock = time.clock() # Only blobs that with more pixels than "pixel_threshold" and more area than "area_threshold" are # returned by "find_blobs" below. Change "pixels_threshold" and "area_threshold" if you change the # camera resolution. Don't set "merge=True" becuase that will merge blobs which we don't want here. -while(True): +while True: clock.tick() img = sensor.snapshot() for blob in img.find_blobs(thresholds, pixels_threshold=200, area_threshold=200): # These values depend on the blob not being circular - otherwise they will be shaky. if blob.elongation() > 0.5: - img.draw_edges(blob.min_corners(), color=(255,0,0)) - img.draw_line(blob.major_axis_line(), color=(0,255,0)) - img.draw_line(blob.minor_axis_line(), color=(0,0,255)) + img.draw_edges(blob.min_corners(), color=(255, 0, 0)) + img.draw_line(blob.major_axis_line(), color=(0, 255, 0)) + img.draw_line(blob.minor_axis_line(), color=(0, 0, 255)) # These values are stable all the time. img.draw_rectangle(blob.rect()) img.draw_cross(blob.cx(), blob.cy()) # Note - the blob rotation is unique to 0-180 only. 
- img.draw_keypoints([(blob.cx(), blob.cy(), int(math.degrees(blob.rotation())))], size=20) + img.draw_keypoints( + [(blob.cx(), blob.cy(), int(math.degrees(blob.rotation())))], size=20 + ) print(clock.fps()) diff --git a/scripts/examples/02-Image-Processing/02-Color-Tracking/multi_color_code_tracking.py b/scripts/examples/02-Image-Processing/02-Color-Tracking/multi_color_code_tracking.py index 06125db6f..11eaf22da 100644 --- a/scripts/examples/02-Image-Processing/02-Color-Tracking/multi_color_code_tracking.py +++ b/scripts/examples/02-Image-Processing/02-Color-Tracking/multi_color_code_tracking.py @@ -10,40 +10,58 @@ import time # Color Tracking Thresholds (L Min, L Max, A Min, A Max, B Min, B Max) # The below thresholds track in general red/green things. You may wish to tune them... -thresholds = [(30, 100, 15, 127, 15, 127), # generic_red_thresholds -> index is 0 so code == (1 << 0) - (30, 100, -64, -8, -32, 32), # generic_green_thresholds -> index is 1 so code == (1 << 1) - (0, 15, 0, 40, -80, -20)] # generic_blue_thresholds -> index is 2 so code == (1 << 2) +thresholds = [ + ( + 30, + 100, + 15, + 127, + 15, + 127, + ), # generic_red_thresholds -> index is 0 so code == (1 << 0) + ( + 30, + 100, + -64, + -8, + -32, + 32, + ), # generic_green_thresholds -> index is 1 so code == (1 << 1) + (0, 15, 0, 40, -80, -20), +] # generic_blue_thresholds -> index is 2 so code == (1 << 2) # Codes are or'ed together when "merge=True" for "find_blobs". sensor.reset() sensor.set_pixformat(sensor.RGB565) sensor.set_framesize(sensor.QVGA) -sensor.skip_frames(time = 2000) -sensor.set_auto_gain(False) # must be turned off for color tracking -sensor.set_auto_whitebal(False) # must be turned off for color tracking +sensor.skip_frames(time=2000) +sensor.set_auto_gain(False) # must be turned off for color tracking +sensor.set_auto_whitebal(False) # must be turned off for color tracking clock = time.clock() # Only blobs that with more pixels than "pixel_threshold" and more area than "area_threshold" are # returned by "find_blobs" below. Change "pixels_threshold" and "area_threshold" if you change the # camera resolution. "merge=True" must be set to merge overlapping color blobs for color codes. 
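# Sketch (not part of this patch): blob.code() is a bitmask whose bit
# positions follow the order of the thresholds list above (red=bit 0,
# green=bit 1, blue=bit 2), so code 3 is r/g, 5 is r/b, 6 is g/b, 7 is r/g/b.
NAMES = ("r", "g", "b")

def code_to_name(code):
    return "/".join(NAMES[i] for i in range(len(NAMES)) if code & (1 << i))

print(code_to_name(3))  # r/g
print(code_to_name(7))  # r/g/b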
-while(True): +while True: clock.tick() img = sensor.snapshot() - for blob in img.find_blobs(thresholds, pixels_threshold=100, area_threshold=100, merge=True): - if blob.code() == 3: # r/g code + for blob in img.find_blobs( + thresholds, pixels_threshold=100, area_threshold=100, merge=True + ): + if blob.code() == 3: # r/g code img.draw_rectangle(blob.rect()) img.draw_cross(blob.cx(), blob.cy()) img.draw_string(blob.x() + 2, blob.y() + 2, "r/g") - if blob.code() == 5: # r/b code + if blob.code() == 5: # r/b code img.draw_rectangle(blob.rect()) img.draw_cross(blob.cx(), blob.cy()) img.draw_string(blob.x() + 2, blob.y() + 2, "r/b") - if blob.code() == 6: # g/b code + if blob.code() == 6: # g/b code img.draw_rectangle(blob.rect()) img.draw_cross(blob.cx(), blob.cy()) img.draw_string(blob.x() + 2, blob.y() + 2, "g/b") - if blob.code() == 7: # r/g/b code + if blob.code() == 7: # r/g/b code img.draw_rectangle(blob.rect()) img.draw_cross(blob.cx(), blob.cy()) img.draw_string(blob.x() + 2, blob.y() + 2, "r/g/b") diff --git a/scripts/examples/02-Image-Processing/02-Color-Tracking/single_color_code_tracking.py b/scripts/examples/02-Image-Processing/02-Color-Tracking/single_color_code_tracking.py index 72a15fe8e..a6e68bbce 100644 --- a/scripts/examples/02-Image-Processing/02-Color-Tracking/single_color_code_tracking.py +++ b/scripts/examples/02-Image-Processing/02-Color-Tracking/single_color_code_tracking.py @@ -11,35 +11,48 @@ import math # Color Tracking Thresholds (L Min, L Max, A Min, A Max, B Min, B Max) # The below thresholds track in general red/green things. You may wish to tune them... -thresholds = [(30, 100, 15, 127, 15, 127), # generic_red_thresholds -> index is 0 so code == (1 << 0) - (30, 100, -64, -8, -32, 32)] # generic_green_thresholds -> index is 1 so code == (1 << 1) +thresholds = [ + ( + 30, + 100, + 15, + 127, + 15, + 127, + ), # generic_red_thresholds -> index is 0 so code == (1 << 0) + (30, 100, -64, -8, -32, 32), +] # generic_green_thresholds -> index is 1 so code == (1 << 1) # Codes are or'ed together when "merge=True" for "find_blobs". sensor.reset() sensor.set_pixformat(sensor.RGB565) sensor.set_framesize(sensor.QVGA) -sensor.skip_frames(time = 2000) -sensor.set_auto_gain(False) # must be turned off for color tracking -sensor.set_auto_whitebal(False) # must be turned off for color tracking +sensor.skip_frames(time=2000) +sensor.set_auto_gain(False) # must be turned off for color tracking +sensor.set_auto_whitebal(False) # must be turned off for color tracking clock = time.clock() # Only blobs that with more pixels than "pixel_threshold" and more area than "area_threshold" are # returned by "find_blobs" below. Change "pixels_threshold" and "area_threshold" if you change the # camera resolution. "merge=True" must be set to merge overlapping color blobs for color codes. -while(True): +while True: clock.tick() img = sensor.snapshot() - for blob in img.find_blobs(thresholds, pixels_threshold=100, area_threshold=100, merge=True): - if blob.code() == 3: # r/g code == (1 << 1) | (1 << 0) + for blob in img.find_blobs( + thresholds, pixels_threshold=100, area_threshold=100, merge=True + ): + if blob.code() == 3: # r/g code == (1 << 1) | (1 << 0) # These values depend on the blob not being circular - otherwise they will be shaky. 
if blob.elongation() > 0.5: - img.draw_edges(blob.min_corners(), color=(255,0,0)) - img.draw_line(blob.major_axis_line(), color=(0,255,0)) - img.draw_line(blob.minor_axis_line(), color=(0,0,255)) + img.draw_edges(blob.min_corners(), color=(255, 0, 0)) + img.draw_line(blob.major_axis_line(), color=(0, 255, 0)) + img.draw_line(blob.minor_axis_line(), color=(0, 0, 255)) # These values are stable all the time. img.draw_rectangle(blob.rect()) img.draw_cross(blob.cx(), blob.cy()) # Note - the blob rotation is unique to 0-180 only. - img.draw_keypoints([(blob.cx(), blob.cy(), int(math.degrees(blob.rotation())))], size=20) + img.draw_keypoints( + [(blob.cx(), blob.cy(), int(math.degrees(blob.rotation())))], size=20 + ) print(clock.fps()) diff --git a/scripts/examples/02-Image-Processing/02-Color-Tracking/single_color_grayscale_blob_tracking.py b/scripts/examples/02-Image-Processing/02-Color-Tracking/single_color_grayscale_blob_tracking.py index ec5d907b4..e9e347945 100644 --- a/scripts/examples/02-Image-Processing/02-Color-Tracking/single_color_grayscale_blob_tracking.py +++ b/scripts/examples/02-Image-Processing/02-Color-Tracking/single_color_grayscale_blob_tracking.py @@ -13,19 +13,21 @@ thresholds = (245, 255) sensor.reset() sensor.set_pixformat(sensor.GRAYSCALE) sensor.set_framesize(sensor.VGA) -sensor.skip_frames(time = 2000) -sensor.set_auto_gain(False) # must be turned off for color tracking -sensor.set_auto_whitebal(False) # must be turned off for color tracking +sensor.skip_frames(time=2000) +sensor.set_auto_gain(False) # must be turned off for color tracking +sensor.set_auto_whitebal(False) # must be turned off for color tracking clock = time.clock() # Only blobs that with more pixels than "pixel_threshold" and more area than "area_threshold" are # returned by "find_blobs" below. Change "pixels_threshold" and "area_threshold" if you change the # camera resolution. "merge=True" merges all overlapping blobs in the image. -while(True): +while True: clock.tick() img = sensor.snapshot() - for blob in img.find_blobs([thresholds], pixels_threshold=100, area_threshold=100, merge=True): + for blob in img.find_blobs( + [thresholds], pixels_threshold=100, area_threshold=100, merge=True + ): # These values depend on the blob not being circular - otherwise they will be shaky. if blob.elongation() > 0.5: img.draw_edges(blob.min_corners(), color=0) @@ -35,5 +37,9 @@ while(True): img.draw_rectangle(blob.rect(), color=127) img.draw_cross(blob.cx(), blob.cy(), color=127) # Note - the blob rotation is unique to 0-180 only. - img.draw_keypoints([(blob.cx(), blob.cy(), int(math.degrees(blob.rotation())))], size=40, color=127) + img.draw_keypoints( + [(blob.cx(), blob.cy(), int(math.degrees(blob.rotation())))], + size=40, + color=127, + ) print(clock.fps()) diff --git a/scripts/examples/02-Image-Processing/02-Color-Tracking/single_color_rgb565_blob_tracking.py b/scripts/examples/02-Image-Processing/02-Color-Tracking/single_color_rgb565_blob_tracking.py index cf9843b13..503f88045 100644 --- a/scripts/examples/02-Image-Processing/02-Color-Tracking/single_color_rgb565_blob_tracking.py +++ b/scripts/examples/02-Image-Processing/02-Color-Tracking/single_color_rgb565_blob_tracking.py @@ -6,38 +6,47 @@ import sensor import time import math -threshold_index = 0 # 0 for red, 1 for green, 2 for blue +threshold_index = 0 # 0 for red, 1 for green, 2 for blue # Color Tracking Thresholds (L Min, L Max, A Min, A Max, B Min, B Max) # The below thresholds track in general red/green/blue things. You may wish to tune them... 
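# Sketch (not part of this patch): each color tracking threshold in these
# examples is (L Min, L Max, A Min, A Max, B Min, B Max) in the LAB color
# space, where L runs 0-100 and A/B run -128 to 127. A quick membership check
# that can help when tuning:
def lab_in_threshold(L, A, B, t):
    return t[0] <= L <= t[1] and t[2] <= A <= t[3] and t[4] <= B <= t[5]

print(lab_in_threshold(60, 40, 20, (30, 100, 15, 127, 15, 127)))  # True: red-ish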
-thresholds = [(30, 100, 15, 127, 15, 127), # generic_red_thresholds - (30, 100, -64, -8, -32, 32), # generic_green_thresholds - (0, 30, 0, 64, -128, 0)] # generic_blue_thresholds +thresholds = [ + (30, 100, 15, 127, 15, 127), # generic_red_thresholds + (30, 100, -64, -8, -32, 32), # generic_green_thresholds + (0, 30, 0, 64, -128, 0), +] # generic_blue_thresholds sensor.reset() sensor.set_pixformat(sensor.RGB565) sensor.set_framesize(sensor.QVGA) -sensor.skip_frames(time = 2000) -sensor.set_auto_gain(False) # must be turned off for color tracking -sensor.set_auto_whitebal(False) # must be turned off for color tracking +sensor.skip_frames(time=2000) +sensor.set_auto_gain(False) # must be turned off for color tracking +sensor.set_auto_whitebal(False) # must be turned off for color tracking clock = time.clock() # Only blobs that with more pixels than "pixel_threshold" and more area than "area_threshold" are # returned by "find_blobs" below. Change "pixels_threshold" and "area_threshold" if you change the # camera resolution. "merge=True" merges all overlapping blobs in the image. -while(True): +while True: clock.tick() img = sensor.snapshot() - for blob in img.find_blobs([thresholds[threshold_index]], pixels_threshold=200, area_threshold=200, merge=True): + for blob in img.find_blobs( + [thresholds[threshold_index]], + pixels_threshold=200, + area_threshold=200, + merge=True, + ): # These values depend on the blob not being circular - otherwise they will be shaky. if blob.elongation() > 0.5: - img.draw_edges(blob.min_corners(), color=(255,0,0)) - img.draw_line(blob.major_axis_line(), color=(0,255,0)) - img.draw_line(blob.minor_axis_line(), color=(0,0,255)) + img.draw_edges(blob.min_corners(), color=(255, 0, 0)) + img.draw_line(blob.major_axis_line(), color=(0, 255, 0)) + img.draw_line(blob.minor_axis_line(), color=(0, 0, 255)) # These values are stable all the time. img.draw_rectangle(blob.rect()) img.draw_cross(blob.cx(), blob.cy()) # Note - the blob rotation is unique to 0-180 only. - img.draw_keypoints([(blob.cx(), blob.cy(), int(math.degrees(blob.rotation())))], size=20) + img.draw_keypoints( + [(blob.cx(), blob.cy(), int(math.degrees(blob.rotation())))], size=20 + ) print(clock.fps()) diff --git a/scripts/examples/02-Image-Processing/03-Frame-Differencing/in_memory_advanced_frame_differencing.py b/scripts/examples/02-Image-Processing/03-Frame-Differencing/in_memory_advanced_frame_differencing.py index fde8e5785..0c91e0b65 100644 --- a/scripts/examples/02-Image-Processing/03-Frame-Differencing/in_memory_advanced_frame_differencing.py +++ b/scripts/examples/02-Image-Processing/03-Frame-Differencing/in_memory_advanced_frame_differencing.py @@ -5,21 +5,19 @@ # backgound image changing overtime. import sensor -import pyb -import os import time TRIGGER_THRESHOLD = 5 -BG_UPDATE_FRAMES = 50 # How many frames before blending. -BG_UPDATE_BLEND = 128 # How much to blend by... ([0-256]==[0.0-1.0]). +BG_UPDATE_FRAMES = 50 # How many frames before blending. +BG_UPDATE_BLEND = 128 # How much to blend by... ([0-256]==[0.0-1.0]). -sensor.reset() # Initialize the camera sensor. -sensor.set_pixformat(sensor.RGB565) # or sensor.RGB565 -sensor.set_framesize(sensor.QVGA) # or sensor.QQVGA (or others) -sensor.skip_frames(time = 2000) # Let new settings take affect. -sensor.set_auto_whitebal(False) # Turn off white balance. -clock = time.clock() # Tracks FPS. +sensor.reset() # Initialize the camera sensor. 
+sensor.set_pixformat(sensor.RGB565)  # or sensor.GRAYSCALE
+sensor.set_framesize(sensor.QVGA)  # or sensor.QQVGA (or others)
+sensor.skip_frames(time=2000)  # Let new settings take effect.
+sensor.set_auto_whitebal(False)  # Turn off white balance.
+clock = time.clock()  # Tracks FPS.

 # Take from the main frame buffer's RAM to allocate a second frame buffer.
 # There's a lot more RAM in the frame buffer than in the MicroPython heap.
@@ -30,19 +28,19 @@ clock = time.clock() # Tracks FPS.
 extra_fb = sensor.alloc_extra_fb(sensor.width(), sensor.height(), sensor.RGB565)

 print("About to save background image...")
-sensor.skip_frames(time = 2000) # Give the user time to get ready.
+sensor.skip_frames(time=2000)  # Give the user time to get ready.
 extra_fb.replace(sensor.snapshot())
 print("Saved background image - Now frame differencing!")

 triggered = False

 frame_count = 0
-while(True):
-    clock.tick() # Track elapsed milliseconds between snapshots().
-    img = sensor.snapshot() # Take a picture and return the image.
+while True:
+    clock.tick()  # Track elapsed milliseconds between snapshots().
+    img = sensor.snapshot()  # Take a picture and return the image.

     frame_count += 1
-    if (frame_count > BG_UPDATE_FRAMES):
+    if frame_count > BG_UPDATE_FRAMES:
         frame_count = 0
         # Blend in new frame. We're doing 256-alpha here because we want to
         # blend the new frame into the background. Not the background into the
@@ -50,7 +48,7 @@ while(True):
         # ((NEW*(alpha))+(OLD*(256-alpha)))/256. So, a low alpha results in
         # low blending of the new image while a high alpha results in high
         # blending of the new image. We need to reverse that for this update.
-        img.blend(extra_fb, alpha=(256-BG_UPDATE_BLEND))
+        img.blend(extra_fb, alpha=(256 - BG_UPDATE_BLEND))
         extra_fb.replace(img)

     # Replace the image with the "abs(NEW-OLD)" frame difference.
@@ -64,5 +62,5 @@ while(True):
     diff = hist.get_percentile(0.99).l_value() - hist.get_percentile(0.90).l_value()
     triggered = diff > TRIGGER_THRESHOLD

-    print(clock.fps(), triggered) # Note: Your OpenMV Cam runs about half as fast while
+    print(clock.fps(), triggered)  # Note: Your OpenMV Cam runs about half as fast while
     # connected to your computer. The FPS should increase once disconnected.
diff --git a/scripts/examples/02-Image-Processing/03-Frame-Differencing/in_memory_basic_frame_differencing.py b/scripts/examples/02-Image-Processing/03-Frame-Differencing/in_memory_basic_frame_differencing.py
index 2b1d438ab..b91bcb85b 100644
--- a/scripts/examples/02-Image-Processing/03-Frame-Differencing/in_memory_basic_frame_differencing.py
+++ b/scripts/examples/02-Image-Processing/03-Frame-Differencing/in_memory_basic_frame_differencing.py
@@ -5,18 +5,16 @@
 # So, as time passes the background image may change resulting in issues.

 import sensor
-import pyb
-import os
 import time

 TRIGGER_THRESHOLD = 5

-sensor.reset() # Initialize the camera sensor.
-sensor.set_pixformat(sensor.RGB565) # or sensor.GRAYSCALE
-sensor.set_framesize(sensor.QVGA) # or sensor.QQVGA (or others)
-sensor.skip_frames(time = 2000) # Let new settings take affect.
-sensor.set_auto_whitebal(False) # Turn off white balance.
-clock = time.clock() # Tracks FPS.
+sensor.reset()  # Initialize the camera sensor.
+sensor.set_pixformat(sensor.RGB565)  # or sensor.GRAYSCALE
+sensor.set_framesize(sensor.QVGA)  # or sensor.QQVGA (or others)
+sensor.skip_frames(time=2000)  # Let new settings take effect.
+sensor.set_auto_whitebal(False)  # Turn off white balance.
+clock = time.clock()  # Tracks FPS.
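# Sketch (not part of this patch): the blend() update in the advanced example
# above is an integer exponential moving average,
# OUT = (NEW*alpha + OLD*(256-alpha)) / 256. With BG_UPDATE_BLEND = 128 the
# alpha actually passed is 256 - 128 = 128, so each update moves a background
# pixel halfway toward the new frame:
def blended(new, old, alpha=128):
    return (new * alpha + old * (256 - alpha)) // 256

print(blended(200, 100))  # 150 - halfway between old 100 and new 200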
 # Take from the main frame buffer's RAM to allocate a second frame buffer.
 # There's a lot more RAM in the frame buffer than in the MicroPython heap.
@@ -27,13 +25,13 @@ clock = time.clock() # Tracks FPS.
 extra_fb = sensor.alloc_extra_fb(sensor.width(), sensor.height(), sensor.RGB565)

 print("About to save background image...")
-sensor.skip_frames(time = 2000) # Give the user time to get ready.
+sensor.skip_frames(time=2000)  # Give the user time to get ready.
 extra_fb.replace(sensor.snapshot())
 print("Saved background image - Now frame differencing!")

-while(True):
-    clock.tick() # Track elapsed milliseconds between snapshots().
-    img = sensor.snapshot() # Take a picture and return the image.
+while True:
+    clock.tick()  # Track elapsed milliseconds between snapshots().
+    img = sensor.snapshot()  # Take a picture and return the image.

     # Replace the image with the "abs(NEW-OLD)" frame difference.
     img.difference(extra_fb)
@@ -46,5 +44,5 @@ while(True):
     diff = hist.get_percentile(0.99).l_value() - hist.get_percentile(0.90).l_value()
     triggered = diff > TRIGGER_THRESHOLD

-    print(clock.fps(), triggered) # Note: Your OpenMV Cam runs about half as fast while
+    print(clock.fps(), triggered)  # Note: Your OpenMV Cam runs about half as fast while
     # connected to your computer. The FPS should increase once disconnected.
diff --git a/scripts/examples/02-Image-Processing/03-Frame-Differencing/in_memory_shadow_removal.py b/scripts/examples/02-Image-Processing/03-Frame-Differencing/in_memory_shadow_removal.py
index 1f1bc7f9f..59234069a 100644
--- a/scripts/examples/02-Image-Processing/03-Frame-Differencing/in_memory_shadow_removal.py
+++ b/scripts/examples/02-Image-Processing/03-Frame-Differencing/in_memory_shadow_removal.py
@@ -4,21 +4,19 @@
 # shadow removal to help reduce the effects of cast shadows in your scene.

 import sensor
-import pyb
-import os
 import time

 TRIGGER_THRESHOLD = 5

-sensor.reset() # Initialize the camera sensor.
-sensor.set_pixformat(sensor.RGB565) # or sensor.GRAYSCALE
-sensor.set_framesize(sensor.QQVGA) # or sensor.QVGA (or others)
-if sensor.get_id() == sensor.OV7725: # Reduce sensor PLL from 6x to 4x.
+sensor.reset()  # Initialize the camera sensor.
+sensor.set_pixformat(sensor.RGB565)  # or sensor.GRAYSCALE
+sensor.set_framesize(sensor.QQVGA)  # or sensor.QVGA (or others)
+if sensor.get_id() == sensor.OV7725:  # Reduce sensor PLL from 6x to 4x.
     sensor.__write_reg(0x0D, (sensor.__read_reg(0x0D) & 0x3F) | 0x40)
-sensor.skip_frames(time = 2000) # Let new settings take affect.
-sensor.set_auto_whitebal(False) # Turn off white balance.
-sensor.set_auto_gain(False) # Turn this off too.
-clock = time.clock() # Tracks FPS.
+sensor.skip_frames(time=2000)  # Let new settings take effect.
+sensor.set_auto_whitebal(False)  # Turn off white balance.
+sensor.set_auto_gain(False)  # Turn this off too.
+clock = time.clock()  # Tracks FPS.

 # Take from the main frame buffer's RAM to allocate a second frame buffer.
 # There's a lot more RAM in the frame buffer than in the MicroPython heap.
@@ -29,13 +27,13 @@ clock = time.clock() # Tracks FPS.
 extra_fb = sensor.alloc_extra_fb(sensor.width(), sensor.height(), sensor.RGB565)

 print("About to save background image...")
-sensor.skip_frames(time = 2000) # Give the user time to get ready.
+sensor.skip_frames(time=2000)  # Give the user time to get ready.
 extra_fb.replace(sensor.snapshot())
 print("Saved background image - Now frame differencing!")

-while(True):
-    clock.tick() # Track elapsed milliseconds between snapshots().
-    img = sensor.snapshot() # Take a picture and return the image.
+while True:
+    clock.tick()  # Track elapsed milliseconds between snapshots().
+    img = sensor.snapshot()  # Take a picture and return the image.

     # Note that for shadow removal to work the background image must be
     # shadow free and have the same lighting as the latest image. Unlike max()
@@ -52,5 +50,5 @@ while(True):
     diff = hist.get_percentile(0.99).l_value() - hist.get_percentile(0.90).l_value()
     triggered = diff > TRIGGER_THRESHOLD

-    print(clock.fps(), triggered) # Note: Your OpenMV Cam runs about half as fast while
+    print(clock.fps(), triggered)  # Note: Your OpenMV Cam runs about half as fast while
     # connected to your computer. The FPS should increase once disconnected.
diff --git a/scripts/examples/02-Image-Processing/03-Frame-Differencing/in_memory_structural_similarity.py b/scripts/examples/02-Image-Processing/03-Frame-Differencing/in_memory_structural_similarity.py
index e80342b71..013c9e0dd 100644
--- a/scripts/examples/02-Image-Processing/03-Frame-Differencing/in_memory_structural_similarity.py
+++ b/scripts/examples/02-Image-Processing/03-Frame-Differencing/in_memory_structural_similarity.py
@@ -6,19 +6,17 @@
 # score between two images.

 import sensor
-import pyb
-import os
 import time

 # The image has likely changed if the sim.min() is lower than this.
 MIN_TRIGGER_THRESHOLD = -0.4

-sensor.reset() # Initialize the camera sensor.
-sensor.set_pixformat(sensor.RGB565) # or sensor.GRAYSCALE
-sensor.set_framesize(sensor.QVGA) # or sensor.QQVGA (or others)
-sensor.skip_frames(time = 2000) # Let new settings take affect.
-sensor.set_auto_whitebal(False) # Turn off white balance.
-clock = time.clock() # Tracks FPS.
+sensor.reset()  # Initialize the camera sensor.
+sensor.set_pixformat(sensor.RGB565)  # or sensor.GRAYSCALE
+sensor.set_framesize(sensor.QVGA)  # or sensor.QQVGA (or others)
+sensor.skip_frames(time=2000)  # Let new settings take effect.
+sensor.set_auto_whitebal(False)  # Turn off white balance.
+clock = time.clock()  # Tracks FPS.

 # Take from the main frame buffer's RAM to allocate a second frame buffer.
 # There's a lot more RAM in the frame buffer than in the MicroPython heap.
@@ -29,13 +27,13 @@ clock = time.clock() # Tracks FPS.
 extra_fb = sensor.alloc_extra_fb(sensor.width(), sensor.height(), sensor.RGB565)

 print("About to save background image...")
-sensor.skip_frames(time = 2000) # Give the user time to get ready.
+sensor.skip_frames(time=2000)  # Give the user time to get ready.
 extra_fb.replace(sensor.snapshot())
 print("Saved background image!")

-while(True):
-    clock.tick() # Track elapsed milliseconds between snapshots().
-    img = sensor.snapshot() # Take a picture and return the image.
+while True:
+    clock.tick()  # Track elapsed milliseconds between snapshots().
+    img = sensor.snapshot()  # Take a picture and return the image.

     sim = img.get_similarity(extra_fb)
     change = "- Change -" if sim.min() < MIN_TRIGGER_THRESHOLD else "- No Change -"
diff --git a/scripts/examples/02-Image-Processing/03-Frame-Differencing/on_disk_advanced_frame_differencing.py b/scripts/examples/02-Image-Processing/03-Frame-Differencing/on_disk_advanced_frame_differencing.py
index 9725b7abd..53bf48a73 100644
--- a/scripts/examples/02-Image-Processing/03-Frame-Differencing/on_disk_advanced_frame_differencing.py
+++ b/scripts/examples/02-Image-Processing/03-Frame-Differencing/on_disk_advanced_frame_differencing.py
@@ -7,38 +7,38 @@
 # background image changing over time.
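# Sketch (not part of this patch): in the structural-similarity example above,
# the SSIM scores returned by get_similarity() range from -1 (inverted) to +1
# (identical), and min() is the worst-matching 8x8 block, so the trigger fires
# on localized change even when the image-wide average similarity stays high.
def changed(sim, min_trigger_threshold=-0.4):
    return sim.min() < min_trigger_threshold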
 import sensor
-import pyb
 import os
 import time

 TRIGGER_THRESHOLD = 5

-BG_UPDATE_FRAMES = 50 # How many frames before blending.
-BG_UPDATE_BLEND = 128 # How much to blend by... ([0-256]==[0.0-1.0]).
+BG_UPDATE_FRAMES = 50  # How many frames before blending.
+BG_UPDATE_BLEND = 128  # How much to blend by... ([0-256]==[0.0-1.0]).

-sensor.reset() # Initialize the camera sensor.
-sensor.set_pixformat(sensor.RGB565) # or sensor.RGB565
-sensor.set_framesize(sensor.QVGA) # or sensor.QQVGA (or others)
-sensor.skip_frames(time = 2000) # Let new settings take affect.
-sensor.set_auto_whitebal(False) # Turn off white balance.
-clock = time.clock() # Tracks FPS.
+sensor.reset()  # Initialize the camera sensor.
+sensor.set_pixformat(sensor.RGB565)  # or sensor.GRAYSCALE
+sensor.set_framesize(sensor.QVGA)  # or sensor.QQVGA (or others)
+sensor.skip_frames(time=2000)  # Let new settings take effect.
+sensor.set_auto_whitebal(False)  # Turn off white balance.
+clock = time.clock()  # Tracks FPS.

-if not "temp" in os.listdir(): os.mkdir("temp") # Make a temp directory
+if "temp" not in os.listdir():
+    os.mkdir("temp")  # Make a temp directory

 print("About to save background image...")
-sensor.skip_frames(time = 2000) # Give the user time to get ready.
+sensor.skip_frames(time=2000)  # Give the user time to get ready.
 sensor.snapshot().save("temp/bg.bmp")
 print("Saved background image - Now frame differencing!")

 triggered = False

 frame_count = 0
-while(True):
-    clock.tick() # Track elapsed milliseconds between snapshots().
-    img = sensor.snapshot() # Take a picture and return the image.
+while True:
+    clock.tick()  # Track elapsed milliseconds between snapshots().
+    img = sensor.snapshot()  # Take a picture and return the image.

     frame_count += 1
-    if (frame_count > BG_UPDATE_FRAMES):
+    if frame_count > BG_UPDATE_FRAMES:
         frame_count = 0
         # Blend in new frame. We're doing 256-alpha here because we want to
         # blend the new frame into the background. Not the background into the
@@ -46,7 +46,7 @@ while(True):
         # ((NEW*(alpha))+(OLD*(256-alpha)))/256. So, a low alpha results in
         # low blending of the new image while a high alpha results in high
         # blending of the new image. We need to reverse that for this update.
-        img.blend("temp/bg.bmp", alpha=(256-BG_UPDATE_BLEND))
+        img.blend("temp/bg.bmp", alpha=(256 - BG_UPDATE_BLEND))
         img.save("temp/bg.bmp")

     # Replace the image with the "abs(NEW-OLD)" frame difference.
@@ -60,5 +60,5 @@ while(True):
     diff = hist.get_percentile(0.99).l_value() - hist.get_percentile(0.90).l_value()
     triggered = diff > TRIGGER_THRESHOLD

-    print(clock.fps(), triggered) # Note: Your OpenMV Cam runs about half as fast while
+    print(clock.fps(), triggered)  # Note: Your OpenMV Cam runs about half as fast while
     # connected to your computer. The FPS should increase once disconnected.
diff --git a/scripts/examples/02-Image-Processing/03-Frame-Differencing/on_disk_basic_frame_differencing.py b/scripts/examples/02-Image-Processing/03-Frame-Differencing/on_disk_basic_frame_differencing.py
index 93b937b0f..576f5263f 100644
--- a/scripts/examples/02-Image-Processing/03-Frame-Differencing/on_disk_basic_frame_differencing.py
+++ b/scripts/examples/02-Image-Processing/03-Frame-Differencing/on_disk_basic_frame_differencing.py
@@ -7,29 +7,29 @@
 # So, as time passes the background image may change resulting in issues.

 import sensor
-import pyb
 import os
 import time

 TRIGGER_THRESHOLD = 5

-sensor.reset() # Initialize the camera sensor.
-sensor.set_pixformat(sensor.RGB565) # or sensor.GRAYSCALE
-sensor.set_framesize(sensor.QVGA) # or sensor.QQVGA (or others)
-sensor.skip_frames(time = 2000) # Let new settings take affect.
-sensor.set_auto_whitebal(False) # Turn off white balance.
-clock = time.clock() # Tracks FPS.
+sensor.reset()  # Initialize the camera sensor.
+sensor.set_pixformat(sensor.RGB565)  # or sensor.GRAYSCALE
+sensor.set_framesize(sensor.QVGA)  # or sensor.QQVGA (or others)
+sensor.skip_frames(time=2000)  # Let new settings take effect.
+sensor.set_auto_whitebal(False)  # Turn off white balance.
+clock = time.clock()  # Tracks FPS.

-if not "temp" in os.listdir(): os.mkdir("temp") # Make a temp directory
+if "temp" not in os.listdir():
+    os.mkdir("temp")  # Make a temp directory

 print("About to save background image...")
-sensor.skip_frames(time = 2000) # Give the user time to get ready.
+sensor.skip_frames(time=2000)  # Give the user time to get ready.
 sensor.snapshot().save("temp/bg.bmp")
 print("Saved background image - Now frame differencing!")

-while(True):
-    clock.tick() # Track elapsed milliseconds between snapshots().
-    img = sensor.snapshot() # Take a picture and return the image.
+while True:
+    clock.tick()  # Track elapsed milliseconds between snapshots().
+    img = sensor.snapshot()  # Take a picture and return the image.

     # Replace the image with the "abs(NEW-OLD)" frame difference.
     img.difference("temp/bg.bmp")
@@ -42,5 +42,5 @@ while(True):
     diff = hist.get_percentile(0.99).l_value() - hist.get_percentile(0.90).l_value()
     triggered = diff > TRIGGER_THRESHOLD

-    print(clock.fps(), triggered) # Note: Your OpenMV Cam runs about half as fast while
+    print(clock.fps(), triggered)  # Note: Your OpenMV Cam runs about half as fast while
     # connected to your computer. The FPS should increase once disconnected.
diff --git a/scripts/examples/02-Image-Processing/03-Frame-Differencing/on_disk_shadow_removal.py b/scripts/examples/02-Image-Processing/03-Frame-Differencing/on_disk_shadow_removal.py
index b6062f851..45b3d5ede 100644
--- a/scripts/examples/02-Image-Processing/03-Frame-Differencing/on_disk_shadow_removal.py
+++ b/scripts/examples/02-Image-Processing/03-Frame-Differencing/on_disk_shadow_removal.py
@@ -6,32 +6,32 @@
 # shadow removal to help reduce the effects of cast shadows in your scene.

 import sensor
-import pyb
 import os
 import time

 TRIGGER_THRESHOLD = 5

-sensor.reset() # Initialize the camera sensor.
-sensor.set_pixformat(sensor.RGB565) # or sensor.GRAYSCALE
-sensor.set_framesize(sensor.QQVGA) # or sensor.QVGA (or others)
-if sensor.get_id() == sensor.OV7725: # Reduce sensor PLL from 6x to 4x.
+sensor.reset()  # Initialize the camera sensor.
+sensor.set_pixformat(sensor.RGB565)  # or sensor.GRAYSCALE
+sensor.set_framesize(sensor.QQVGA)  # or sensor.QVGA (or others)
+if sensor.get_id() == sensor.OV7725:  # Reduce sensor PLL from 6x to 4x.
     sensor.__write_reg(0x0D, (sensor.__read_reg(0x0D) & 0x3F) | 0x40)
-sensor.skip_frames(time = 2000) # Let new settings take affect.
-sensor.set_auto_whitebal(False) # Turn off white balance.
-sensor.set_auto_gain(False) # Turn this off too.
-clock = time.clock() # Tracks FPS.
+sensor.skip_frames(time=2000)  # Let new settings take effect.
+sensor.set_auto_whitebal(False)  # Turn off white balance.
+sensor.set_auto_gain(False)  # Turn this off too.
+clock = time.clock()  # Tracks FPS.
-if not "temp" in os.listdir(): os.mkdir("temp") # Make a temp directory +if not "temp" in os.listdir(): + os.mkdir("temp") # Make a temp directory print("About to save background image...") -sensor.skip_frames(time = 2000) # Give the user time to get ready. +sensor.skip_frames(time=2000) # Give the user time to get ready. sensor.snapshot().save("temp/bg.bmp") print("Saved background image - Now frame differencing!") -while(True): - clock.tick() # Track elapsed milliseconds between snapshots(). - img = sensor.snapshot() # Take a picture and return the image. +while True: + clock.tick() # Track elapsed milliseconds between snapshots(). + img = sensor.snapshot() # Take a picture and return the image. # Note that for shadow removal to work the background image must be # shadow free and have the same lighting as the latest image. Unlike max() @@ -48,5 +48,5 @@ while(True): diff = hist.get_percentile(0.99).l_value() - hist.get_percentile(0.90).l_value() triggered = diff > TRIGGER_THRESHOLD - print(clock.fps(), triggered) # Note: Your OpenMV Cam runs about half as fast while + print(clock.fps(), triggered) # Note: Your OpenMV Cam runs about half as fast while # connected to your computer. The FPS should increase once disconnected. diff --git a/scripts/examples/02-Image-Processing/03-Frame-Differencing/on_disk_structural_similarity.py b/scripts/examples/02-Image-Processing/03-Frame-Differencing/on_disk_structural_similarity.py index 151d43c24..1d9d0432d 100644 --- a/scripts/examples/02-Image-Processing/03-Frame-Differencing/on_disk_structural_similarity.py +++ b/scripts/examples/02-Image-Processing/03-Frame-Differencing/on_disk_structural_similarity.py @@ -8,30 +8,30 @@ # score between two images. import sensor -import pyb import os import time # The image has likely changed if the sim.min() is lower than this. MIN_TRIGGER_THRESHOLD = -0.4 -sensor.reset() # Initialize the camera sensor. -sensor.set_pixformat(sensor.RGB565) # or sensor.GRAYSCALE -sensor.set_framesize(sensor.QVGA) # or sensor.QQVGA (or others) -sensor.skip_frames(time = 2000) # Let new settings take affect. -sensor.set_auto_whitebal(False) # Turn off white balance. -clock = time.clock() # Tracks FPS. +sensor.reset() # Initialize the camera sensor. +sensor.set_pixformat(sensor.RGB565) # or sensor.GRAYSCALE +sensor.set_framesize(sensor.QVGA) # or sensor.QQVGA (or others) +sensor.skip_frames(time=2000) # Let new settings take affect. +sensor.set_auto_whitebal(False) # Turn off white balance. +clock = time.clock() # Tracks FPS. -if not "temp" in os.listdir(): os.mkdir("temp") # Make a temp directory +if not "temp" in os.listdir(): + os.mkdir("temp") # Make a temp directory print("About to save background image...") -sensor.skip_frames(time = 2000) # Give the user time to get ready. +sensor.skip_frames(time=2000) # Give the user time to get ready. sensor.snapshot().save("temp/bg.bmp") print("Saved background image!") -while(True): - clock.tick() # Track elapsed milliseconds between snapshots(). - img = sensor.snapshot() # Take a picture and return the image. +while True: + clock.tick() # Track elapsed milliseconds between snapshots(). + img = sensor.snapshot() # Take a picture and return the image. 
sim = img.get_similarity("temp/bg.bmp") change = "- Change -" if sim.min() < MIN_TRIGGER_THRESHOLD else "- No Change -" diff --git a/scripts/examples/03-Machine-Learning/00-TensorFlow/tf_image_classification.py b/scripts/examples/03-Machine-Learning/00-TensorFlow/tf_image_classification.py index 6c3a557f4..d34082026 100644 --- a/scripts/examples/03-Machine-Learning/00-TensorFlow/tf_image_classification.py +++ b/scripts/examples/03-Machine-Learning/00-TensorFlow/tf_image_classification.py @@ -20,24 +20,27 @@ import sensor import time -import os import tf -sensor.reset() # Reset and initialize the sensor. -sensor.set_pixformat(sensor.RGB565) # Set pixel format to RGB565 (or GRAYSCALE) -sensor.set_framesize(sensor.QVGA) # Set frame size to QVGA (320x240) -sensor.set_windowing((240, 240)) # Set 240x240 window. -sensor.skip_frames(time=2000) # Let the camera adjust. +sensor.reset() # Reset and initialize the sensor. +sensor.set_pixformat(sensor.RGB565) # Set pixel format to RGB565 (or GRAYSCALE) +sensor.set_framesize(sensor.QVGA) # Set frame size to QVGA (320x240) +sensor.set_windowing((240, 240)) # Set 240x240 window. +sensor.skip_frames(time=2000) # Let the camera adjust. -mobilenet_version = "1" # 1 -mobilenet_width = "0.5" # 1.0, 0.75, 0.50, 0.25 -mobilenet_resolution = "128" # 224, 192, 160, 128 +mobilenet_version = "1" # 1 +mobilenet_width = "0.5" # 1.0, 0.75, 0.50, 0.25 +mobilenet_resolution = "128" # 224, 192, 160, 128 -mobilenet = "mobilenet_v%s_%s_%s_quant.tflite" % (mobilenet_version, mobilenet_width, mobilenet_resolution) -labels = [line.rstrip('\n') for line in open("mobilenet_labels.txt")] +mobilenet = "mobilenet_v%s_%s_%s_quant.tflite" % ( + mobilenet_version, + mobilenet_width, + mobilenet_resolution, +) +labels = [line.rstrip("\n") for line in open("mobilenet_labels.txt")] clock = time.clock() -while(True): +while True: clock.tick() img = sensor.snapshot() @@ -57,12 +60,16 @@ while(True): # Setting y_overlap=-1 forces the window to stay centered in the ROI in the y direction always. If # x_overlap is not -1 the method will serach in all horizontal positions. - for obj in tf.classify(mobilenet, img, min_scale=1.0, scale_mul=0.5, x_overlap=0.0, y_overlap=0.0): + for obj in tf.classify( + mobilenet, img, min_scale=1.0, scale_mul=0.5, x_overlap=0.0, y_overlap=0.0 + ): print("**********\nTop 5 Detections at [x=%d,y=%d,w=%d,h=%d]" % obj.rect()) img.draw_rectangle(obj.rect()) # This combines the labels and confidence values into a list of tuples # and then sorts that list by the confidence values. - sorted_list = sorted(zip(labels, obj.output()), key = lambda x: x[1], reverse = True) + sorted_list = sorted( + zip(labels, obj.output()), key=lambda x: x[1], reverse=True + ) for i in range(5): print("%s = %f" % (sorted_list[i][0], sorted_list[i][1])) print(clock.fps(), "fps") diff --git a/scripts/examples/03-Machine-Learning/00-TensorFlow/tf_object_detection.py b/scripts/examples/03-Machine-Learning/00-TensorFlow/tf_object_detection.py index 571ffe71e..ad0977a7c 100644 --- a/scripts/examples/03-Machine-Learning/00-TensorFlow/tf_object_detection.py +++ b/scripts/examples/03-Machine-Learning/00-TensorFlow/tf_object_detection.py @@ -7,11 +7,11 @@ import time import tf import math -sensor.reset() # Reset and initialize the sensor. -sensor.set_pixformat(sensor.RGB565) # Set pixel format to RGB565 (or GRAYSCALE) -sensor.set_framesize(sensor.QVGA) # Set frame size to QVGA (320x240) -sensor.set_windowing((240, 240)) # Set 240x240 window. -sensor.skip_frames(time=2000) # Let the camera adjust. 
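# Sketch (not part of this patch): how the sorted(zip(...)) line in the
# classification example above ranks the network outputs (values hypothetical):
labels = ["cat", "dog", "bird"]
output = [0.10, 0.75, 0.15]
for name, score in sorted(zip(labels, output), key=lambda x: x[1], reverse=True):
    print("%s = %f" % (name, score))  # dog first, then bird, then cat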
+sensor.reset()  # Reset and initialize the sensor.
+sensor.set_pixformat(sensor.RGB565)  # Set pixel format to RGB565 (or GRAYSCALE)
+sensor.set_framesize(sensor.QVGA)  # Set frame size to QVGA (320x240)
+sensor.set_windowing((240, 240))  # Set 240x240 window.
+sensor.skip_frames(time=2000)  # Let the camera adjust.

 min_confidence = 0.4

@@ -19,21 +19,21 @@ min_confidence = 0.4
 labels, net = tf.load_builtin_model("fomo_face_detection")

 # Alternatively, models can be loaded from the filesystem storage.
-#net = tf.load('', load_to_fb=True)
-#labels = [line.rstrip('\n') for line in open("labels.txt")]
+# net = tf.load('', load_to_fb=True)
+# labels = [line.rstrip('\n') for line in open("labels.txt")]

-colors = [ # Add more colors if you are detecting more than 7 types of classes at once.
-    (255,   0,   0),
-    (  0, 255,   0),
-    (255, 255,   0),
-    (  0,   0, 255),
-    (255,   0, 255),
-    (  0, 255, 255),
+colors = [  # Add more colors if you are detecting more than 7 types of classes at once.
+    (255, 0, 0),
+    (0, 255, 0),
+    (255, 255, 0),
+    (0, 0, 255),
+    (255, 0, 255),
+    (0, 255, 255),
     (255, 255, 255),
 ]

 clock = time.clock()
-while(True):
+while True:
     clock.tick()

     img = sensor.snapshot()
@@ -42,9 +42,13 @@ while(True):
     # we skip class index 0, as that is the background, and then draw circles of the center
     # of our objects

-    for i, detection_list in enumerate(net.detect(img, thresholds=[(math.ceil(min_confidence * 255), 255)])):
-        if (i == 0): continue # background class
-        if (len(detection_list) == 0): continue # no detections for this class?
+    for i, detection_list in enumerate(
+        net.detect(img, thresholds=[(math.ceil(min_confidence * 255), 255)])
+    ):
+        if i == 0:
+            continue  # background class
+        if len(detection_list) == 0:
+            continue  # no detections for this class?

         print("********** %s **********" % labels[i])
         for d in detection_list:
diff --git a/scripts/examples/03-Machine-Learning/01-ST-CubeAI/nn_stm32cubeai.py b/scripts/examples/03-Machine-Learning/01-ST-CubeAI/nn_stm32cubeai.py
index 4b53e8d50..808866464 100644
--- a/scripts/examples/03-Machine-Learning/01-ST-CubeAI/nn_stm32cubeai.py
+++ b/scripts/examples/03-Machine-Learning/01-ST-CubeAI/nn_stm32cubeai.py
@@ -5,36 +5,42 @@ import sensor
 import time
 import nn_st

-sensor.reset() # Reset and initialize the sensor.
+sensor.reset()  # Reset and initialize the sensor.
 sensor.set_contrast(3)
 sensor.set_brightness(0)
 sensor.set_auto_gain(True)
 sensor.set_auto_exposure(True)
-sensor.set_pixformat(sensor.GRAYSCALE) # Set pixel format to Grayscale
-sensor.set_framesize(sensor.QQQVGA) # Set frame size to 80x60
-sensor.skip_frames(time = 2000) # Wait for settings take effect.
-clock = time.clock() # Create a clock object to track the FPS.
+sensor.set_pixformat(sensor.GRAYSCALE)  # Set pixel format to Grayscale
+sensor.set_framesize(sensor.QQQVGA)  # Set frame size to 80x60
+sensor.skip_frames(time=2000)  # Wait for settings to take effect.
+clock = time.clock()  # Create a clock object to track the FPS.

 # [CUBE.AI] Initialize the network
-net = nn_st.loadnnst('network')
+net = nn_st.loadnnst("network")

-nn_input_sz = 28 # The NN input is 28x28
+nn_input_sz = 28  # The NN input is 28x28

-while(True):
-    clock.tick() # Update the FPS clock.
+while True:
+    clock.tick()  # Update the FPS clock.
     img = sensor.snapshot() # Take a picture and return the image.
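# Sketch (not part of this patch): the crop() call below centers the 28x28
# network input on the frame. For the 80x60 QQQVGA frame used here:
nn_input_sz = 28
x = 80 // 2 - nn_input_sz // 2  # 26
y = 60 // 2 - nn_input_sz // 2  # 16
print((x, y, nn_input_sz, nn_input_sz))  # (26, 16, 28, 28)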
# Crop in the middle (avoids vignetting) - img.crop((img.width()//2-nn_input_sz//2, - img.height()//2-nn_input_sz//2, - nn_input_sz, - nn_input_sz)) + img.crop( + ( + img.width() // 2 - nn_input_sz // 2, + img.height() // 2 - nn_input_sz // 2, + nn_input_sz, + nn_input_sz, + ) + ) - # Binarize the image + # Binarize the image img.midpoint(2, bias=0.5, threshold=True, offset=5, invert=True) # [CUBE.AI] Run the inference out = net.predict(img) - print('Network argmax output: {}'.format( out.index(max(out)) )) + print("Network argmax output: {}".format(out.index(max(out)))) img.draw_string(0, 0, str(out.index(max(out)))) - print('FPS {}'.format(clock.fps())) # Note: OpenMV Cam runs about half as fast when connected + print( + "FPS {}".format(clock.fps()) + ) # Note: OpenMV Cam runs about half as fast when connected diff --git a/scripts/examples/03-Machine-Learning/02-Haar-Cascade/face_detection.py b/scripts/examples/03-Machine-Learning/02-Haar-Cascade/face_detection.py index 12da0291a..9486c4d47 100644 --- a/scripts/examples/03-Machine-Learning/02-Haar-Cascade/face_detection.py +++ b/scripts/examples/03-Machine-Learning/02-Haar-Cascade/face_detection.py @@ -33,7 +33,7 @@ print(face_cascade) # FPS clock clock = time.clock() -while (True): +while True: clock.tick() # Capture snapshot diff --git a/scripts/examples/03-Machine-Learning/02-Haar-Cascade/face_eye_detection.py b/scripts/examples/03-Machine-Learning/02-Haar-Cascade/face_eye_detection.py index a166bede1..a75d841d0 100644 --- a/scripts/examples/03-Machine-Learning/02-Haar-Cascade/face_eye_detection.py +++ b/scripts/examples/03-Machine-Learning/02-Haar-Cascade/face_eye_detection.py @@ -26,7 +26,7 @@ print(face_cascade, eyes_cascade) # FPS clock clock = time.clock() -while (True): +while True: clock.tick() # Capture snapshot @@ -42,7 +42,9 @@ while (True): img.draw_rectangle(face) # Now find eyes within each face. # Note: Use a higher threshold here (more detections) and lower scale (to find small objects) - eyes = img.find_features(eyes_cascade, threshold=0.5, scale_factor=1.2, roi=face) + eyes = img.find_features( + eyes_cascade, threshold=0.5, scale_factor=1.2, roi=face + ) for e in eyes: img.draw_rectangle(e) diff --git a/scripts/examples/03-Machine-Learning/02-Haar-Cascade/face_recognition.py b/scripts/examples/03-Machine-Learning/02-Haar-Cascade/face_recognition.py index a68ccfc41..3b6814134 100644 --- a/scripts/examples/03-Machine-Learning/02-Haar-Cascade/face_recognition.py +++ b/scripts/examples/03-Machine-Learning/02-Haar-Cascade/face_recognition.py @@ -7,23 +7,21 @@ # # NOTE: This is just a PoC implementation of the paper mentioned above, it does Not work well in real life conditions. 
-import sensor -import time import image SUB = "s2" NUM_SUBJECTS = 5 NUM_SUBJECTS_IMGS = 10 -img = image.Image("orl_faces/%s/1.pgm"%(SUB)).mask_ellipse() +img = image.Image("orl_faces/%s/1.pgm" % (SUB)).mask_ellipse() d0 = img.find_lbp((0, 0, img.width(), img.height())) img = None print("") -for s in range(1, NUM_SUBJECTS+1): +for s in range(1, NUM_SUBJECTS + 1): dist = 0 - for i in range(2, NUM_SUBJECTS_IMGS+1): - img = image.Image("orl_faces/s%d/%d.pgm"%(s, i)).mask_ellipse() + for i in range(2, NUM_SUBJECTS_IMGS + 1): + img = image.Image("orl_faces/s%d/%d.pgm" % (s, i)).mask_ellipse() d1 = img.find_lbp((0, 0, img.width(), img.height())) dist += image.match_descriptor(d0, d1) - print("Average dist for subject %d: %d"%(s, dist/NUM_SUBJECTS_IMGS)) + print("Average dist for subject %d: %d" % (s, dist / NUM_SUBJECTS_IMGS)) diff --git a/scripts/examples/03-Machine-Learning/02-Haar-Cascade/face_tracking.py b/scripts/examples/03-Machine-Learning/02-Haar-Cascade/face_tracking.py index 7da30278f..3590a369d 100644 --- a/scripts/examples/03-Machine-Learning/02-Haar-Cascade/face_tracking.py +++ b/scripts/examples/03-Machine-Learning/02-Haar-Cascade/face_tracking.py @@ -18,7 +18,7 @@ sensor.set_windowing((320, 240)) sensor.set_pixformat(sensor.GRAYSCALE) # Skip a few frames to allow the sensor settle down -sensor.skip_frames(time = 2000) +sensor.skip_frames(time=2000) # Load Haar Cascade # By default this will use all stages, lower satges is faster but less accurate. @@ -29,16 +29,23 @@ print(face_cascade) kpts1 = None # Find a face! -while (kpts1 == None): +while kpts1 is None: img = sensor.snapshot() img.draw_string(0, 0, "Looking for a face...") # Find faces objects = img.find_features(face_cascade, threshold=0.5, scale=1.25) if objects: # Expand the ROI by 31 pixels in every direction - face = (objects[0][0]-31, objects[0][1]-31,objects[0][2]+31*2, objects[0][3]+31*2) + face = ( + objects[0][0] - 31, + objects[0][1] - 31, + objects[0][2] + 31 * 2, + objects[0][3] + 31 * 2, + ) # Extract keypoints using the detect face size as the ROI - kpts1 = img.find_keypoints(threshold=10, scale_factor=1.1, max_keypoints=100, roi=face) + kpts1 = img.find_keypoints( + threshold=10, scale_factor=1.1, max_keypoints=100, roi=face + ) # Draw a rectangle around the first face img.draw_rectangle(objects[0]) @@ -51,20 +58,22 @@ time.sleep_ms(2000) # FPS clock clock = time.clock() -while (True): +while True: clock.tick() img = sensor.snapshot() # Extract keypoints from the whole frame - kpts2 = img.find_keypoints(threshold=10, scale_factor=1.1, max_keypoints=100, normalized=True) + kpts2 = img.find_keypoints( + threshold=10, scale_factor=1.1, max_keypoints=100, normalized=True + ) - if (kpts2): + if kpts2: # Match the first set of keypoints with the second one - c=image.match_descriptor(kpts1, kpts2, threshold=85) - match = c[6] # C[6] contains the number of matches. - if (match>5): + c = image.match_descriptor(kpts1, kpts2, threshold=85) + match = c[6] # C[6] contains the number of matches. 
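# Sketch (not part of this patch): naming the match_descriptor() tuple fields
# used by the face tracking example, following its own comments and prints:
# c[0]/c[1] are the match center, c[2:6] the bounding rect, c[6] the number
# of matches, and c[7] the value printed as "dt".
def unpack_match(c):
    return c[0], c[1], c[2:6], c[6], c[7]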
+ if match > 5: img.draw_rectangle(c[2:6]) img.draw_cross(c[0], c[1], size=10) - print(kpts2, "matched:%d dt:%d"%(match, c[7])) + print(kpts2, "matched:%d dt:%d" % (match, c[7])) # Draw FPS - img.draw_string(0, 0, "FPS:%.2f"%(clock.fps())) + img.draw_string(0, 0, "FPS:%.2f" % (clock.fps())) diff --git a/scripts/examples/03-Machine-Learning/02-Haar-Cascade/iris_detection.py b/scripts/examples/03-Machine-Learning/02-Haar-Cascade/iris_detection.py index 6fc0e263f..febf6f27c 100644 --- a/scripts/examples/03-Machine-Learning/02-Haar-Cascade/iris_detection.py +++ b/scripts/examples/03-Machine-Learning/02-Haar-Cascade/iris_detection.py @@ -34,7 +34,7 @@ print(eyes_cascade) # FPS clock clock = time.clock() -while (True): +while True: clock.tick() # Capture snapshot img = sensor.snapshot() diff --git a/scripts/examples/04-Barcodes/find_barcodes.py b/scripts/examples/04-Barcodes/find_barcodes.py index 775034cb3..822babfbd 100644 --- a/scripts/examples/04-Barcodes/find_barcodes.py +++ b/scripts/examples/04-Barcodes/find_barcodes.py @@ -10,9 +10,9 @@ import math sensor.reset() sensor.set_pixformat(sensor.GRAYSCALE) -sensor.set_framesize(sensor.VGA) # High Res! -sensor.set_windowing((640, 80)) # V Res of 80 == less work (40 for 2X the speed). -sensor.skip_frames(time = 2000) +sensor.set_framesize(sensor.VGA) # High Res! +sensor.set_windowing((640, 80)) # V Res of 80 == less work (40 for 2X the speed). +sensor.skip_frames(time=2000) sensor.set_auto_gain(False) # must turn this off to prevent image washout... sensor.set_auto_whitebal(False) # must turn this off to prevent image washout... clock = time.clock() @@ -22,47 +22,58 @@ clock = time.clock() # a lower resolution. That said, barcode detection requires a higher resolution # to work well so it should always be run at 640x480 in grayscale... 
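# Sketch (not part of this patch): the barcode_name() if-chain below can also
# be written as a dict lookup keyed on the image module's type constants:
import image

BARCODE_NAMES = {
    image.EAN2: "EAN2", image.EAN5: "EAN5", image.EAN8: "EAN8",
    image.UPCE: "UPCE", image.ISBN10: "ISBN10", image.UPCA: "UPCA",
    image.EAN13: "EAN13", image.ISBN13: "ISBN13", image.I25: "I25",
    image.DATABAR: "DATABAR", image.DATABAR_EXP: "DATABAR_EXP",
    image.CODABAR: "CODABAR", image.CODE39: "CODE39", image.PDF417: "PDF417",
    image.CODE93: "CODE93", image.CODE128: "CODE128",
}

def barcode_name(code):
    return BARCODE_NAMES.get(code.type(), "UNKNOWN")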
+ def barcode_name(code): - if(code.type() == image.EAN2): + if code.type() == image.EAN2: return "EAN2" - if(code.type() == image.EAN5): + if code.type() == image.EAN5: return "EAN5" - if(code.type() == image.EAN8): + if code.type() == image.EAN8: return "EAN8" - if(code.type() == image.UPCE): + if code.type() == image.UPCE: return "UPCE" - if(code.type() == image.ISBN10): + if code.type() == image.ISBN10: return "ISBN10" - if(code.type() == image.UPCA): + if code.type() == image.UPCA: return "UPCA" - if(code.type() == image.EAN13): + if code.type() == image.EAN13: return "EAN13" - if(code.type() == image.ISBN13): + if code.type() == image.ISBN13: return "ISBN13" - if(code.type() == image.I25): + if code.type() == image.I25: return "I25" - if(code.type() == image.DATABAR): + if code.type() == image.DATABAR: return "DATABAR" - if(code.type() == image.DATABAR_EXP): + if code.type() == image.DATABAR_EXP: return "DATABAR_EXP" - if(code.type() == image.CODABAR): + if code.type() == image.CODABAR: return "CODABAR" - if(code.type() == image.CODE39): + if code.type() == image.CODE39: return "CODE39" - if(code.type() == image.PDF417): + if code.type() == image.PDF417: return "PDF417" - if(code.type() == image.CODE93): + if code.type() == image.CODE93: return "CODE93" - if(code.type() == image.CODE128): + if code.type() == image.CODE128: return "CODE128" -while(True): + +while True: clock.tick() img = sensor.snapshot() codes = img.find_barcodes() for code in codes: img.draw_rectangle(code.rect()) - print_args = (barcode_name(code), code.payload(), (180 * code.rotation()) / math.pi, code.quality(), clock.fps()) - print("Barcode %s, Payload \"%s\", rotation %f (degrees), quality %d, FPS %f" % print_args) + print_args = ( + barcode_name(code), + code.payload(), + (180 * code.rotation()) / math.pi, + code.quality(), + clock.fps(), + ) + print( + 'Barcode %s, Payload "%s", rotation %f (degrees), quality %d, FPS %f' + % print_args + ) if not codes: print("FPS %f" % clock.fps()) diff --git a/scripts/examples/04-Barcodes/find_datamatrices.py b/scripts/examples/04-Barcodes/find_datamatrices.py index a8df0caa9..039ca75ed 100644 --- a/scripts/examples/04-Barcodes/find_datamatrices.py +++ b/scripts/examples/04-Barcodes/find_datamatrices.py @@ -10,20 +10,28 @@ import math sensor.reset() sensor.set_pixformat(sensor.RGB565) sensor.set_framesize(sensor.QVGA) -sensor.skip_frames(time = 2000) +sensor.skip_frames(time=2000) sensor.set_auto_gain(False) # must turn this off to prevent image washout... sensor.set_auto_whitebal(False) # must turn this off to prevent image washout... clock = time.clock() -while(True): +while True: clock.tick() img = sensor.snapshot() - img.lens_corr(1.8) # strength of 1.8 is good for the 2.8mm lens. + img.lens_corr(1.8) # strength of 1.8 is good for the 2.8mm lens. 
matrices = img.find_datamatrices() for matrix in matrices: - img.draw_rectangle(matrix.rect(), color = (255, 0, 0)) - print_args = (matrix.rows(), matrix.columns(), matrix.payload(), (180 * matrix.rotation()) / math.pi, clock.fps()) - print("Matrix [%d:%d], Payload \"%s\", rotation %f (degrees), FPS %f" % print_args) + img.draw_rectangle(matrix.rect(), color=(255, 0, 0)) + print_args = ( + matrix.rows(), + matrix.columns(), + matrix.payload(), + (180 * matrix.rotation()) / math.pi, + clock.fps(), + ) + print( + 'Matrix [%d:%d], Payload "%s", rotation %f (degrees), FPS %f' % print_args + ) if not matrices: print("FPS %f" % clock.fps()) diff --git a/scripts/examples/04-Barcodes/find_datamatrices_w_lens_zoom.py b/scripts/examples/04-Barcodes/find_datamatrices_w_lens_zoom.py index 751cad8bd..4c4806dba 100644 --- a/scripts/examples/04-Barcodes/find_datamatrices_w_lens_zoom.py +++ b/scripts/examples/04-Barcodes/find_datamatrices_w_lens_zoom.py @@ -10,20 +10,28 @@ import math sensor.reset() sensor.set_pixformat(sensor.RGB565) sensor.set_framesize(sensor.VGA) -sensor.set_windowing((320, 240)) # 2x Zoom -sensor.skip_frames(time = 2000) +sensor.set_windowing((320, 240)) # 2x Zoom +sensor.skip_frames(time=2000) sensor.set_auto_gain(False) # must turn this off to prevent image washout... sensor.set_auto_whitebal(False) # must turn this off to prevent image washout... clock = time.clock() -while(True): +while True: clock.tick() img = sensor.snapshot() matrices = img.find_datamatrices() for matrix in matrices: - img.draw_rectangle(matrix.rect(), color = (255, 0, 0)) - print_args = (matrix.rows(), matrix.columns(), matrix.payload(), (180 * matrix.rotation()) / math.pi, clock.fps()) - print("Matrix [%d:%d], Payload \"%s\", rotation %f (degrees), FPS %f" % print_args) + img.draw_rectangle(matrix.rect(), color=(255, 0, 0)) + print_args = ( + matrix.rows(), + matrix.columns(), + matrix.payload(), + (180 * matrix.rotation()) / math.pi, + clock.fps(), + ) + print( + 'Matrix [%d:%d], Payload "%s", rotation %f (degrees), FPS %f' % print_args + ) if not matrices: print("FPS %f" % clock.fps()) diff --git a/scripts/examples/04-Barcodes/qrcodes_with_lens_corr.py b/scripts/examples/04-Barcodes/qrcodes_with_lens_corr.py index cecd345a3..6fe6bde7a 100644 --- a/scripts/examples/04-Barcodes/qrcodes_with_lens_corr.py +++ b/scripts/examples/04-Barcodes/qrcodes_with_lens_corr.py @@ -9,15 +9,15 @@ import time sensor.reset() sensor.set_pixformat(sensor.RGB565) sensor.set_framesize(sensor.QVGA) -sensor.skip_frames(time = 2000) -sensor.set_auto_gain(False) # must turn this off to prevent image washout... +sensor.skip_frames(time=2000) +sensor.set_auto_gain(False) # must turn this off to prevent image washout... clock = time.clock() -while(True): +while True: clock.tick() img = sensor.snapshot() - img.lens_corr(1.8) # strength of 1.8 is good for the 2.8mm lens. + img.lens_corr(1.8) # strength of 1.8 is good for the 2.8mm lens. 
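# Sketch (not part of this patch): the rotation readouts in the barcode and
# datamatrix examples convert radians to degrees by hand;
# (180 * r) / math.pi is equivalent to math.degrees(r).
import math

print((180 * (math.pi / 4)) / math.pi)  # 45.0
print(math.degrees(math.pi / 4))  # 45.0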
     for code in img.find_qrcodes():
-        img.draw_rectangle(code.rect(), color = (255, 0, 0))
+        img.draw_rectangle(code.rect(), color=(255, 0, 0))
         print(code)
     print(clock.fps())
diff --git a/scripts/examples/04-Barcodes/qrcodes_with_lens_zoom.py b/scripts/examples/04-Barcodes/qrcodes_with_lens_zoom.py
index 5a39d3dbe..f5b657cf5 100644
--- a/scripts/examples/04-Barcodes/qrcodes_with_lens_zoom.py
+++ b/scripts/examples/04-Barcodes/qrcodes_with_lens_zoom.py
@@ -9,15 +9,15 @@ import time
 sensor.reset()
 sensor.set_pixformat(sensor.GRAYSCALE)
 sensor.set_framesize(sensor.VGA)
-sensor.set_windowing((240, 240)) # look at center 240x240 pixels of the VGA resolution.
-sensor.skip_frames(time = 2000)
-sensor.set_auto_gain(False) # must turn this off to prevent image washout...
+sensor.set_windowing((240, 240))  # look at center 240x240 pixels of the VGA resolution.
+sensor.skip_frames(time=2000)
+sensor.set_auto_gain(False)  # must turn this off to prevent image washout...
 clock = time.clock()

-while(True):
+while True:
     clock.tick()
     img = sensor.snapshot()
     for code in img.find_qrcodes():
-        img.draw_rectangle(code.rect(), color = 127)
+        img.draw_rectangle(code.rect(), color=127)
         print(code)
     print(clock.fps())
diff --git a/scripts/examples/05-Feature-Detection/edges.py b/scripts/examples/05-Feature-Detection/edges.py
index d84fa7b2f..2c1d5515c 100644
--- a/scripts/examples/05-Feature-Detection/edges.py
+++ b/scripts/examples/05-Feature-Detection/edges.py
@@ -5,18 +5,18 @@ import sensor
 import image
 import time

-sensor.reset() # Initialize the camera sensor.
-sensor.set_pixformat(sensor.GRAYSCALE) # or sensor.RGB565
-sensor.set_framesize(sensor.QQVGA) # or sensor.QVGA (or others)
-sensor.skip_frames(time = 2000) # Let new settings take affect.
+sensor.reset()  # Initialize the camera sensor.
+sensor.set_pixformat(sensor.GRAYSCALE)  # or sensor.RGB565
+sensor.set_framesize(sensor.QQVGA)  # or sensor.QVGA (or others)
+sensor.skip_frames(time=2000)  # Let new settings take effect.
 sensor.set_gainceiling(8)
-clock = time.clock() # Tracks FPS.
-while(True):
-    clock.tick() # Track elapsed milliseconds between snapshots().
-    img = sensor.snapshot() # Take a picture and return the image.
+clock = time.clock()  # Tracks FPS.
+while True:
+    clock.tick()  # Track elapsed milliseconds between snapshots().
+    img = sensor.snapshot()  # Take a picture and return the image.
     # Use Canny edge detector
     img.find_edges(image.EDGE_CANNY, threshold=(50, 80))
     # Faster simpler edge detection
-    #img.find_edges(image.EDGE_SIMPLE, threshold=(100, 255))
-    print(clock.fps()) # Note: Your OpenMV Cam runs about half as fast while
+    # img.find_edges(image.EDGE_SIMPLE, threshold=(100, 255))
+    print(clock.fps())  # Note: Your OpenMV Cam runs about half as fast while
diff --git a/scripts/examples/05-Feature-Detection/find_circles.py b/scripts/examples/05-Feature-Detection/find_circles.py
index 445766fde..a29dc5a5d 100644
--- a/scripts/examples/05-Feature-Detection/find_circles.py
+++ b/scripts/examples/05-Feature-Detection/find_circles.py
@@ -10,12 +10,12 @@ import sensor
 import time

 sensor.reset()
-sensor.set_pixformat(sensor.RGB565) # grayscale is faster
+sensor.set_pixformat(sensor.RGB565)  # grayscale is faster
 sensor.set_framesize(sensor.QQVGA)
-sensor.skip_frames(time = 2000)
+sensor.skip_frames(time=2000)
 clock = time.clock()

-while(True):
+while True:
     clock.tick()
     img = sensor.snapshot().lens_corr(1.8)

@@ -32,9 +32,16 @@ while(True):
     # r_min, r_max, and r_step control what radiuses of circles are tested.
     # Shrinking the number of tested circle radiuses yields a big performance boost.
-    for c in img.find_circles(threshold = 2000, x_margin = 10, y_margin = 10, r_margin = 10,
-                              r_min = 2, r_max = 100, r_step = 2):
-        img.draw_circle(c.x(), c.y(), c.r(), color = (255, 0, 0))
+    for c in img.find_circles(
+        threshold=2000,
+        x_margin=10,
+        y_margin=10,
+        r_margin=10,
+        r_min=2,
+        r_max=100,
+        r_step=2,
+    ):
+        img.draw_circle(c.x(), c.y(), c.r(), color=(255, 0, 0))
         print(c)
 
     print("FPS %f" % clock.fps())
diff --git a/scripts/examples/05-Feature-Detection/find_line_segments.py b/scripts/examples/05-Feature-Detection/find_line_segments.py
index dd86a10f3..4e42f738a 100644
--- a/scripts/examples/05-Feature-Detection/find_line_segments.py
+++ b/scripts/examples/05-Feature-Detection/find_line_segments.py
@@ -6,24 +6,25 @@
 # find_line_segments() finds finite length lines (but is slow).
-# Use find_line_segments() to find non-infinite lines (and is fast).
+# Use find_lines() to find infinite lines (and is fast).
 
-enable_lens_corr = False # turn on for straighter lines...
-
 import sensor
 import time
 
+ENABLE_LENS_CORR = False  # turn on for straighter lines...
+
 sensor.reset()
-sensor.set_pixformat(sensor.RGB565) # grayscale is faster
+sensor.set_pixformat(sensor.RGB565)  # grayscale is faster
 sensor.set_framesize(sensor.QQVGA)
-sensor.skip_frames(time = 2000)
+sensor.skip_frames(time=2000)
 clock = time.clock()
 
 # All lines also have `x1()`, `y1()`, `x2()`, and `y2()` methods to get their end-points
 # and a `line()` method to get all the above as one 4 value tuple for `draw_line()`.
 
-while(True):
+while True:
     clock.tick()
     img = sensor.snapshot()
-    if enable_lens_corr: img.lens_corr(1.8) # for 2.8mm lens...
+    if ENABLE_LENS_CORR:
+        img.lens_corr(1.8)  # for 2.8mm lens...
 
     # `merge_distance` controls the merging of nearby lines. At 0 (the default), no
     # merging is done. At 1, any line 1 pixel away from another is merged... and so
@@ -33,8 +34,8 @@ while(True):
     # `max_theta_diff` controls the maximum amount of rotation difference between
     # any two lines about to be merged. The default setting allows for 15 degrees.
 
-    for l in img.find_line_segments(merge_distance = 0, max_theta_diff = 5):
-        img.draw_line(l.line(), color = (255, 0, 0))
+    for l in img.find_line_segments(merge_distance=0, max_theta_diff=5):
+        img.draw_line(l.line(), color=(255, 0, 0))
         # print(l)
 
     print("FPS %f" % clock.fps())
diff --git a/scripts/examples/05-Feature-Detection/find_lines.py b/scripts/examples/05-Feature-Detection/find_lines.py
index 6675ec081..65f9ff5fc 100644
--- a/scripts/examples/05-Feature-Detection/find_lines.py
+++ b/scripts/examples/05-Feature-Detection/find_lines.py
@@ -9,15 +9,15 @@
 
 # find_lines() finds infinite length lines. Use find_line_segments() to find non-infinite lines.
 
-enable_lens_corr = False # turn on for straighter lines...
-
 import sensor
 import time
 
+ENABLE_LENS_CORR = False  # turn on for straighter lines...
+
 sensor.reset()
-sensor.set_pixformat(sensor.RGB565) # grayscale is faster
+sensor.set_pixformat(sensor.RGB565)  # grayscale is faster
 sensor.set_framesize(sensor.QQVGA)
-sensor.skip_frames(time = 2000)
+sensor.skip_frames(time=2000)
 clock = time.clock()
 
 # All line objects have a `theta()` method to get their rotation angle in degrees.
@@ -29,10 +29,11 @@ max_degree = 179
 
 # All lines also have `x1()`, `y1()`, `x2()`, and `y2()` methods to get their end-points
 # and a `line()` method to get all the above as one 4 value tuple for `draw_line()`.
 
-while(True):
+while True:
     clock.tick()
     img = sensor.snapshot()
-    if enable_lens_corr: img.lens_corr(1.8) # for 2.8mm lens...
+ if ENABLE_LENS_CORR: + img.lens_corr(1.8) # for 2.8mm lens... # `threshold` controls how many lines in the image are found. Only lines with # edge difference magnitude sums greater than `threshold` are detected... @@ -46,9 +47,9 @@ while(True): # `theta_margin` and `rho_margin` control merging similar lines. If two lines # theta and rho value differences are less than the margins then they are merged. - for l in img.find_lines(threshold = 1000, theta_margin = 25, rho_margin = 25): + for l in img.find_lines(threshold=1000, theta_margin=25, rho_margin=25): if (min_degree <= l.theta()) and (l.theta() <= max_degree): - img.draw_line(l.line(), color = (255, 0, 0)) + img.draw_line(l.line(), color=(255, 0, 0)) # print(l) print("FPS %f" % clock.fps()) diff --git a/scripts/examples/05-Feature-Detection/find_rects.py b/scripts/examples/05-Feature-Detection/find_rects.py index d56f37558..ab29cfe9d 100644 --- a/scripts/examples/05-Feature-Detection/find_rects.py +++ b/scripts/examples/05-Feature-Detection/find_rects.py @@ -11,12 +11,12 @@ import sensor import time sensor.reset() -sensor.set_pixformat(sensor.RGB565) # grayscale is faster (160x120 max on OpenMV-M7) +sensor.set_pixformat(sensor.RGB565) # grayscale is faster (160x120 max on OpenMV-M7) sensor.set_framesize(sensor.QQVGA) -sensor.skip_frames(time = 2000) +sensor.skip_frames(time=2000) clock = time.clock() -while(True): +while True: clock.tick() img = sensor.snapshot() @@ -24,9 +24,10 @@ while(True): # rectangles detected in the image which have low edge magnitudes. Rectangles # have larger edge magnitudes the larger and more contrasty they are... - for r in img.find_rects(threshold = 10000): - img.draw_rectangle(r.rect(), color = (255, 0, 0)) - for p in r.corners(): img.draw_circle(p[0], p[1], 5, color = (0, 255, 0)) + for r in img.find_rects(threshold=10000): + img.draw_rectangle(r.rect(), color=(255, 0, 0)) + for p in r.corners(): + img.draw_circle(p[0], p[1], 5, color=(0, 255, 0)) print(r) print("FPS %f" % clock.fps()) diff --git a/scripts/examples/05-Feature-Detection/hog.py b/scripts/examples/05-Feature-Detection/hog.py index 5a9e6a300..6170e0a7c 100644 --- a/scripts/examples/05-Feature-Detection/hog.py +++ b/scripts/examples/05-Feature-Detection/hog.py @@ -14,16 +14,16 @@ sensor.set_contrast(1) sensor.set_gainceiling(8) sensor.set_framesize(sensor.QVGA) sensor.set_pixformat(sensor.GRAYSCALE) -sensor.skip_frames(time = 2000) +sensor.skip_frames(time=2000) -clock = time.clock() # Tracks FPS. -while (True): +clock = time.clock() # Tracks FPS. 
+while True: clock.tick() img = sensor.snapshot() img.find_hog() # Uncomment to save raw FB to file and exit the loop - #img.save("/hog.pgm") - #break + # img.save("/hog.pgm") + # break print(clock.fps()) diff --git a/scripts/examples/05-Feature-Detection/keypoints.py b/scripts/examples/05-Feature-Detection/keypoints.py index f2a2f286f..083536f46 100644 --- a/scripts/examples/05-Feature-Detection/keypoints.py +++ b/scripts/examples/05-Feature-Detection/keypoints.py @@ -16,9 +16,10 @@ sensor.set_framesize(sensor.VGA) sensor.set_windowing((320, 240)) sensor.set_pixformat(sensor.GRAYSCALE) -sensor.skip_frames(time = 2000) +sensor.skip_frames(time=2000) sensor.set_auto_gain(False, value=100) + def draw_keypoints(img, kpts): if kpts: print(kpts) @@ -26,17 +27,18 @@ def draw_keypoints(img, kpts): img = sensor.snapshot() time.sleep_ms(1000) + kpts1 = None # NOTE: uncomment to load a keypoints descriptor from file -#kpts1 = image.load_descriptor("/desc.orb") -#img = sensor.snapshot() -#draw_keypoints(img, kpts1) +# kpts1 = image.load_descriptor("/desc.orb") +# img = sensor.snapshot() +# draw_keypoints(img, kpts1) clock = time.clock() -while (True): +while True: clock.tick() img = sensor.snapshot() - if (kpts1 == None): + if kpts1 is None: # NOTE: By default find_keypoints returns multi-scale keypoints extracted from an image pyramid. kpts1 = img.find_keypoints(max_keypoints=150, threshold=10, scale_factor=1.2) draw_keypoints(img, kpts1) @@ -44,17 +46,17 @@ while (True): # NOTE: When extracting keypoints to match the first descriptor, we use normalized=True to extract # keypoints from the first scale only, which will match one of the scales in the first descriptor. kpts2 = img.find_keypoints(max_keypoints=150, threshold=10, normalized=True) - if (kpts2): + if kpts2: match = image.match_descriptor(kpts1, kpts2, threshold=85) - if (match.count()>10): + if match.count() > 10: # If we have at least n "good matches" # Draw bounding rectangle and cross. img.draw_rectangle(match.rect()) img.draw_cross(match.cx(), match.cy(), size=10) - print(kpts2, "matched:%d dt:%d"%(match.count(), match.theta())) + print(kpts2, "matched:%d dt:%d" % (match.count(), match.theta())) # NOTE: uncomment if you want to draw the keypoints - #img.draw_keypoints(kpts2, size=KEYPOINTS_SIZE, matched=True) + # img.draw_keypoints(kpts2, size=KEYPOINTS_SIZE, matched=True) # Draw FPS - img.draw_string(0, 0, "FPS:%.2f"%(clock.fps())) + img.draw_string(0, 0, "FPS:%.2f" % (clock.fps())) diff --git a/scripts/examples/05-Feature-Detection/keypoints_save.py b/scripts/examples/05-Feature-Detection/keypoints_save.py index 6230ab0de..35aa00f99 100644 --- a/scripts/examples/05-Feature-Detection/keypoints_save.py +++ b/scripts/examples/05-Feature-Detection/keypoints_save.py @@ -18,7 +18,7 @@ sensor.set_framesize(sensor.VGA) sensor.set_windowing((320, 240)) sensor.set_pixformat(sensor.GRAYSCALE) -sensor.skip_frames(time = 2000) +sensor.skip_frames(time=2000) sensor.set_auto_gain(False, value=100) FILE_NAME = "desc" @@ -27,13 +27,13 @@ img = sensor.snapshot() # NOTE: By default find_keypoints returns multi-scale keypoints extracted from an image pyramid. 
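# [Editor's sketch; not part of the patch.] With scale_factor=1.2, each level
# of the keypoint image pyramid mentioned above is 1.2x smaller than the last.
# A rough count of usable levels for the 240-pixel-high window used here,
# assuming a hypothetical 32-pixel minimum patch size (an illustration, not an
# OpenMV constant):

import math

levels = int(math.log(240 / 32) / math.log(1.2))  # about 11 pyramid levels
print("approx. pyramid levels:", levels)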
 kpts = img.find_keypoints(max_keypoints=150, threshold=10, scale_factor=1.2)
-if (kpts == None):
-    raise(Exception("Couldn't find any keypoints!"))
+if kpts is None:
+    raise Exception("Couldn't find any keypoints!")
 
-image.save_descriptor(kpts, "/%s.orb"%(FILE_NAME))
-img.save("/%s.pgm"%(FILE_NAME))
+image.save_descriptor(kpts, "/%s.orb" % (FILE_NAME))
+img.save("/%s.pgm" % (FILE_NAME))
 
 img.draw_keypoints(kpts)
 sensor.snapshot()
 time.sleep_ms(1000)
-raise(Exception("Done! Please reset the camera"))
+raise Exception("Done! Please reset the camera")
diff --git a/scripts/examples/05-Feature-Detection/lbp.py b/scripts/examples/05-Feature-Detection/lbp.py
index 72e5ea19d..1a4be5827 100644
--- a/scripts/examples/05-Feature-Detection/lbp.py
+++ b/scripts/examples/05-Feature-Detection/lbp.py
@@ -10,7 +10,6 @@ import sensor
 import time
 import image
 
-sensor.reset() # Reset sensor
 sensor.reset()
 
@@ -33,10 +32,10 @@ for i in range(0, 30):
     img.draw_string(0, 0, "Please wait...")
 
 d0 = None
-#d0 = image.load_descriptor("/desc.lbp")
+# d0 = image.load_descriptor("/desc.lbp")
 clock = time.clock()
 
-while (True):
+while True:
     clock.tick()
     img = sensor.snapshot()
 
@@ -44,12 +43,12 @@ while (True):
     if objects:
         face = objects[0]
         d1 = img.find_lbp(face)
-        if (d0 == None):
+        if d0 is None:
             d0 = d1
         else:
             dist = image.match_descriptor(d0, d1)
-            img.draw_string(0, 10, "Match %d%%"%(dist))
+            img.draw_string(0, 10, "Match %d%%" % (dist))
 
         img.draw_rectangle(face)
     # Draw FPS
-    img.draw_string(0, 0, "FPS:%.2f"%(clock.fps()))
+    img.draw_string(0, 0, "FPS:%.2f" % (clock.fps()))
diff --git a/scripts/examples/05-Feature-Detection/linear_regression_fast.py b/scripts/examples/05-Feature-Detection/linear_regression_fast.py
index b34c7de7c..3c46be0e4 100644
--- a/scripts/examples/05-Feature-Detection/linear_regression_fast.py
+++ b/scripts/examples/05-Feature-Detection/linear_regression_fast.py
@@ -10,20 +10,19 @@
 # method to fit the line. However, this method is NOT GOOD FOR ANY images that
 # have a lot (or really any) outlier points which corrupt the line fit...
 
-THRESHOLD = (0, 100) # Grayscale threshold for dark things...
-BINARY_VISIBLE = True # Does binary first so you can see what the linear regression
-                      # is being run on... might lower FPS though.
-
 import sensor
 import time
 
+THRESHOLD = (0, 100)  # Grayscale threshold for dark things.
+BINARY_VISIBLE = True  # Binary pass first to see what linear regression is running on.
+
 sensor.reset()
 sensor.set_pixformat(sensor.GRAYSCALE)
 sensor.set_framesize(sensor.QQVGA)
-sensor.skip_frames(time = 2000)
+sensor.skip_frames(time=2000)
 clock = time.clock()
 
-while(True):
+while True:
     clock.tick()
     img = sensor.snapshot().binary([THRESHOLD]) if BINARY_VISIBLE else sensor.snapshot()
 
@@ -34,10 +33,13 @@ while(True):
     # magnitude() represents how well the linear regression worked. It goes from
     # (0, INF] where 0 is returned for a circle. The more linear the
     # scene is the higher the magnitude.
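# [Editor's sketch; not part of the patch.] magnitude() can also gate weak
# fits before drawing. The 0.5 cutoff is an arbitrary illustration to tune per
# scene; img, BINARY_VISIBLE, and THRESHOLD come from the surrounding script:

line = img.get_regression([(255, 255) if BINARY_VISIBLE else THRESHOLD])
if line and line.magnitude() > 0.5:  # skip near-circular / noisy scenes
    img.draw_line(line.line(), color=127)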
-    line = img.get_regression([(255,255) if BINARY_VISIBLE else THRESHOLD])
+    line = img.get_regression([(255, 255) if BINARY_VISIBLE else THRESHOLD])
 
-    if (line): img.draw_line(line.line(), color = 127)
-    print("FPS %f, mag = %s" % (clock.fps(), str(line.magnitude()) if (line) else "N/A"))
+    if line:
+        img.draw_line(line.line(), color=127)
+    print(
+        "FPS %f, mag = %s" % (clock.fps(), str(line.magnitude()) if (line) else "N/A")
+    )
 
 # About negative rho values:
 #
diff --git a/scripts/examples/05-Feature-Detection/linear_regression_robust.py b/scripts/examples/05-Feature-Detection/linear_regression_robust.py
index 8764c01aa..ac5492ce4 100644
--- a/scripts/examples/05-Feature-Detection/linear_regression_robust.py
+++ b/scripts/examples/05-Feature-Detection/linear_regression_robust.py
@@ -12,20 +12,19 @@
 # TO LIMIT THE NUMBER OF PIXELS the robust algorithm works on or it can actually
 # take seconds for the algorithm to give you a result... THRESHOLD VERY CAREFULLY!
 
-THRESHOLD = (0, 100) # Grayscale threshold for dark things...
-BINARY_VISIBLE = True # Does binary first so you can see what the linear regression
-                      # is being run on... might lower FPS though.
-
 import sensor
 import time
 
+THRESHOLD = (0, 100)  # Grayscale threshold for dark things.
+BINARY_VISIBLE = True  # Binary pass first to see what linear regression is running on.
+
 sensor.reset()
 sensor.set_pixformat(sensor.GRAYSCALE)
-sensor.set_framesize(sensor.QQQVGA) # 80x60 (4,800 pixels) - O(N^2) max = 2,3040,000.
-sensor.skip_frames(time = 2000) # WARNING: If you use QQVGA it may take seconds
-clock = time.clock() # to process a frame sometimes.
+sensor.set_framesize(sensor.QQQVGA)  # 80x60 (4,800 pixels) - O(N^2) max = 23,040,000.
+sensor.skip_frames(time=2000)  # WARNING: If you use QQVGA it may take seconds
+clock = time.clock()  # to process a frame sometimes.
 
-while(True):
+while True:
     clock.tick()
     img = sensor.snapshot().binary([THRESHOLD]) if BINARY_VISIBLE else sensor.snapshot()
 
@@ -36,10 +35,15 @@ while(True):
     # magnitude() represents how well the linear regression worked. It means something
     # different for the robust linear regression. In general, the larger the value the
     # better...
-    line = img.get_regression([(255,255) if BINARY_VISIBLE else THRESHOLD], robust = True)
+    line = img.get_regression(
+        [(255, 255) if BINARY_VISIBLE else THRESHOLD], robust=True
+    )
 
-    if (line): img.draw_line(line.line(), color = 127)
-    print("FPS %f, mag = %s" % (clock.fps(), str(line.magnitude()) if (line) else "N/A"))
+    if line:
+        img.draw_line(line.line(), color=127)
+    print(
+        "FPS %f, mag = %s" % (clock.fps(), str(line.magnitude()) if (line) else "N/A")
+    )
 
 # About negative rho values:
 #
diff --git a/scripts/examples/05-Feature-Detection/selective_search.py b/scripts/examples/05-Feature-Detection/selective_search.py
index c4b168a77..0ac1e3353 100644
--- a/scripts/examples/05-Feature-Detection/selective_search.py
+++ b/scripts/examples/05-Feature-Detection/selective_search.py
@@ -2,22 +2,22 @@
 
 import sensor
 import time
-from random import randint
 
-sensor.reset()                      # Reset and initialize the sensor.
-sensor.set_pixformat(sensor.RGB565) # Set pixel format to RGB565 (or GRAYSCALE)
-sensor.set_framesize(sensor.QVGA)   # Set frame size to QVGA (320x240)
-sensor.skip_frames(time = 2000)     # Wait for settings take effect.
+sensor.reset()  # Reset and initialize the sensor.
+sensor.set_pixformat(sensor.RGB565)  # Set pixel format to RGB565 (or GRAYSCALE)
+sensor.set_framesize(sensor.QVGA)  # Set frame size to QVGA (320x240)
+sensor.skip_frames(time=2000)  # Wait for settings to take effect.
 sensor.set_auto_gain(False)
 sensor.set_auto_exposure(False, exposure_us=10000)
 
-clock = time.clock()                # Create a clock object to track the FPS.
+clock = time.clock()  # Create a clock object to track the FPS.
 
-while(True):
-    clock.tick()            # Update the FPS clock.
-    img = sensor.snapshot() # Take a picture and return the image.
-    rois = img.selective_search(threshold = 200, size = 20, a1=0.5, a2=1.0, a3=1.0)
+while True:
+    clock.tick()  # Update the FPS clock.
+    img = sensor.snapshot()  # Take a picture and return the image.
+    rois = img.selective_search(threshold=200, size=20, a1=0.5, a2=1.0, a3=1.0)
     for r in rois:
         img.draw_rectangle(r, color=(255, 0, 0))
-        #img.draw_rectangle(r, color=(randint(100, 255), randint(100, 255), randint(100, 255)))
+        # from random import randint
+        # img.draw_rectangle(r, color=(randint(100, 255), randint(100, 255), randint(100, 255)))
     print(clock.fps())
diff --git a/scripts/examples/05-Feature-Detection/template_matching.py b/scripts/examples/05-Feature-Detection/template_matching.py
index a18e80236..d6d81a4fd 100644
--- a/scripts/examples/05-Feature-Detection/template_matching.py
+++ b/scripts/examples/05-Feature-Detection/template_matching.py
@@ -11,7 +11,9 @@ import time
 import sensor
 import image
-from image import SEARCH_EX, SEARCH_DS
+from image import SEARCH_EX
+
+# from image import SEARCH_DS
 
 # Reset sensor
 sensor.reset()
@@ -22,7 +24,7 @@ sensor.set_gainceiling(16)
 # Max resolution for template matching with SEARCH_EX is QQVGA
 sensor.set_framesize(sensor.QQVGA)
 # You can set windowing to reduce the search image.
-#sensor.set_windowing(((640-80)//2, (480-60)//2, 80, 60))
+# sensor.set_windowing(((640-80)//2, (480-60)//2, 80, 60))
 sensor.set_pixformat(sensor.GRAYSCALE)
 
 # Load template.
@@ -32,7 +34,7 @@ template = image.Image("/template.pgm")
 clock = time.clock()
 
 # Run template matching
-while (True):
+while True:
     clock.tick()
     img = sensor.snapshot()
 
@@ -43,7 +45,9 @@ while (True):
     #
     # Note1: ROI has to be smaller than the image and bigger than the template.
     # Note2: In diamond search, step and ROI are both ignored.
-    r = img.find_template(template, 0.70, step=4, search=SEARCH_EX) #, roi=(10, 0, 60, 60))
+    r = img.find_template(
+        template, 0.70, step=4, search=SEARCH_EX
+    )  # , roi=(10, 0, 60, 60))
 
     if r:
         img.draw_rectangle(r)
diff --git a/scripts/examples/06-April-Tags/find_apriltags.py b/scripts/examples/06-April-Tags/find_apriltags.py
index 8b3de069a..0f94b148d 100644
--- a/scripts/examples/06-April-Tags/find_apriltags.py
+++ b/scripts/examples/06-April-Tags/find_apriltags.py
@@ -10,8 +10,10 @@ import math
 
 sensor.reset()
 sensor.set_pixformat(sensor.RGB565)
-sensor.set_framesize(sensor.QQVGA) # we run out of memory if the resolution is much bigger...
-sensor.skip_frames(time = 2000)
+sensor.set_framesize(
+    sensor.QQVGA
+)  # we run out of memory if the resolution is much bigger...
+sensor.skip_frames(time=2000)
 sensor.set_auto_gain(False)  # must turn this off to prevent image washout...
 sensor.set_auto_whitebal(False)  # must turn this off to prevent image washout...
 clock = time.clock()
 
@@ -22,12 +24,12 @@ clock = time.clock()
 # Returned tag objects will have their tag family and id within the tag family.
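# [Editor's sketch; not part of the patch.] Because the families argument is a
# bitmask built from the image.TAG* constants, a dict keyed on those same
# constants can replace the if-chain in family_name() below:

FAMILY_NAMES = {
    image.TAG16H5: "TAG16H5",
    image.TAG25H7: "TAG25H7",
    image.TAG25H9: "TAG25H9",
    image.TAG36H10: "TAG36H10",
    image.TAG36H11: "TAG36H11",
    image.ARTOOLKIT: "ARTOOLKIT",
}


def family_name(tag):
    return FAMILY_NAMES.get(tag.family(), "UNKNOWN")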
tag_families = 0 -tag_families |= image.TAG16H5 # comment out to disable this family -tag_families |= image.TAG25H7 # comment out to disable this family -tag_families |= image.TAG25H9 # comment out to disable this family -tag_families |= image.TAG36H10 # comment out to disable this family -tag_families |= image.TAG36H11 # comment out to disable this family (default family) -tag_families |= image.ARTOOLKIT # comment out to disable this family +tag_families |= image.TAG16H5 # comment out to disable this family +tag_families |= image.TAG25H7 # comment out to disable this family +tag_families |= image.TAG25H9 # comment out to disable this family +tag_families |= image.TAG36H10 # comment out to disable this family +tag_families |= image.TAG36H11 # comment out to disable this family (default family) +tag_families |= image.ARTOOLKIT # comment out to disable this family # What's the difference between tag families? Well, for example, the TAG16H5 family is effectively # a 4x4 square tag. So, this means it can be seen at a longer distance than a TAG36H11 tag which @@ -35,26 +37,30 @@ tag_families |= image.ARTOOLKIT # comment out to disable this family # rate for the 4x4 tag is much, much, much, higher than the 6x6 tag. So, unless you have a # reason to use the other tags families just use TAG36H11 which is the default family. + def family_name(tag): - if(tag.family() == image.TAG16H5): + if tag.family() == image.TAG16H5: return "TAG16H5" - if(tag.family() == image.TAG25H7): + if tag.family() == image.TAG25H7: return "TAG25H7" - if(tag.family() == image.TAG25H9): + if tag.family() == image.TAG25H9: return "TAG25H9" - if(tag.family() == image.TAG36H10): + if tag.family() == image.TAG36H10: return "TAG36H10" - if(tag.family() == image.TAG36H11): + if tag.family() == image.TAG36H11: return "TAG36H11" - if(tag.family() == image.ARTOOLKIT): + if tag.family() == image.ARTOOLKIT: return "ARTOOLKIT" -while(True): + +while True: clock.tick() img = sensor.snapshot() - for tag in img.find_apriltags(families=tag_families): # defaults to TAG36H11 without "families". - img.draw_rectangle(tag.rect(), color = (255, 0, 0)) - img.draw_cross(tag.cx(), tag.cy(), color = (0, 255, 0)) + for tag in img.find_apriltags( + families=tag_families + ): # defaults to TAG36H11 without "families". + img.draw_rectangle(tag.rect(), color=(255, 0, 0)) + img.draw_cross(tag.cx(), tag.cy(), color=(0, 255, 0)) print_args = (family_name(tag), tag.id(), (180 * tag.rotation()) / math.pi) print("Tag Family %s, Tag ID %d, rotation %f (degrees)" % print_args) print(clock.fps()) diff --git a/scripts/examples/06-April-Tags/find_apriltags_3d_pose.py b/scripts/examples/06-April-Tags/find_apriltags_3d_pose.py index 64cbd6955..f791c1fd6 100644 --- a/scripts/examples/06-April-Tags/find_apriltags_3d_pose.py +++ b/scripts/examples/06-April-Tags/find_apriltags_3d_pose.py @@ -9,8 +9,10 @@ import math sensor.reset() sensor.set_pixformat(sensor.RGB565) -sensor.set_framesize(sensor.QQVGA) # we run out of memory if the resolution is much bigger... -sensor.skip_frames(time = 2000) +sensor.set_framesize( + sensor.QQVGA +) # we run out of memory if the resolution is much bigger... +sensor.skip_frames(time=2000) sensor.set_auto_gain(False) # must turn this off to prevent image washout... sensor.set_auto_whitebal(False) # must turn this off to prevent image washout... clock = time.clock() @@ -38,22 +40,32 @@ clock = time.clock() # c_x is the image x center position in pixels. # c_y is the image y center position in pixels. 
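# [Editor's sketch; not part of the patch.] The defaults below follow from the
# 2.8 mm lens and the 3.984 mm x 2.952 mm sensor geometry described above, so
# they can be rescaled for any capture resolution. The helper name is
# hypothetical:

def default_intrinsics(w, h, focal_mm=2.8, sensor_w_mm=3.984, sensor_h_mm=2.952):
    f_x = (focal_mm / sensor_w_mm) * w  # focal length in x, in pixel units
    f_y = (focal_mm / sensor_h_mm) * h  # focal length in y, in pixel units
    c_x = w * 0.5  # principal point taken as the image center
    c_y = h * 0.5
    return f_x, f_y, c_x, c_y


# default_intrinsics(160, 120) reproduces the QQVGA values assigned below.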
-f_x = (2.8 / 3.984) * 160 # find_apriltags defaults to this if not set -f_y = (2.8 / 2.952) * 120 # find_apriltags defaults to this if not set -c_x = 160 * 0.5 # find_apriltags defaults to this if not set (the image.w * 0.5) -c_y = 120 * 0.5 # find_apriltags defaults to this if not set (the image.h * 0.5) +f_x = (2.8 / 3.984) * 160 # find_apriltags defaults to this if not set +f_y = (2.8 / 2.952) * 120 # find_apriltags defaults to this if not set +c_x = 160 * 0.5 # find_apriltags defaults to this if not set (the image.w * 0.5) +c_y = 120 * 0.5 # find_apriltags defaults to this if not set (the image.h * 0.5) + def degrees(radians): return (180 * radians) / math.pi -while(True): + +while True: clock.tick() img = sensor.snapshot() - for tag in img.find_apriltags(fx=f_x, fy=f_y, cx=c_x, cy=c_y): # defaults to TAG36H11 - img.draw_rectangle(tag.rect(), color = (255, 0, 0)) - img.draw_cross(tag.cx(), tag.cy(), color = (0, 255, 0)) - print_args = (tag.x_translation(), tag.y_translation(), tag.z_translation(), \ - degrees(tag.x_rotation()), degrees(tag.y_rotation()), degrees(tag.z_rotation())) + for tag in img.find_apriltags( + fx=f_x, fy=f_y, cx=c_x, cy=c_y + ): # defaults to TAG36H11 + img.draw_rectangle(tag.rect(), color=(255, 0, 0)) + img.draw_cross(tag.cx(), tag.cy(), color=(0, 255, 0)) + print_args = ( + tag.x_translation(), + tag.y_translation(), + tag.z_translation(), + degrees(tag.x_rotation()), + degrees(tag.y_rotation()), + degrees(tag.z_rotation()), + ) # Translation units are unknown. Rotation units are in degrees. print("Tx: %f, Ty %f, Tz %f, Rx %f, Ry %f, Rz %f" % print_args) print(clock.fps()) diff --git a/scripts/examples/06-April-Tags/find_apriltags_max_res.py b/scripts/examples/06-April-Tags/find_apriltags_max_res.py index b28b1889c..4e9456578 100644 --- a/scripts/examples/06-April-Tags/find_apriltags_max_res.py +++ b/scripts/examples/06-April-Tags/find_apriltags_max_res.py @@ -11,12 +11,17 @@ import omv sensor.reset() sensor.set_pixformat(sensor.GRAYSCALE) -sensor.set_framesize(sensor.VGA) # we run out of memory if the resolution is much bigger... +sensor.set_framesize( + sensor.VGA +) # we run out of memory if the resolution is much bigger... # AprilTags works on a maximum of < 64K pixels. -if omv.board_type() == "H7": sensor.set_windowing((240, 240)) -elif omv.board_type() == "M7": sensor.set_windowing((200, 200)) -else: raise Exception("You need a more powerful OpenMV Cam to run this script") -sensor.skip_frames(time = 2000) +if omv.board_type() == "H7": + sensor.set_windowing((240, 240)) +elif omv.board_type() == "M7": + sensor.set_windowing((200, 200)) +else: + raise Exception("You need a more powerful OpenMV Cam to run this script") +sensor.skip_frames(time=2000) sensor.set_auto_gain(False) # must turn this off to prevent image washout... sensor.set_auto_whitebal(False) # must turn this off to prevent image washout... clock = time.clock() @@ -27,12 +32,12 @@ clock = time.clock() # Returned tag objects will have their tag family and id within the tag family. 
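# [Editor's aside; not part of the patch.] A quick sanity check that the
# windowing choices above respect the "< 64K pixels" AprilTags limit this
# file cites:

for w, h in ((240, 240), (200, 200)):  # H7 and M7 windows, respectively
    assert w * h < 64 * 1024, "window exceeds the AprilTags pixel budget"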
tag_families = 0 -tag_families |= image.TAG16H5 # comment out to disable this family -tag_families |= image.TAG25H7 # comment out to disable this family -tag_families |= image.TAG25H9 # comment out to disable this family -tag_families |= image.TAG36H10 # comment out to disable this family -tag_families |= image.TAG36H11 # comment out to disable this family (default family) -tag_families |= image.ARTOOLKIT # comment out to disable this family +tag_families |= image.TAG16H5 # comment out to disable this family +tag_families |= image.TAG25H7 # comment out to disable this family +tag_families |= image.TAG25H9 # comment out to disable this family +tag_families |= image.TAG36H10 # comment out to disable this family +tag_families |= image.TAG36H11 # comment out to disable this family (default family) +tag_families |= image.ARTOOLKIT # comment out to disable this family # What's the difference between tag families? Well, for example, the TAG16H5 family is effectively # a 4x4 square tag. So, this means it can be seen at a longer distance than a TAG36H11 tag which @@ -40,26 +45,30 @@ tag_families |= image.ARTOOLKIT # comment out to disable this family # rate for the 4x4 tag is much, much, much, higher than the 6x6 tag. So, unless you have a # reason to use the other tags families just use TAG36H11 which is the default family. + def family_name(tag): - if(tag.family() == image.TAG16H5): + if tag.family() == image.TAG16H5: return "TAG16H5" - if(tag.family() == image.TAG25H7): + if tag.family() == image.TAG25H7: return "TAG25H7" - if(tag.family() == image.TAG25H9): + if tag.family() == image.TAG25H9: return "TAG25H9" - if(tag.family() == image.TAG36H10): + if tag.family() == image.TAG36H10: return "TAG36H10" - if(tag.family() == image.TAG36H11): + if tag.family() == image.TAG36H11: return "TAG36H11" - if(tag.family() == image.ARTOOLKIT): + if tag.family() == image.ARTOOLKIT: return "ARTOOLKIT" -while(True): + +while True: clock.tick() img = sensor.snapshot() - for tag in img.find_apriltags(families=tag_families): # defaults to TAG36H11 without "families". - img.draw_rectangle(tag.rect(), color = 127) - img.draw_cross(tag.cx(), tag.cy(), color = 127) + for tag in img.find_apriltags( + families=tag_families + ): # defaults to TAG36H11 without "families". + img.draw_rectangle(tag.rect(), color=127) + img.draw_cross(tag.cx(), tag.cy(), color=127) print_args = (family_name(tag), tag.id(), (180 * tag.rotation()) / math.pi) print("Tag Family %s, Tag ID %d, rotation %f (degrees)" % print_args) print(clock.fps()) diff --git a/scripts/examples/06-April-Tags/find_apriltags_w_lens_zoom.py b/scripts/examples/06-April-Tags/find_apriltags_w_lens_zoom.py index b1dfbd0c9..f41a8fe0f 100644 --- a/scripts/examples/06-April-Tags/find_apriltags_w_lens_zoom.py +++ b/scripts/examples/06-April-Tags/find_apriltags_w_lens_zoom.py @@ -9,9 +9,11 @@ import math sensor.reset() sensor.set_pixformat(sensor.RGB565) -sensor.set_framesize(sensor.VGA) # we run out of memory if the resolution is much bigger... -sensor.set_windowing((160, 120)) # Look at center 160x120 pixels of the VGA resolution. -sensor.skip_frames(time = 2000) +sensor.set_framesize( + sensor.VGA +) # we run out of memory if the resolution is much bigger... +sensor.set_windowing((160, 120)) # Look at center 160x120 pixels of the VGA resolution. +sensor.skip_frames(time=2000) sensor.set_auto_gain(False) # must turn this off to prevent image washout... sensor.set_auto_whitebal(False) # must turn this off to prevent image washout... 
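# [Editor's aside; not part of the patch.] If tags still wash out with auto
# gain and auto white balance disabled, the exposure can be pinned as well,
# exactly as selective_search.py does earlier in this patch:

sensor.set_auto_exposure(False, exposure_us=10000)  # 10 ms; value from selective_search.py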
 clock = time.clock()
 
@@ -24,12 +26,12 @@ clock = time.clock()
 # rate for the 4x4 tag is much, much, much, higher than the 6x6 tag. So, unless you have a
 # reason to use the other tags families just use TAG36H11 which is the default family.
 
-while(True):
+while True:
     clock.tick()
     img = sensor.snapshot()
-    for tag in img.find_apriltags(): # defaults to TAG36H11
-        img.draw_rectangle(tag.rect(), color = (255, 0, 0))
-        img.draw_cross(tag.cx(), tag.cy(), color = (0, 255, 0))
+    for tag in img.find_apriltags():  # defaults to TAG36H11
+        img.draw_rectangle(tag.rect(), color=(255, 0, 0))
+        img.draw_cross(tag.cx(), tag.cy(), color=(0, 255, 0))
         print_args = (tag.id(), (180 * tag.rotation()) / math.pi)
         print("Tag Family TAG36H11, Tag ID %d, rotation %f (degrees)" % print_args)
     print(clock.fps())
diff --git a/scripts/examples/06-April-Tags/find_small_apriltags.py b/scripts/examples/06-April-Tags/find_small_apriltags.py
index b2709c170..fd73737f4 100644
--- a/scripts/examples/06-April-Tags/find_small_apriltags.py
+++ b/scripts/examples/06-April-Tags/find_small_apriltags.py
@@ -19,25 +19,28 @@ thresholds = (150, 255)
 
 sensor.reset()
 sensor.set_pixformat(sensor.GRAYSCALE)
-if omv.board_type() == "H7": sensor.set_framesize(sensor.VGA)
-elif omv.board_type() == "M7": sensor.set_framesize(sensor.QVGA)
-else: raise Exception("You need a more powerful OpenMV Cam to run this script")
-sensor.skip_frames(time = 200) # increase this to let the auto methods run for longer
-sensor.set_auto_gain(False) # must be turned off for color tracking
-sensor.set_auto_whitebal(False) # must be turned off for color tracking
+if omv.board_type() == "H7":
+    sensor.set_framesize(sensor.VGA)
+elif omv.board_type() == "M7":
+    sensor.set_framesize(sensor.QVGA)
+else:
+    raise Exception("You need a more powerful OpenMV Cam to run this script")
+sensor.skip_frames(time=200)  # increase this to let the auto methods run for longer
+sensor.set_auto_gain(False)  # must be turned off for color tracking
+sensor.set_auto_whitebal(False)  # must be turned off for color tracking
 clock = time.clock()
 
 # The apriltag code supports up to 6 tag families which can be processed at the same time.
 # Returned tag objects will have their tag family and id within the tag family.
 
 tag_families = 0
-tag_families |= image.TAG16H5 # comment out to disable this family
-tag_families |= image.TAG25H7 # comment out to disable this family
-tag_families |= image.TAG25H9 # comment out to disable this family
-tag_families |= image.TAG36H10 # comment out to disable this family
-tag_families |= image.TAG36H11 # comment out to disable this family (default family)
-tag_families |= image.ARTOOLKIT # comment out to disable this family
+tag_families |= image.TAG16H5  # comment out to disable this family
+tag_families |= image.TAG25H7  # comment out to disable this family
+tag_families |= image.TAG25H9  # comment out to disable this family
+tag_families |= image.TAG36H10  # comment out to disable this family
+tag_families |= image.TAG36H11  # comment out to disable this family (default family)
+tag_families |= image.ARTOOLKIT  # comment out to disable this family
 
-while(True):
+while True:
     clock.tick()
     img = sensor.snapshot()
 
@@ -47,20 +50,24 @@ while(True):
-    # AprilTags may fail due to not having enough ram given the image sie being passed.
+    # AprilTags may fail due to not having enough RAM given the image size being passed.
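# [Editor's sketch; not part of the patch.] The ROI arithmetic below repeats a
# min(max(...)) pattern; a small helper (hypothetical name) states the intent
# directly:

def clamp(v, lo, hi):
    return min(max(v, lo), hi)


# e.g. w = clamp(int(blob.w() * 1.2), 10, 160)  # not too small, not too big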
tag_list = [] - for blob in img.find_blobs([thresholds], pixels_threshold=100, area_threshold=100, merge=True): + for blob in img.find_blobs( + [thresholds], pixels_threshold=100, area_threshold=100, merge=True + ): # Next we look for a tag in an ROI that's bigger than the blob. - w = min(max(int(blob.w() * 1.2), 10), 160) # Not too small, not too big. - h = min(max(int(blob.h() * 1.2), 10), 160) # Not too small, not too big. - x = min(max(int(blob.x() + (blob.w()/4) - (w * 0.1)), 0), img.width()-1) - y = min(max(int(blob.y() + (blob.h()/4) - (h * 0.1)), 0), img.height()-1) + w = min(max(int(blob.w() * 1.2), 10), 160) # Not too small, not too big. + h = min(max(int(blob.h() * 1.2), 10), 160) # Not too small, not too big. + x = min(max(int(blob.x() + (blob.w() / 4) - (w * 0.1)), 0), img.width() - 1) + y = min(max(int(blob.y() + (blob.h() / 4) - (h * 0.1)), 0), img.height() - 1) - box_list.append((x, y, w, h)) # We'll draw these later. + box_list.append((x, y, w, h)) # We'll draw these later. # Since we constrict the roi size apriltags shouldn't run out of ram. # But, if it does we handle it... try: - tag_list.extend(img.find_apriltags(roi=(x,y,w,h), families=tag_families)) - except (MemoryError): # Don't catch all exceptions otherwise you can't stop the script. + tag_list.extend(img.find_apriltags(roi=(x, y, w, h), families=tag_families)) + except ( + MemoryError + ): # Don't catch all exceptions otherwise you can't stop the script. pass for b in box_list: diff --git a/scripts/examples/07-Interface-Library/00-Arduino/arduino_i2c_slave.py b/scripts/examples/07-Interface-Library/00-Arduino/arduino_i2c_slave.py index fd633e7d3..732743a2f 100644 --- a/scripts/examples/07-Interface-Library/00-Arduino/arduino_i2c_slave.py +++ b/scripts/examples/07-Interface-Library/00-Arduino/arduino_i2c_slave.py @@ -27,7 +27,7 @@ data = ustruct.pack("<%ds" % len(text), text) # The hardware I2C bus for your OpenMV Cam is always I2C bus 2. bus = pyb.I2C(2, pyb.I2C.SLAVE, addr=0x12) -bus.deinit() # Fully reset I2C device... +bus.deinit() # Fully reset I2C device... bus = pyb.I2C(2, pyb.I2C.SLAVE, addr=0x12) print("Waiting for Arduino...") @@ -35,18 +35,20 @@ print("Waiting for Arduino...") # Arduino starts to poll the OpenMV Cam for data. Otherwise the I2C byte framing gets messed up, # and etc. So, keep the Arduino in reset until the OpenMV Cam is "Waiting for Arduino...". -while(True): +while True: try: - bus.send(ustruct.pack(" sample data on rising clock edge, output data on falling clock edge. spi = pyb.SPI(2, pyb.SPI.SLAVE, polarity=0, phase=0) + # NSS callback. def nss_callback(line): global spi, data try: spi.send(data, timeout=1000) except OSError as err: - pass # Don't care about errors - so pass. + pass # Don't care about errors - so pass. # Note that there are 3 possible errors. A timeout error, a general purpose error, or # a busy error. The error codes are 116, 5, 16 respectively for "err.arg[0]". + # Configure NSS/CS in IRQ mode to send data when requested by the master. pyb.ExtInt(pyb.Pin("P3"), pyb.ExtInt.IRQ_FALLING, pyb.Pin.PULL_UP, nss_callback) -while(True): +while True: time.sleep_ms(1000) ################################################################################################### @@ -64,7 +66,7 @@ while(True): # #define SS_PIN 10 # #define BAUD_RATE 19200 # #define CHAR_BUF 128 -# +# # void setup() { # pinMode(SS_PIN, OUTPUT); # Serial.begin(BAUD_RATE); @@ -74,13 +76,13 @@ while(True): # SPI.setDataMode(SPI_MODE0); # delay(1000); // Give the OpenMV Cam time to bootup. 
# } -# +# # void loop() { # int32_t len = 0; # char buff[CHAR_BUF] = {0}; # digitalWrite(SS_PIN, LOW); # delay(1); // Give the OpenMV Cam some time to setup to send data. -# +# # if(SPI.transfer(1) == 85) { // saw sync char? # SPI.transfer(&len, 4); // get length # if (len) { @@ -89,7 +91,7 @@ while(True): # } # while (len--) SPI.transfer(0); // eat any remaining bytes # } -# +# # digitalWrite(SS_PIN, HIGH); # Serial.print(buff); # delay(1); // Don't loop to quickly. diff --git a/scripts/examples/07-Interface-Library/00-Arduino/arduino_uart.py b/scripts/examples/07-Interface-Library/00-Arduino/arduino_uart.py index 1269c8739..60ba4163c 100644 --- a/scripts/examples/07-Interface-Library/00-Arduino/arduino_uart.py +++ b/scripts/examples/07-Interface-Library/00-Arduino/arduino_uart.py @@ -12,7 +12,7 @@ # // put your setup code here, to run once: # Serial.begin(19200); # } -# +# # void loop() { # // put your main code here, to run repeatedly: # if (Serial.available()) { @@ -31,8 +31,8 @@ from pyb import UART # UART 3, and baudrate. uart = UART(3, 19200) -while(True): +while True: uart.write("Hello World!\n") - if (uart.any()): + if uart.any(): print(uart.read()) time.sleep_ms(1000) diff --git a/scripts/examples/07-Interface-Library/01-Pixy-Emulation/apriltags_pixy_i2c_emulation.py b/scripts/examples/07-Interface-Library/01-Pixy-Emulation/apriltags_pixy_i2c_emulation.py index c39b60550..89e298f6e 100644 --- a/scripts/examples/07-Interface-Library/01-Pixy-Emulation/apriltags_pixy_i2c_emulation.py +++ b/scripts/examples/07-Interface-Library/01-Pixy-Emulation/apriltags_pixy_i2c_emulation.py @@ -10,48 +10,43 @@ # # P7 = Servo 1 # P8 = Servo 2 - +# # Note: The tag family is TAG36H11. Additionally, in order to for the # signature value of a tag detection to be compatible with pixy # interface libraries all tag ids have 8 added to them in order # to move them in the color code signature range. Finally, tags # are all reported as color code blocks... -# Pixy Parameters ############################################################ - -max_blocks = 1000 -max_blocks_per_id = 1000 - -i2c_address = 0x54 - -# Pan Servo -s0_lower_limit = 1000 # Servo pulse width lower limit in microseconds. -s0_upper_limit = 2000 # Servo pulse width upper limit in microseconds. - -# Tilt Servo -s1_lower_limit = 1000 # Servo pulse width lower limit in microseconds. -s1_upper_limit = 2000 # Servo pulse width upper limit in microseconds. - -analog_out_enable = False # P6 -> Analog Out (0v - 3.3v). -analog_out_mode = 0 # 0 == x position of largest tag - 1 == y position of largest tag - -############################################################################## - import math import pyb import sensor import struct import time -# Camera Setup +# Pixy Parameters ############################################################ +max_blocks = 1000 +max_blocks_per_id = 1000 +i2c_address = 0x54 + +# Pan Servo +s0_lower_limit = 1000 # Servo pulse width lower limit in microseconds. +s0_upper_limit = 2000 # Servo pulse width upper limit in microseconds. + +# Tilt Servo +s1_lower_limit = 1000 # Servo pulse width lower limit in microseconds. +s1_upper_limit = 2000 # Servo pulse width upper limit in microseconds. + +analog_out_enable = False # P6 -> Analog Out (0v - 3.3v). 
+analog_out_mode = 0 # 0 == x position of largest tag - 1 == y position of largest tag + +# Camera Setup sensor.reset() sensor.set_pixformat(sensor.GRAYSCALE) sensor.set_framesize(sensor.QQVGA) -sensor.skip_frames(time = 2000) +sensor.skip_frames(time=2000) # LED Setup - red_led = pyb.LED(1) green_led = pyb.LED(2) blue_led = pyb.LED(3) @@ -61,80 +56,89 @@ green_led.off() blue_led.off() # DAC Setup - dac = pyb.DAC("P6") if analog_out_enable else None if dac: dac.write(0) # Servo Setup - min_s0_limit = min(s0_lower_limit, s0_upper_limit) max_s0_limit = max(s0_lower_limit, s0_upper_limit) min_s1_limit = min(s1_lower_limit, s1_upper_limit) max_s1_limit = max(s1_lower_limit, s1_upper_limit) -s0_pan = pyb.Servo(1) # P7 -s1_tilt = pyb.Servo(2) # P8 +s0_pan = pyb.Servo(1) # P7 +s1_tilt = pyb.Servo(2) # P8 -s0_pan.pulse_width(int((max_s0_limit - min_s0_limit) // 2)) # center -s1_tilt.pulse_width(int((max_s1_limit - min_s1_limit) // 2)) # center +s0_pan.pulse_width(int((max_s0_limit - min_s0_limit) // 2)) # center +s1_tilt.pulse_width(int((max_s1_limit - min_s1_limit) // 2)) # center s0_pan_conversion_factor = (max_s0_limit - min_s0_limit) / 1000 s1_tilt_conversion_factor = (max_s1_limit - min_s1_limit) / 1000 + def s0_pan_position(value): - s0_pan.pulse_width(round(s0_lower_limit + (max(min(value, 1000), 0) * s0_pan_conversion_factor))) + s0_pan.pulse_width( + round(s0_lower_limit + (max(min(value, 1000), 0) * s0_pan_conversion_factor)) + ) + def s1_tilt_position(value): - s1_tilt.pulse_width(round(s1_lower_limit + (max(min(value, 1000), 0) * s1_tilt_conversion_factor))) + s1_tilt.pulse_width( + round(s1_lower_limit + (max(min(value, 1000), 0) * s1_tilt_conversion_factor)) + ) + # Link Setup +bus = pyb.I2C(2, pyb.I2C.SLAVE, addr=i2c_address) -bus = pyb.I2C(2, pyb.I2C.SLAVE, addr = i2c_address) def write(data): # Prepare the data to transmit first so we can do it quickly. out_data = [] for i in range(0, len(data), 2): - out_data.append(data[i:i+2]) + out_data.append(data[i : i + 2]) # Disable interrupts so we can send all packets without gaps. state = pyb.disable_irq() for i in range(len(out_data)): max_exceptions = 10 loop = True - while(loop): + while loop: try: - bus.send(out_data[i], timeout = 1) + bus.send(out_data[i], timeout=1) loop = False except OSError as error: - if(max_exceptions <= 0): + if max_exceptions <= 0: pyb.enable_irq(state) return max_exceptions -= 1 pyb.enable_irq(state) + def available(): - return 0 # Not implemented as there is no way for the us to be ready to receive the data. + return 0 # Not implemented as there is no way for the us to be ready to receive the data. + def read_byte(): - return 0 # Not implemented as there is no way for the us to be ready to receive the data. + return 0 # Not implemented as there is no way for the us to be ready to receive the data. -# Helper Stuff def checksum(data): checksum = 0 for i in range(0, len(data), 2): - checksum += ((data[i+1] & 0xFF) << 8) | ((data[i+0] & 0xFF) << 0) + checksum += ((data[i + 1] & 0xFF) << 8) | ((data[i + 0] & 0xFF) << 0) return checksum & 0xFFFF + def to_object_block_format(tag): angle = int((tag.rotation() * 180) // math.pi) - temp = struct.pack(" 0) and (max_blocks_per_id > 0): # new frame + if tags and (max_blocks > 0) and (max_blocks_per_id > 0): # new frame dat_buf = struct.pack(" Analog Out (0v - 3.3v). 
-analog_out_mode = 0 # 0 == x position of largest tag - 1 == y position of largest tag - -############################################################################## - import math import pyb import sensor import struct import time -# Camera Setup +# Pixy Parameters ############################################################ +max_blocks = 1000 +max_blocks_per_id = 1000 +# Pan Servo +s0_lower_limit = 1000 # Servo pulse width lower limit in microseconds. +s0_upper_limit = 2000 # Servo pulse width upper limit in microseconds. + +# Tilt Servo +s1_lower_limit = 1000 # Servo pulse width lower limit in microseconds. +s1_upper_limit = 2000 # Servo pulse width upper limit in microseconds. + +analog_out_enable = False # P6 -> Analog Out (0v - 3.3v). +analog_out_mode = 0 # 0 == x position of largest tag - 1 == y position of largest tag + + +# Camera Setup sensor.reset() sensor.set_pixformat(sensor.GRAYSCALE) sensor.set_framesize(sensor.QQVGA) -sensor.skip_frames(time = 2000) +sensor.skip_frames(time=2000) # LED Setup - red_led = pyb.LED(1) green_led = pyb.LED(2) blue_led = pyb.LED(3) @@ -67,82 +63,90 @@ green_led.off() blue_led.off() # DAC Setup - dac = pyb.DAC("P6") if analog_out_enable else None if dac: dac.write(0) # Servo Setup - min_s0_limit = min(s0_lower_limit, s0_upper_limit) max_s0_limit = max(s0_lower_limit, s0_upper_limit) min_s1_limit = min(s1_lower_limit, s1_upper_limit) max_s1_limit = max(s1_lower_limit, s1_upper_limit) -s0_pan = pyb.Servo(1) # P7 -s1_tilt = pyb.Servo(2) # P8 +s0_pan = pyb.Servo(1) # P7 +s1_tilt = pyb.Servo(2) # P8 -s0_pan.pulse_width(int((max_s0_limit - min_s0_limit) // 2)) # center -s1_tilt.pulse_width(int((max_s1_limit - min_s1_limit) // 2)) # center +s0_pan.pulse_width(int((max_s0_limit - min_s0_limit) // 2)) # center +s1_tilt.pulse_width(int((max_s1_limit - min_s1_limit) // 2)) # center s0_pan_conversion_factor = (max_s0_limit - min_s0_limit) / 1000 s1_tilt_conversion_factor = (max_s1_limit - min_s1_limit) / 1000 + def s0_pan_position(value): - s0_pan.pulse_width(round(s0_lower_limit + (max(min(value, 1000), 0) * s0_pan_conversion_factor))) + s0_pan.pulse_width( + round(s0_lower_limit + (max(min(value, 1000), 0) * s0_pan_conversion_factor)) + ) + def s1_tilt_position(value): - s1_tilt.pulse_width(round(s1_lower_limit + (max(min(value, 1000), 0) * s1_tilt_conversion_factor))) + s1_tilt.pulse_width( + round(s1_lower_limit + (max(min(value, 1000), 0) * s1_tilt_conversion_factor)) + ) + # Link Setup - -bus = pyb.SPI(2, pyb.SPI.SLAVE, polarity = 0, phase = 0, bits = 16) -while(True): +bus = pyb.SPI(2, pyb.SPI.SLAVE, polarity=0, phase=0, bits=16) +while True: try: - sync_bytes = bus.recv(2, timeout = 10) - if((sync_bytes[0] == 0x00) and (sync_bytes[1] == 0x5A)): + sync_bytes = bus.recv(2, timeout=10) + if (sync_bytes[0] == 0x00) and (sync_bytes[1] == 0x5A): break except OSError as error: pass bus.deinit() - bus.init(pyb.SPI.SLAVE, polarity = 0, phase = 0, bits = 16) + bus.init(pyb.SPI.SLAVE, polarity=0, phase=0, bits=16) + def write(data): - max_exceptions = 10 loop = True - while(loop): + while loop: try: - bus.send(data, timeout = 10) + bus.send(data, timeout=10) loop = False except OSError as error: - if(max_exceptions <= 0): + if max_exceptions <= 0: return max_exceptions -= 1 + def available(): - return 0 # Not implemented as there is no way for the us to be ready to receive the data. + return 0 # Not implemented as there is no way for the us to be ready to receive the data. 
+ def read_byte(): - return 0 # Not implemented as there is no way for the us to be ready to receive the data. + return 0 # Not implemented as there is no way for the us to be ready to receive the data. -# Helper Stuff def checksum(data): checksum = 0 for i in range(0, len(data), 2): - checksum += ((data[i+1] & 0xFF) << 8) | ((data[i+0] & 0xFF) << 0) + checksum += ((data[i + 1] & 0xFF) << 8) | ((data[i + 0] & 0xFF) << 0) return checksum & 0xFFFF + def to_object_block_format(tag): angle = int((tag.rotation() * 180) // math.pi) - temp = struct.pack(" 0) and (max_blocks_per_id > 0): # new frame + if tags and (max_blocks > 0) and (max_blocks_per_id > 0): # new frame dat_buf = struct.pack(" Analog Out (0v - 3.3v). -analog_out_mode = 0 # 0 == x position of largest tag - 1 == y position of largest tag - -############################################################################## - import math import pyb import sensor import struct import time -# Camera Setup +# Pixy Parameters ############################################################ +max_blocks = 1000 +max_blocks_per_id = 1000 +uart_baudrate = 19200 + +# Pan Servo +s0_lower_limit = 1000 # Servo pulse width lower limit in microseconds. +s0_upper_limit = 2000 # Servo pulse width upper limit in microseconds. + +# Tilt Servo +s1_lower_limit = 1000 # Servo pulse width lower limit in microseconds. +s1_upper_limit = 2000 # Servo pulse width upper limit in microseconds. + +analog_out_enable = False # P6 -> Analog Out (0v - 3.3v). +analog_out_mode = 0 # 0 == x position of largest tag - 1 == y position of largest tag + +# Camera Setup sensor.reset() sensor.set_pixformat(sensor.GRAYSCALE) sensor.set_framesize(sensor.QQVGA) -sensor.skip_frames(time = 2000) +sensor.skip_frames(time=2000) # LED Setup - red_led = pyb.LED(1) green_led = pyb.LED(2) blue_led = pyb.LED(3) @@ -61,62 +56,71 @@ green_led.off() blue_led.off() # DAC Setup - dac = pyb.DAC("P6") if analog_out_enable else None if dac: dac.write(0) # Servo Setup - min_s0_limit = min(s0_lower_limit, s0_upper_limit) max_s0_limit = max(s0_lower_limit, s0_upper_limit) min_s1_limit = min(s1_lower_limit, s1_upper_limit) max_s1_limit = max(s1_lower_limit, s1_upper_limit) -s0_pan = pyb.Servo(1) # P7 -s1_tilt = pyb.Servo(2) # P8 +s0_pan = pyb.Servo(1) # P7 +s1_tilt = pyb.Servo(2) # P8 -s0_pan.pulse_width(int((max_s0_limit - min_s0_limit) // 2)) # center -s1_tilt.pulse_width(int((max_s1_limit - min_s1_limit) // 2)) # center +s0_pan.pulse_width(int((max_s0_limit - min_s0_limit) // 2)) # center +s1_tilt.pulse_width(int((max_s1_limit - min_s1_limit) // 2)) # center s0_pan_conversion_factor = (max_s0_limit - min_s0_limit) / 1000 s1_tilt_conversion_factor = (max_s1_limit - min_s1_limit) / 1000 + def s0_pan_position(value): - s0_pan.pulse_width(round(s0_lower_limit + (max(min(value, 1000), 0) * s0_pan_conversion_factor))) + s0_pan.pulse_width( + round(s0_lower_limit + (max(min(value, 1000), 0) * s0_pan_conversion_factor)) + ) + def s1_tilt_position(value): - s1_tilt.pulse_width(round(s1_lower_limit + (max(min(value, 1000), 0) * s1_tilt_conversion_factor))) + s1_tilt.pulse_width( + round(s1_lower_limit + (max(min(value, 1000), 0) * s1_tilt_conversion_factor)) + ) + # Link Setup +uart = pyb.UART(3, uart_baudrate, timeout_char=1000) -uart = pyb.UART(3, uart_baudrate, timeout_char = 1000) def write(data): uart.write(data) + def available(): return uart.any() + def read_byte(): return uart.readchar() -# Helper Stuff def checksum(data): checksum = 0 for i in range(0, len(data), 2): - checksum += ((data[i+1] & 0xFF) << 8) 
| ((data[i+0] & 0xFF) << 0) + checksum += ((data[i + 1] & 0xFF) << 8) | ((data[i + 0] & 0xFF) << 0) return checksum & 0xFFFF + def to_object_block_format(tag): angle = int((tag.rotation() * 180) // math.pi) - temp = struct.pack(" 0) and (max_blocks_per_id > 0): # new frame + # Transmit Tags + if tags and (max_blocks > 0) and (max_blocks_per_id > 0): # new frame dat_buf = struct.pack(" Analog Out (0v - 3.3v). -analog_out_mode = 0 # 0 == x position of largest blob - 1 == y position of largest blob +analog_out_enable = False # P6 -> Analog Out (0v - 3.3v). +analog_out_mode = 0 # 0 == x position of largest blob - 1 == y position of largest blob # Parameter 0 - L Min. # Parameter 1 - L Max. @@ -38,45 +44,39 @@ analog_out_mode = 0 # 0 == x position of largest blob - 1 == y position of large # Parameter 5 - B Max. # Parameter 6 - Is Color Code Threshold? (True/False). # Parameter 7 - Enable Threshold? (True/False). -lab_color_thresholds = [(0, 100, 40, 127, -128, 127, True, True), # Generic Red Threshold - (0, 100, -128, -10, -128, 127, True, True), # Generic Green Threshold - (0, 0, 0, 0, 0, 0, False, False), - (0, 0, 0, 0, 0, 0, False, False), - (0, 0, 0, 0, 0, 0, False, False), - (0, 0, 0, 0, 0, 0, False, False), - (0, 0, 0, 0, 0, 0, False, False)] +lab_color_thresholds = [ + (0, 100, 40, 127, -128, 127, True, True), # Generic Red Threshold + (0, 100, -128, -10, -128, 127, True, True), # Generic Green Threshold + (0, 0, 0, 0, 0, 0, False, False), + (0, 0, 0, 0, 0, 0, False, False), + (0, 0, 0, 0, 0, 0, False, False), + (0, 0, 0, 0, 0, 0, False, False), + (0, 0, 0, 0, 0, 0, False, False), +] -fb_pixels_threshold = 500 # minimum number of pixels that must be in a blob -fb_merge_margin = 5 # how close pixel wise blobs can be before merging +fb_pixels_threshold = 500 # minimum number of pixels that must be in a blob +fb_merge_margin = 5 # how close pixel wise blobs can be before merging ############################################################################## -e_lab_color_thresholds = [] # enabled thresholds -e_lab_color_code = [] # enabled color code -e_lab_color_signatures = [] # original enabled threshold indexes +e_lab_color_thresholds = [] # enabled thresholds +e_lab_color_code = [] # enabled color code +e_lab_color_signatures = [] # original enabled threshold indexes for i in range(len(lab_color_thresholds)): if lab_color_thresholds[i][7]: e_lab_color_thresholds.append(lab_color_thresholds[i][0:6]) e_lab_color_code.append(lab_color_thresholds[i][6]) e_lab_color_signatures.append(i + 1) -import math -import pyb -import sensor -import struct -import time - # Camera Setup - sensor.reset() sensor.set_pixformat(sensor.RGB565) sensor.set_framesize(sensor.QVGA) -sensor.skip_frames(time = 2000) +sensor.skip_frames(time=2000) sensor.set_auto_gain(False) sensor.set_auto_whitebal(False) # LED Setup - red_led = pyb.LED(1) green_led = pyb.LED(2) blue_led = pyb.LED(3) @@ -86,83 +86,99 @@ green_led.off() blue_led.off() # DAC Setup - dac = pyb.DAC("P6") if analog_out_enable else None if dac: dac.write(0) # Servo Setup - min_s0_limit = min(s0_lower_limit, s0_upper_limit) max_s0_limit = max(s0_lower_limit, s0_upper_limit) min_s1_limit = min(s1_lower_limit, s1_upper_limit) max_s1_limit = max(s1_lower_limit, s1_upper_limit) -s0_pan = pyb.Servo(1) # P7 -s1_tilt = pyb.Servo(2) # P8 +s0_pan = pyb.Servo(1) # P7 +s1_tilt = pyb.Servo(2) # P8 -s0_pan.pulse_width(int((max_s0_limit - min_s0_limit) // 2)) # center -s1_tilt.pulse_width(int((max_s1_limit - min_s1_limit) // 2)) # center 
+s0_pan.pulse_width(int((max_s0_limit - min_s0_limit) // 2)) # center +s1_tilt.pulse_width(int((max_s1_limit - min_s1_limit) // 2)) # center s0_pan_conversion_factor = (max_s0_limit - min_s0_limit) / 1000 s1_tilt_conversion_factor = (max_s1_limit - min_s1_limit) / 1000 + def s0_pan_position(value): - s0_pan.pulse_width(round(s0_lower_limit + (max(min(value, 1000), 0) * s0_pan_conversion_factor))) + s0_pan.pulse_width( + round(s0_lower_limit + (max(min(value, 1000), 0) * s0_pan_conversion_factor)) + ) + def s1_tilt_position(value): - s1_tilt.pulse_width(round(s1_lower_limit + (max(min(value, 1000), 0) * s1_tilt_conversion_factor))) + s1_tilt.pulse_width( + round(s1_lower_limit + (max(min(value, 1000), 0) * s1_tilt_conversion_factor)) + ) + # Link Setup +bus = pyb.I2C(2, pyb.I2C.SLAVE, addr=i2c_address) -bus = pyb.I2C(2, pyb.I2C.SLAVE, addr = i2c_address) def write(data): # Prepare the data to transmit first so we can do it quickly. out_data = [] for i in range(0, len(data), 2): - out_data.append(data[i:i+2]) + out_data.append(data[i : i + 2]) # Disable interrupts so we can send all packets without gaps. state = pyb.disable_irq() for i in range(len(out_data)): max_exceptions = 10 loop = True - while(loop): + while loop: try: - bus.send(out_data[i], timeout = 1) + bus.send(out_data[i], timeout=1) loop = False except OSError as error: - if(max_exceptions <= 0): + if max_exceptions <= 0: pyb.enable_irq(state) return max_exceptions -= 1 pyb.enable_irq(state) + def available(): - return 0 # Not implemented as there is no way for the us to be ready to receive the data. + return 0 # Not implemented as there is no way for the us to be ready to receive the data. + def read_byte(): - return 0 # Not implemented as there is no way for the us to be ready to receive the data. + return 0 # Not implemented as there is no way for the us to be ready to receive the data. -# Helper Stuff def checksum(data): checksum = 0 for i in range(0, len(data), 2): - checksum += ((data[i+1] & 0xFF) << 8) | ((data[i+0] & 0xFF) << 0) + checksum += ((data[i + 1] & 0xFF) << 8) | ((data[i + 0] & 0xFF) << 0) return checksum & 0xFFFF + def get_normal_signature(code): for i in range(len(e_lab_color_signatures)): if code & (1 << i): return e_lab_color_signatures[i] return 0 + def to_normal_object_block_format(blob): - temp = struct.pack(" 1) or (not color_code(blob.code())) - elif(pri_color_code_mode == 2): # only color codes with two or more colors - return (bits_set(blob.code()) > 1) - elif(pri_color_code_mode == 3): + elif pri_color_code_mode == 2: # only color codes with two or more colors + return bits_set(blob.code()) > 1 + elif pri_color_code_mode == 3: return True + clock = time.clock() -while(True): +while True: clock.tick() img = sensor.snapshot() - blobs = list(filter(blob_filter, img.find_blobs(e_lab_color_thresholds, area_threshold = min_block_area, pixels_threshold = fb_pixels_threshold, merge = True, margin = fb_merge_margin, merge_cb = fb_merge_cb))) + blobs = list( + filter( + blob_filter, + img.find_blobs( + e_lab_color_thresholds, + area_threshold=min_block_area, + pixels_threshold=fb_pixels_threshold, + merge=True, + margin=fb_merge_margin, + merge_cb=fb_merge_cb, + ), + ) + ) - # Transmit Blobs # - - if blobs and (max_blocks > 0) and (max_blocks_per_signature > 0): # new frame + # Transmit Blobs + if blobs and (max_blocks > 0) and (max_blocks_per_signature > 0): # new frame dat_buf = struct.pack(" Analog Out (0v - 3.3v). 
-analog_out_mode = 0 # 0 == x position of largest blob - 1 == y position of largest blob +analog_out_enable = False # P6 -> Analog Out (0v - 3.3v). +analog_out_mode = 0 # 0 == x position of largest blob - 1 == y position of largest blob # Parameter 0 - L Min. # Parameter 1 - L Max. @@ -44,45 +49,39 @@ analog_out_mode = 0 # 0 == x position of largest blob - 1 == y position of large # Parameter 5 - B Max. # Parameter 6 - Is Color Code Threshold? (True/False). # Parameter 7 - Enable Threshold? (True/False). -lab_color_thresholds = [(0, 100, 40, 127, -128, 127, True, True), # Generic Red Threshold - (0, 100, -128, -10, -128, 127, True, True), # Generic Green Threshold - (0, 0, 0, 0, 0, 0, False, False), - (0, 0, 0, 0, 0, 0, False, False), - (0, 0, 0, 0, 0, 0, False, False), - (0, 0, 0, 0, 0, 0, False, False), - (0, 0, 0, 0, 0, 0, False, False)] +lab_color_thresholds = [ + (0, 100, 40, 127, -128, 127, True, True), # Generic Red Threshold + (0, 100, -128, -10, -128, 127, True, True), # Generic Green Threshold + (0, 0, 0, 0, 0, 0, False, False), + (0, 0, 0, 0, 0, 0, False, False), + (0, 0, 0, 0, 0, 0, False, False), + (0, 0, 0, 0, 0, 0, False, False), + (0, 0, 0, 0, 0, 0, False, False), +] -fb_pixels_threshold = 500 # minimum number of pixels that must be in a blob -fb_merge_margin = 5 # how close pixel wise blobs can be before merging +fb_pixels_threshold = 500 # minimum number of pixels that must be in a blob +fb_merge_margin = 5 # how close pixel wise blobs can be before merging ############################################################################## -e_lab_color_thresholds = [] # enabled thresholds -e_lab_color_code = [] # enabled color code -e_lab_color_signatures = [] # original enabled threshold indexes +e_lab_color_thresholds = [] # enabled thresholds +e_lab_color_code = [] # enabled color code +e_lab_color_signatures = [] # original enabled threshold indexes for i in range(len(lab_color_thresholds)): if lab_color_thresholds[i][7]: e_lab_color_thresholds.append(lab_color_thresholds[i][0:6]) e_lab_color_code.append(lab_color_thresholds[i][6]) e_lab_color_signatures.append(i + 1) -import math -import pyb -import sensor -import struct -import time - # Camera Setup - sensor.reset() sensor.set_pixformat(sensor.RGB565) sensor.set_framesize(sensor.QVGA) -sensor.skip_frames(time = 2000) +sensor.skip_frames(time=2000) sensor.set_auto_gain(False) sensor.set_auto_whitebal(False) # LED Setup - red_led = pyb.LED(1) green_led = pyb.LED(2) blue_led = pyb.LED(3) @@ -92,85 +91,100 @@ green_led.off() blue_led.off() # DAC Setup - dac = pyb.DAC("P6") if analog_out_enable else None if dac: dac.write(0) # Servo Setup - min_s0_limit = min(s0_lower_limit, s0_upper_limit) max_s0_limit = max(s0_lower_limit, s0_upper_limit) min_s1_limit = min(s1_lower_limit, s1_upper_limit) max_s1_limit = max(s1_lower_limit, s1_upper_limit) -s0_pan = pyb.Servo(1) # P7 -s1_tilt = pyb.Servo(2) # P8 +s0_pan = pyb.Servo(1) # P7 +s1_tilt = pyb.Servo(2) # P8 -s0_pan.pulse_width(int((max_s0_limit - min_s0_limit) // 2)) # center -s1_tilt.pulse_width(int((max_s1_limit - min_s1_limit) // 2)) # center +s0_pan.pulse_width(int((max_s0_limit - min_s0_limit) // 2)) # center +s1_tilt.pulse_width(int((max_s1_limit - min_s1_limit) // 2)) # center s0_pan_conversion_factor = (max_s0_limit - min_s0_limit) / 1000 s1_tilt_conversion_factor = (max_s1_limit - min_s1_limit) / 1000 + def s0_pan_position(value): - s0_pan.pulse_width(round(s0_lower_limit + (max(min(value, 1000), 0) * s0_pan_conversion_factor))) + s0_pan.pulse_width( + 
round(s0_lower_limit + (max(min(value, 1000), 0) * s0_pan_conversion_factor)) + ) + def s1_tilt_position(value): - s1_tilt.pulse_width(round(s1_lower_limit + (max(min(value, 1000), 0) * s1_tilt_conversion_factor))) + s1_tilt.pulse_width( + round(s1_lower_limit + (max(min(value, 1000), 0) * s1_tilt_conversion_factor)) + ) + # Link Setup - -bus = pyb.SPI(2, pyb.SPI.SLAVE, polarity = 0, phase = 0, bits = 16) -while(True): +bus = pyb.SPI(2, pyb.SPI.SLAVE, polarity=0, phase=0, bits=16) +while True: try: - sync_bytes = bus.recv(2, timeout = 10) - if((sync_bytes[0] == 0x00) and (sync_bytes[1] == 0x5A)): + sync_bytes = bus.recv(2, timeout=10) + if (sync_bytes[0] == 0x00) and (sync_bytes[1] == 0x5A): break except OSError as error: pass bus.deinit() - bus.init(pyb.SPI.SLAVE, polarity = 0, phase = 0, bits = 16) + bus.init(pyb.SPI.SLAVE, polarity=0, phase=0, bits=16) + def write(data): - max_exceptions = 10 loop = True - while(loop): + while loop: try: - bus.send(data, timeout = 10) + bus.send(data, timeout=10) loop = False except OSError as error: - if(max_exceptions <= 0): + if max_exceptions <= 0: return max_exceptions -= 1 + def available(): - return 0 # Not implemented as there is no way for the us to be ready to receive the data. + return 0 # Not implemented as there is no way for the us to be ready to receive the data. + def read_byte(): - return 0 # Not implemented as there is no way for the us to be ready to receive the data. + return 0 # Not implemented as there is no way for the us to be ready to receive the data. -# Helper Stuff def checksum(data): checksum = 0 for i in range(0, len(data), 2): - checksum += ((data[i+1] & 0xFF) << 8) | ((data[i+0] & 0xFF) << 0) + checksum += ((data[i + 1] & 0xFF) << 8) | ((data[i + 0] & 0xFF) << 0) return checksum & 0xFFFF + def get_normal_signature(code): for i in range(len(e_lab_color_signatures)): if code & (1 << i): return e_lab_color_signatures[i] return 0 + def to_normal_object_block_format(blob): - temp = struct.pack(" 1) or (not color_code(blob.code())) - elif(pri_color_code_mode == 2): # only color codes with two or more colors - return (bits_set(blob.code()) > 1) - elif(pri_color_code_mode == 3): + elif pri_color_code_mode == 2: # only color codes with two or more colors + return bits_set(blob.code()) > 1 + elif pri_color_code_mode == 3: return True + clock = time.clock() -while(True): +while True: clock.tick() img = sensor.snapshot() - blobs = list(filter(blob_filter, img.find_blobs(e_lab_color_thresholds, area_threshold = min_block_area, pixels_threshold = fb_pixels_threshold, merge = True, margin = fb_merge_margin, merge_cb = fb_merge_cb))) + blobs = list( + filter( + blob_filter, + img.find_blobs( + e_lab_color_thresholds, + area_threshold=min_block_area, + pixels_threshold=fb_pixels_threshold, + merge=True, + margin=fb_merge_margin, + merge_cb=fb_merge_cb, + ), + ) + ) - # Transmit Blobs # - - if blobs and (max_blocks > 0) and (max_blocks_per_signature > 0): # new frame + # Transmit Blobs + if blobs and (max_blocks > 0) and (max_blocks_per_signature > 0): # new frame dat_buf = struct.pack(" Analog Out (0v - 3.3v). -analog_out_mode = 0 # 0 == x position of largest blob - 1 == y position of largest blob +analog_out_enable = False # P6 -> Analog Out (0v - 3.3v). +analog_out_mode = 0 # 0 == x position of largest blob - 1 == y position of largest blob # Parameter 0 - L Min. # Parameter 1 - L Max. @@ -38,40 +44,35 @@ analog_out_mode = 0 # 0 == x position of largest blob - 1 == y position of large # Parameter 5 - B Max. 
# Parameter 6 - Is Color Code Threshold? (True/False). # Parameter 7 - Enable Threshold? (True/False). -lab_color_thresholds = [(0, 100, 40, 127, -128, 127, True, True), # Generic Red Threshold - (0, 100, -128, -10, -128, 127, True, True), # Generic Green Threshold - (0, 0, 0, 0, 0, 0, False, False), - (0, 0, 0, 0, 0, 0, False, False), - (0, 0, 0, 0, 0, 0, False, False), - (0, 0, 0, 0, 0, 0, False, False), - (0, 0, 0, 0, 0, 0, False, False)] +lab_color_thresholds = [ + (0, 100, 40, 127, -128, 127, True, True), # Generic Red Threshold + (0, 100, -128, -10, -128, 127, True, True), # Generic Green Threshold + (0, 0, 0, 0, 0, 0, False, False), + (0, 0, 0, 0, 0, 0, False, False), + (0, 0, 0, 0, 0, 0, False, False), + (0, 0, 0, 0, 0, 0, False, False), + (0, 0, 0, 0, 0, 0, False, False), +] -fb_pixels_threshold = 500 # minimum number of pixels that must be in a blob -fb_merge_margin = 5 # how close pixel wise blobs can be before merging +fb_pixels_threshold = 500 # minimum number of pixels that must be in a blob +fb_merge_margin = 5 # how close pixel wise blobs can be before merging ############################################################################## -e_lab_color_thresholds = [] # enabled thresholds -e_lab_color_code = [] # enabled color code -e_lab_color_signatures = [] # original enabled threshold indexes +e_lab_color_thresholds = [] # enabled thresholds +e_lab_color_code = [] # enabled color code +e_lab_color_signatures = [] # original enabled threshold indexes for i in range(len(lab_color_thresholds)): if lab_color_thresholds[i][7]: e_lab_color_thresholds.append(lab_color_thresholds[i][0:6]) e_lab_color_code.append(lab_color_thresholds[i][6]) e_lab_color_signatures.append(i + 1) -import math -import pyb -import sensor -import struct -import time - # Camera Setup - sensor.reset() sensor.set_pixformat(sensor.RGB565) sensor.set_framesize(sensor.QVGA) -sensor.skip_frames(time = 2000) +sensor.skip_frames(time=2000) sensor.set_auto_gain(False) sensor.set_auto_whitebal(False) @@ -93,58 +94,76 @@ if dac: dac.write(0) # Servo Setup - min_s0_limit = min(s0_lower_limit, s0_upper_limit) max_s0_limit = max(s0_lower_limit, s0_upper_limit) min_s1_limit = min(s1_lower_limit, s1_upper_limit) max_s1_limit = max(s1_lower_limit, s1_upper_limit) -s0_pan = pyb.Servo(1) # P7 -s1_tilt = pyb.Servo(2) # P8 +s0_pan = pyb.Servo(1) # P7 +s1_tilt = pyb.Servo(2) # P8 -s0_pan.pulse_width(int((max_s0_limit - min_s0_limit) // 2)) # center -s1_tilt.pulse_width(int((max_s1_limit - min_s1_limit) // 2)) # center +s0_pan.pulse_width(int((max_s0_limit - min_s0_limit) // 2)) # center +s1_tilt.pulse_width(int((max_s1_limit - min_s1_limit) // 2)) # center s0_pan_conversion_factor = (max_s0_limit - min_s0_limit) / 1000 s1_tilt_conversion_factor = (max_s1_limit - min_s1_limit) / 1000 + def s0_pan_position(value): - s0_pan.pulse_width(round(s0_lower_limit + (max(min(value, 1000), 0) * s0_pan_conversion_factor))) + s0_pan.pulse_width( + round(s0_lower_limit + (max(min(value, 1000), 0) * s0_pan_conversion_factor)) + ) + def s1_tilt_position(value): - s1_tilt.pulse_width(round(s1_lower_limit + (max(min(value, 1000), 0) * s1_tilt_conversion_factor))) + s1_tilt.pulse_width( + round(s1_lower_limit + (max(min(value, 1000), 0) * s1_tilt_conversion_factor)) + ) + # Link Setup -uart = pyb.UART(3, uart_baudrate, timeout_char = 1000) +uart = pyb.UART(3, uart_baudrate, timeout_char=1000) + def write(data): uart.write(data) + def available(): return uart.any() + def read_byte(): return uart.readchar() -# Helper Stuff def checksum(data): 
checksum = 0 for i in range(0, len(data), 2): - checksum += ((data[i+1] & 0xFF) << 8) | ((data[i+0] & 0xFF) << 0) + checksum += ((data[i + 1] & 0xFF) << 8) | ((data[i + 0] & 0xFF) << 0) return checksum & 0xFFFF + def get_normal_signature(code): for i in range(len(e_lab_color_signatures)): if code & (1 << i): return e_lab_color_signatures[i] return 0 + def to_normal_object_block_format(blob): - temp = struct.pack(" 1) or (not color_code(blob.code())) - elif(pri_color_code_mode == 2): # only color codes with two or more colors - return (bits_set(blob.code()) > 1) - elif(pri_color_code_mode == 3): + elif pri_color_code_mode == 2: # only color codes with two or more colors + return bits_set(blob.code()) > 1 + elif pri_color_code_mode == 3: return True + clock = time.clock() -while(True): +while True: clock.tick() img = sensor.snapshot() - blobs = list(filter(blob_filter, img.find_blobs(e_lab_color_thresholds, area_threshold = min_block_area, pixels_threshold = fb_pixels_threshold, merge = True, margin = fb_merge_margin, merge_cb = fb_merge_cb))) + blobs = list( + filter( + blob_filter, + img.find_blobs( + e_lab_color_thresholds, + area_threshold=min_block_area, + pixels_threshold=fb_pixels_threshold, + merge=True, + margin=fb_merge_margin, + merge_cb=fb_merge_cb, + ), + ) + ) - # Transmit Blobs # - - if blobs and (max_blocks > 0) and (max_blocks_per_signature > 0): # new frame + # Transmit Blobs + if blobs and (max_blocks > 0) and (max_blocks_per_signature > 0): # new frame dat_buf = struct.pack("> 8) ^ (tmp << 8) ^ (tmp << 3) ^ (tmp >> 4)) & 0xFFFF return output + MAV_LANDING_TARGET_message_id = 149 -MAV_LANDING_TARGET_min_distance = 1/100 # in meters -MAV_LANDING_TARGET_max_distance = 10000/100 # in meters -MAV_LANDING_TARGET_frame = 8 # MAV_FRAME_BODY_NED +MAV_LANDING_TARGET_min_distance = 1 / 100 # in meters +MAV_LANDING_TARGET_max_distance = 10000 / 100 # in meters +MAV_LANDING_TARGET_frame = 8 # MAV_FRAME_BODY_NED MAV_LANDING_TARGET_extra_crc = 200 + # http://mavlink.org/messages/common#LANDING_TARGET # https://github.com/mavlink/c_library_v1/blob/master/common/mavlink_msg_landing_target.h def send_landing_target_packet(tag, dist_mm, w, h): global packet_sequence - temp = struct.pack("> 8) ^ (tmp << 8) ^ (tmp << 3) ^ (tmp >> 4)) & 0xFFFF return output + MAV_OPTICAL_FLOW_message_id = 100 -MAV_OPTICAL_FLOW_id = 0 # unused +MAV_OPTICAL_FLOW_id = 0 # unused MAV_OPTICAL_FLOW_extra_crc = 175 + # http://mavlink.org/messages/common#OPTICAL_FLOW # https://github.com/mavlink/c_library_v1/blob/master/common/mavlink_msg_optical_flow.h def send_optical_flow_packet(x, y, c): global packet_sequence - temp = struct.pack(" 4 # Filter index, mode (RANGE, DUAL or MASK), FIFO (0 or 1), params can.setfilter(0, CAN.RANGE, 0, (1, 4)) @@ -32,6 +32,6 @@ else: # Filter index, mode (LIST16, etc..), FIFO (0 or 1), params can.setfilter(0, CAN.LIST16, 0, (1, 2, 3, 4)) - while (True): + while True: # Receive messages on FIFO 0 print(can.recv(0, timeout=10000)) diff --git a/scripts/examples/09-OpenMV-Boards/00-Board-Control/cpufreq_scaling.py b/scripts/examples/09-OpenMV-Boards/00-Board-Control/cpufreq_scaling.py index e707e7618..b2a650492 100644 --- a/scripts/examples/09-OpenMV-Boards/00-Board-Control/cpufreq_scaling.py +++ b/scripts/examples/09-OpenMV-Boards/00-Board-Control/cpufreq_scaling.py @@ -6,22 +6,27 @@ import image import time import cpufreq -sensor.reset() # Reset and initialize the sensor. +sensor.reset() # Reset and initialize the sensor. 
sensor.set_pixformat(sensor.GRAYSCALE) # Set pixel format to RGB565 (or GRAYSCALE) -sensor.set_framesize(sensor.QVGA) # Set frame size to QVGA (320x240) -clock = time.clock() # Create a clock object to track the FPS. +sensor.set_framesize(sensor.QVGA) # Set frame size to QVGA (320x240) +clock = time.clock() # Create a clock object to track the FPS. + def test_image_processing(): for i in range(0, 50): - clock.tick() # Update the FPS clock. - img = sensor.snapshot() # Take a picture and return the image. + clock.tick() # Update the FPS clock. + img = sensor.snapshot() # Take a picture and return the image. img.find_edges(image.EDGE_CANNY, threshold=(50, 80)) + print("\nFrequency Scaling Test...") for f in cpufreq.get_supported_frequencies(): - print("Testing CPU Freq: %dMHz..." %(f)) + print("Testing CPU Freq: %dMHz..." % (f)) cpufreq.set_frequency(f) clock.reset() test_image_processing() freqs = cpufreq.get_current_frequencies() - print("CPU Freq:%dMHz HCLK:%dMhz PCLK1:%dMhz PCLK2:%dMhz FPS:%.2f" %(freqs[0], freqs[1], freqs[2], freqs[3], clock.fps())) + print( + "CPU Freq:%dMHz HCLK:%dMhz PCLK1:%dMhz PCLK2:%dMhz FPS:%.2f" + % (freqs[0], freqs[1], freqs[2], freqs[3], clock.fps()) + ) diff --git a/scripts/examples/09-OpenMV-Boards/00-Board-Control/dac_write.py b/scripts/examples/09-OpenMV-Boards/00-Board-Control/dac_write.py index c43a13c91..0ded38433 100644 --- a/scripts/examples/09-OpenMV-Boards/00-Board-Control/dac_write.py +++ b/scripts/examples/09-OpenMV-Boards/00-Board-Control/dac_write.py @@ -5,13 +5,13 @@ import time from pyb import DAC -dac = DAC("P6") # Must always be "P6". +dac = DAC("P6") # Must always be "P6". -while(True): +while True: # The DAC has 8-12 bits of resolution (default 8-bits). for i in range(256): dac.write(i) time.sleep_ms(20) for i in range(256): - dac.write(255-i) + dac.write(255 - i) time.sleep_ms(20) diff --git a/scripts/examples/09-OpenMV-Boards/00-Board-Control/i2c_control.py b/scripts/examples/09-OpenMV-Boards/00-Board-Control/i2c_control.py index a284cb2a0..dee72add6 100644 --- a/scripts/examples/09-OpenMV-Boards/00-Board-Control/i2c_control.py +++ b/scripts/examples/09-OpenMV-Boards/00-Board-Control/i2c_control.py @@ -6,14 +6,15 @@ from pyb import I2C -i2c = I2C(2, I2C.MASTER) # The i2c bus must always be 2. -mem = i2c.mem_read(256, 0x50, 0) # The eeprom slave address is 0x50. +i2c = I2C(2, I2C.MASTER) # The i2c bus must always be 2. +mem = i2c.mem_read(256, 0x50, 0) # The eeprom slave address is 0x50. 
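# (mem_read(256, 0x50, 0) reads the EEPROM's full 256 bytes starting at memory
# address 0 in one transaction; the loop below pretty-prints that buffer as a
# 16x16 matrix of decimal byte values.)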
print("\n[") for i in range(16): - print("\t[", end='') + print("\t[", end="") for j in range(16): - print("%03d" % mem[(i*16)+j], end='') - if j != 15: print(", ", end='') + print("%03d" % mem[(i * 16) + j], end="") + if j != 15: + print(", ", end="") print("]," if i != 15 else "]") print("]") diff --git a/scripts/examples/09-OpenMV-Boards/00-Board-Control/led_control.py b/scripts/examples/09-OpenMV-Boards/00-Board-Control/led_control.py index c58bfb5de..3df3eb09b 100644 --- a/scripts/examples/09-OpenMV-Boards/00-Board-Control/led_control.py +++ b/scripts/examples/09-OpenMV-Boards/00-Board-Control/led_control.py @@ -6,22 +6,32 @@ import time from pyb import LED -red_led = LED(1) +red_led = LED(1) green_led = LED(2) -blue_led = LED(3) -ir_led = LED(4) +blue_led = LED(3) +ir_led = LED(4) + def led_control(x): - if (x&1)==0: red_led.off() - elif (x&1)==1: red_led.on() - if (x&2)==0: green_led.off() - elif (x&2)==2: green_led.on() - if (x&4)==0: blue_led.off() - elif (x&4)==4: blue_led.on() - if (x&8)==0: ir_led.off() - elif (x&8)==8: ir_led.on() + if (x & 1) == 0: + red_led.off() + elif (x & 1) == 1: + red_led.on() + if (x & 2) == 0: + green_led.off() + elif (x & 2) == 2: + green_led.on() + if (x & 4) == 0: + blue_led.off() + elif (x & 4) == 4: + blue_led.on() + if (x & 8) == 0: + ir_led.off() + elif (x & 8) == 8: + ir_led.on() -while(True): + +while True: for i in range(16): led_control(i) time.sleep_ms(500) diff --git a/scripts/examples/09-OpenMV-Boards/00-Board-Control/pin_control.py b/scripts/examples/09-OpenMV-Boards/00-Board-Control/pin_control.py index dbcae5fcd..9e39524d7 100644 --- a/scripts/examples/09-OpenMV-Boards/00-Board-Control/pin_control.py +++ b/scripts/examples/09-OpenMV-Boards/00-Board-Control/pin_control.py @@ -6,8 +6,8 @@ from pyb import Pin # Connect a switch to pin 0 that will pull it low when the switch is closed. # Pin 1 will then light up. -pin0 = Pin('P0', Pin.IN, Pin.PULL_UP) -pin1 = Pin('P1', Pin.OUT_PP, Pin.PULL_NONE) +pin0 = Pin("P0", Pin.IN, Pin.PULL_UP) +pin1 = Pin("P1", Pin.OUT_PP, Pin.PULL_NONE) -while(True): +while True: pin1.value(not pin0.value()) diff --git a/scripts/examples/09-OpenMV-Boards/00-Board-Control/pwm_control.py b/scripts/examples/09-OpenMV-Boards/00-Board-Control/pwm_control.py index ed9e60569..0e44a94a6 100644 --- a/scripts/examples/09-OpenMV-Boards/00-Board-Control/pwm_control.py +++ b/scripts/examples/09-OpenMV-Boards/00-Board-Control/pwm_control.py @@ -5,11 +5,11 @@ import time from pyb import Pin, Timer -tim = Timer(4, freq=1000) # Frequency in Hz +tim = Timer(4, freq=1000) # Frequency in Hz # Generate a 1KHz square wave on TIM4 with 50%, 75% and 50% duty cycles on channels 1, 2 and 3 respectively. 
ch1 = tim.channel(1, Timer.PWM, pin=Pin("P7"), pulse_width_percent=50) ch2 = tim.channel(2, Timer.PWM, pin=Pin("P8"), pulse_width_percent=75) ch3 = tim.channel(3, Timer.PWM, pin=Pin("P9"), pulse_width_percent=50) -while (True): +while True: time.sleep_ms(1000) diff --git a/scripts/examples/09-OpenMV-Boards/00-Board-Control/rtc.py b/scripts/examples/09-OpenMV-Boards/00-Board-Control/rtc.py index cfd2bbefc..f82bec7e7 100644 --- a/scripts/examples/09-OpenMV-Boards/00-Board-Control/rtc.py +++ b/scripts/examples/09-OpenMV-Boards/00-Board-Control/rtc.py @@ -7,6 +7,6 @@ from pyb import RTC rtc = RTC() rtc.datetime((2013, 7, 9, 2, 0, 0, 0, 0)) -while (True): +while True: print(rtc.datetime()) time.sleep_ms(1000) diff --git a/scripts/examples/09-OpenMV-Boards/00-Board-Control/servo_control.py b/scripts/examples/09-OpenMV-Boards/00-Board-Control/servo_control.py index 809fe3f7b..095aeed36 100644 --- a/scripts/examples/09-OpenMV-Boards/00-Board-Control/servo_control.py +++ b/scripts/examples/09-OpenMV-Boards/00-Board-Control/servo_control.py @@ -5,11 +5,11 @@ import time from pyb import Servo -s1 = Servo(1) # P7 -s2 = Servo(2) # P8 -s3 = Servo(3) # P9 +s1 = Servo(1) # P7 +s2 = Servo(2) # P8 +s3 = Servo(3) # P9 -while(True): +while True: for i in range(1000): s1.pulse_width(1000 + i) s2.pulse_width(1999 - i) diff --git a/scripts/examples/09-OpenMV-Boards/00-Board-Control/spi_control.py b/scripts/examples/09-OpenMV-Boards/00-Board-Control/spi_control.py index 774ceb732..1ca9e737e 100644 --- a/scripts/examples/09-OpenMV-Boards/00-Board-Control/spi_control.py +++ b/scripts/examples/09-OpenMV-Boards/00-Board-Control/spi_control.py @@ -8,14 +8,15 @@ import sensor import time from pyb import Pin, SPI -cs = Pin("P3", Pin.OUT_OD) +cs = Pin("P3", Pin.OUT_OD) rst = Pin("P7", Pin.OUT_PP) -rs = Pin("P8", Pin.OUT_PP) +rs = Pin("P8", Pin.OUT_PP) # The hardware SPI bus for your OpenMV Cam is always SPI bus 2. # NOTE: The SPI clock frequency will not always be the requested frequency. The hardware only supports # frequencies that are the bus frequency divided by a prescaler (which can be 2, 4, 8, 16, 32, 64, 128 or 256). -spi = SPI(2, SPI.MASTER, baudrate=int(1000000000/66), polarity=0, phase=0) +spi = SPI(2, SPI.MASTER, baudrate=int(1000000000 / 66), polarity=0, phase=0) + def write_command_byte(c): cs.low() @@ -23,16 +24,20 @@ def write_command_byte(c): spi.send(c) cs.high() + def write_data_byte(c): cs.low() rs.high() spi.send(c) cs.high() + def write_command(c, *data): write_command_byte(c) if data: - for d in data: write_data_byte(d) + for d in data: + write_data_byte(d) + def write_image(img): cs.low() @@ -40,13 +45,14 @@ def write_image(img): spi.send(img) cs.high() + # Reset the LCD. rst.low() time.sleep_ms(100) rst.high() time.sleep_ms(100) -write_command(0x11) # Sleep Exit +write_command(0x11) # Sleep Exit time.sleep_ms(120) # Memory Data Access Control @@ -59,18 +65,18 @@ write_command(0x3A, 0x05) # Display On write_command(0x29) -sensor.reset() # Initialize the camera sensor. -sensor.set_pixformat(sensor.RGB565) # must be this -sensor.set_framesize(sensor.QQVGA2) # must be this -sensor.skip_frames(time = 2000) # Let new settings take affect. -clock = time.clock() # Tracks FPS. +sensor.reset() # Initialize the camera sensor. +sensor.set_pixformat(sensor.RGB565) # must be this +sensor.set_framesize(sensor.QQVGA2) # must be this +sensor.skip_frames(time=2000) # Let new settings take effect. +clock = time.clock() # Tracks FPS. -while(True): - clock.tick() # Track elapsed milliseconds between snapshots().
- img = sensor.snapshot() # Take a picture and return the image. +while True: + clock.tick() # Track elapsed milliseconds between snapshots(). + img = sensor.snapshot() # Take a picture and return the image. - write_command(0x2C) # Write image command... + write_command(0x2C) # Write image command... write_image(img) - print(clock.fps()) # Note: Your OpenMV Cam runs about half as fast while + print(clock.fps()) # Note: Your OpenMV Cam runs about half as fast while # connected to your computer. The FPS should increase once disconnected. diff --git a/scripts/examples/09-OpenMV-Boards/00-Board-Control/timer_control.py b/scripts/examples/09-OpenMV-Boards/00-Board-Control/timer_control.py index 5ccdbe99f..5954c2f0f 100644 --- a/scripts/examples/09-OpenMV-Boards/00-Board-Control/timer_control.py +++ b/scripts/examples/09-OpenMV-Boards/00-Board-Control/timer_control.py @@ -3,17 +3,20 @@ # This example shows how to use a timer for callbacks. import time -from pyb import Pin, Timer, LED +from pyb import LED +from pyb import Timer + +blue_led = LED(3) -blue_led = LED(3) # we will receive the timer object when being called # Note: functions that allocate memory are Not allowed in callbacks -def tick(timer): +def tick(timer): blue_led.toggle() - -tim = Timer(2, freq=1) # create a timer object using timer 2 - trigger at 1Hz -tim.callback(tick) # set the callback to our tick function -while (True): + +tim = Timer(2, freq=1) # create a timer object using timer 2 - trigger at 1Hz +tim.callback(tick) # set the callback to our tick function + +while True: time.sleep_ms(1000) diff --git a/scripts/examples/09-OpenMV-Boards/00-Board-Control/timer_tests.py b/scripts/examples/09-OpenMV-Boards/00-Board-Control/timer_tests.py index 1ab12ec6e..025afb465 100644 --- a/scripts/examples/09-OpenMV-Boards/00-Board-Control/timer_tests.py +++ b/scripts/examples/09-OpenMV-Boards/00-Board-Control/timer_tests.py @@ -3,18 +3,21 @@ # This example tests all the timers. import time -from pyb import Pin, Timer, LED +from pyb import LED +from pyb import Timer + +blue_led = LED(3) -blue_led = LED(3) # Note: functions that allocate memory are Not allowed in callbacks def tick(timer): blue_led.toggle() + print("") for i in range(1, 18): try: - print("Testing TIM%d... "%(i), end="") + print("Testing TIM%d... " % (i), end="") tim = Timer(i, freq=10, callback=tick) time.sleep_ms(1000) tim.deinit() diff --git a/scripts/examples/09-OpenMV-Boards/00-Board-Control/uart_control.py b/scripts/examples/09-OpenMV-Boards/00-Board-Control/uart_control.py index 54a4122e1..334ac4208 100644 --- a/scripts/examples/09-OpenMV-Boards/00-Board-Control/uart_control.py +++ b/scripts/examples/09-OpenMV-Boards/00-Board-Control/uart_control.py @@ -12,6 +12,6 @@ from pyb import UART # example see the BLE-Shield driver. uart = UART(3, 19200) -while(True): +while True: uart.write("Hello World!\r") time.sleep_ms(1000) diff --git a/scripts/examples/09-OpenMV-Boards/00-Board-Control/usb_hid.py b/scripts/examples/09-OpenMV-Boards/00-Board-Control/usb_hid.py index 54d2627a6..f1b77aba5 100644 --- a/scripts/examples/09-OpenMV-Boards/00-Board-Control/usb_hid.py +++ b/scripts/examples/09-OpenMV-Boards/00-Board-Control/usb_hid.py @@ -7,10 +7,10 @@ # # Add the following script to boot.py: # -##import pyb #(UNCOMMENT THIS LINE!) -##pyb.usb_mode('VCP+HID') # serial device + mouse (UNCOMMENT THIS LINE!) 
-##pyb.usb_mode('VCP+MSC') # serial device + storage device (default) -##pyb.usb_mode('VCP+HID', hid=pyb.hid_keyboard) # serial device + keyboard +# import pyb +# pyb.usb_mode('VCP+HID') # serial device + mouse (UNCOMMENT THIS LINE!) +# pyb.usb_mode('VCP+MSC') # serial device + storage device (default) +# pyb.usb_mode('VCP+HID', hid=pyb.hid_keyboard) # serial device + keyboard # # Copy boot.py to the root of the uSD card and restart the camera, it should now # act as a serial device and a mouse. @@ -25,7 +25,7 @@ import time hid = pyb.USB_HID() -while(True): +while True: # x, y and scroll # move 10 pixels to the right hid.send((0, 10, 0, 0)) diff --git a/scripts/examples/09-OpenMV-Boards/00-Board-Control/usb_vcp.py b/scripts/examples/09-OpenMV-Boards/00-Board-Control/usb_vcp.py index 47a0aeba3..7c1c4fa53 100644 --- a/scripts/examples/09-OpenMV-Boards/00-Board-Control/usb_vcp.py +++ b/scripts/examples/09-OpenMV-Boards/00-Board-Control/usb_vcp.py @@ -16,24 +16,23 @@ # size = struct.unpack('/:port -#url = 'http://website.com:80/upload.php/' +# url = 'http://website.com:80/upload.php/' # SSL is supported. -#url = 'https://192.168.1.102:443/upload.php/' +# url = 'https://192.168.1.102:443/upload.php/' headers = { - 'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.12; rv:55.0) Gecko/20100101 Firefox/55.0', + "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10.12; rv:55.0) Gecko/20100101 Firefox/55.0", # Add more headers if needed } # Send some files files = { - 'image1': ('example1.jpg', open('example1.jpg', 'rb')), - 'image2': ('example2.jpg', open('example2.jpg', 'rb')), + "image1": ("example1.jpg", open("example1.jpg", "rb")), + "image2": ("example2.jpg", open("example2.jpg", "rb")), } # Post a request -if (True): +if True: # Send some files - r = urequests.post(url, files=files, headers=headers) #Can add auth=('username', 'password') if needed + r = urequests.post( + url, files=files, headers=headers + ) # Can add auth=('username', 'password') if needed else: # Or send some JSON data - r = urequests.post(url, json={'some': 'data'}, headers=headers) #Can add auth=('username', 'password') if needed + r = urequests.post( + url, json={"some": "data"}, headers=headers + ) # Can add auth=('username', 'password') if needed print(r.status_code, r.reason) print(r.headers, r.content) diff --git a/scripts/examples/09-OpenMV-Boards/01-WiFi-Shield/mjpeg_streamer.py b/scripts/examples/09-OpenMV-Boards/01-WiFi-Shield/mjpeg_streamer.py index e294c128d..bbe8599bb 100644 --- a/scripts/examples/09-OpenMV-Boards/01-WiFi-Shield/mjpeg_streamer.py +++ b/scripts/examples/09-OpenMV-Boards/01-WiFi-Shield/mjpeg_streamer.py @@ -8,11 +8,10 @@ import sensor import time import network import usocket -import sys -SSID ='' # Network SSID -KEY ='' # Network key -HOST ='' # Use first available interface +SSID = "" # Network SSID +KEY = "" # Network key +HOST = "" # Use first available interface PORT = 8080 # Arbitrary non-privileged port # Reset sensor @@ -38,43 +37,49 @@ s.listen(5) # Set server socket to blocking s.setblocking(True) + def start_streaming(s): - print ('Waiting for connections..') + print("Waiting for connections..") client, addr = s.accept() # set client socket timeout to 2s client.settimeout(2.0) - print ('Connected to ' + addr[0] + ':' + str(addr[1])) + print("Connected to " + addr[0] + ":" + str(addr[1])) # Read request from client data = client.recv(1024) # Should parse client request here # Send multipart header - client.send("HTTP/1.1 200 OK\r\n" \ - "Server: OpenMV\r\n" \ - "Content-Type: 
multipart/x-mixed-replace;boundary=openmv\r\n" \ - "Cache-Control: no-cache\r\n" \ - "Pragma: no-cache\r\n\r\n") + client.send( + "HTTP/1.1 200 OK\r\n" + "Server: OpenMV\r\n" + "Content-Type: multipart/x-mixed-replace;boundary=openmv\r\n" + "Cache-Control: no-cache\r\n" + "Pragma: no-cache\r\n\r\n" + ) # FPS clock clock = time.clock() # Start streaming images # NOTE: Disable IDE preview to increase streaming FPS. - while (True): - clock.tick() # Track elapsed milliseconds between snapshots(). + while True: + clock.tick() # Track elapsed milliseconds between snapshots(). frame = sensor.snapshot() cframe = frame.compressed(quality=35) - header = "\r\n--openmv\r\n" \ - "Content-Type: image/jpeg\r\n"\ - "Content-Length:"+str(cframe.size())+"\r\n\r\n" + header = ( + "\r\n--openmv\r\n" + "Content-Type: image/jpeg\r\n" + "Content-Length:" + str(cframe.size()) + "\r\n\r\n" + ) client.send(header) client.send(cframe) print(clock.fps()) -while (True): + +while True: try: start_streaming(s) except OSError as e: print("socket error: ", e) - #sys.print_exception(e) + # sys.print_exception(e) diff --git a/scripts/examples/09-OpenMV-Boards/01-WiFi-Shield/mjpeg_streamer_ap.py b/scripts/examples/09-OpenMV-Boards/01-WiFi-Shield/mjpeg_streamer_ap.py index 25de389cf..58fa586b2 100644 --- a/scripts/examples/09-OpenMV-Boards/01-WiFi-Shield/mjpeg_streamer_ap.py +++ b/scripts/examples/09-OpenMV-Boards/01-WiFi-Shield/mjpeg_streamer_ap.py @@ -8,12 +8,11 @@ import sensor import time import network import usocket -import sys -SSID ='OPENMV_AP' # Network SSID -KEY ='1234567890' # Network key (must be 10 chars) -HOST = '' # Use first available interface -PORT = 8080 # Arbitrary non-privileged port +SSID = "OPENMV_AP" # Network SSID +KEY = "1234567890" # Network key (must be 10 chars) +HOST = "" # Use first available interface +PORT = 8080 # Arbitrary non-privileged port # Reset sensor sensor.reset() @@ -26,43 +25,49 @@ wlan.start_ap(SSID, key=KEY, security=wlan.WEP, channel=2) print("AP mode started. SSID: {} IP: {}".format(SSID, wlan.ifconfig()[0])) # You can block waiting for client to connect -#print(wlan.wait_for_sta(10000)) +# print(wlan.wait_for_sta(10000)) + def start_streaming(s): - print ('Waiting for connections..') + print("Waiting for connections..") client, addr = s.accept() # set client socket timeout to 2s client.settimeout(2.0) - print ('Connected to ' + addr[0] + ':' + str(addr[1])) + print("Connected to " + addr[0] + ":" + str(addr[1])) # Read request from client data = client.recv(1024) # Should parse client request here # Send multipart header - client.send("HTTP/1.1 200 OK\r\n" \ - "Server: OpenMV\r\n" \ - "Content-Type: multipart/x-mixed-replace;boundary=openmv\r\n" \ - "Cache-Control: no-cache\r\n" \ - "Pragma: no-cache\r\n\r\n") + client.send( + "HTTP/1.1 200 OK\r\n" + "Server: OpenMV\r\n" + "Content-Type: multipart/x-mixed-replace;boundary=openmv\r\n" + "Cache-Control: no-cache\r\n" + "Pragma: no-cache\r\n\r\n" + ) # FPS clock clock = time.clock() # Start streaming images # NOTE: Disable IDE preview to increase streaming FPS. - while (True): - clock.tick() # Track elapsed milliseconds between snapshots(). + while True: + clock.tick() # Track elapsed milliseconds between snapshots(). 
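# (compressed(quality=35) returns a JPEG copy of the frame, so each multipart
# part sent below is one complete image; the lower quality keeps parts small
# enough to stream smoothly.)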
frame = sensor.snapshot() cframe = frame.compressed(quality=35) - header = "\r\n--openmv\r\n" \ - "Content-Type: image/jpeg\r\n"\ - "Content-Length:"+str(cframe.size())+"\r\n\r\n" + header = ( + "\r\n--openmv\r\n" + "Content-Type: image/jpeg\r\n" + "Content-Length:" + str(cframe.size()) + "\r\n\r\n" + ) client.send(header) client.send(cframe) print(clock.fps()) -while (True): + +while True: # Create server socket s = usocket.socket(usocket.AF_INET, usocket.SOCK_STREAM) try: @@ -78,4 +83,4 @@ while (True): except OSError as e: s.close() print("socket error: ", e) - #sys.print_exception(e) + # sys.print_exception(e) diff --git a/scripts/examples/09-OpenMV-Boards/01-WiFi-Shield/mjpeg_streamer_fir.py b/scripts/examples/09-OpenMV-Boards/01-WiFi-Shield/mjpeg_streamer_fir.py index 57f2f8628..b40d9f945 100644 --- a/scripts/examples/09-OpenMV-Boards/01-WiFi-Shield/mjpeg_streamer_fir.py +++ b/scripts/examples/09-OpenMV-Boards/01-WiFi-Shield/mjpeg_streamer_fir.py @@ -5,15 +5,14 @@ # connect to the IP address/port printed out from ifconfig. import sensor -import image import network import usocket import fir -SSID='' # Network SSID -KEY='' # Network key -HOST = '' # Use first available interface -PORT = 8000 # Arbitrary non-privileged port +SSID = "" # Network SSID +KEY = "" # Network key +HOST = "" # Use first available interface +PORT = 8000 # Arbitrary non-privileged port # Reset sensor sensor.reset() @@ -41,9 +40,9 @@ s.listen(5) # Set timeout to 1s s.settimeout(1.0) -print ('Waiting for connections..') +print("Waiting for connections..") client, addr = s.accept() -print ('Connected to ' + addr[0] + ':' + str(addr[1])) +print("Connected to " + addr[0] + ":" + str(addr[1])) # Read request from client data = client.recv(1024) @@ -51,14 +50,16 @@ data = client.recv(1024) # Should parse client request here # Send multipart header -client.send("HTTP/1.1 200 OK\r\n" \ - "Server: OpenMV\r\n" \ - "Content-Type: multipart/x-mixed-replace;boundary=openmv\r\n" \ - "Cache-Control: no-cache\r\n" \ - "Pragma: no-cache\r\n\r\n") +client.send( + "HTTP/1.1 200 OK\r\n" + "Server: OpenMV\r\n" + "Content-Type: multipart/x-mixed-replace;boundary=openmv\r\n" + "Cache-Control: no-cache\r\n" + "Pragma: no-cache\r\n\r\n" +) # Start streaming images -while (True): +while True: image = sensor.snapshot() # Capture FIR data @@ -72,14 +73,16 @@ while (True): fir.draw_ir(image, ir) # Draw ambient, min and max temperatures. 
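# (ta, to_min and to_max come from the fir.read() call earlier in this loop:
# the ambient temperature and the IR frame's min/max; the draw_string calls
# below overlay them before the image is JPEG-compressed.)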
- image.draw_string(0, 0, "Ta: %0.2f"%ta, color = (0xFF, 0x00, 0x00)) - image.draw_string(0, 8, "To min: %0.2f"%to_min, color = (0xFF, 0x00, 0x00)) - image.draw_string(0, 16, "To max: %0.2f"%to_max, color = (0xFF, 0x00, 0x00)) + image.draw_string(0, 0, "Ta: %0.2f" % ta, color=(0xFF, 0x00, 0x00)) + image.draw_string(0, 8, "To min: %0.2f" % to_min, color=(0xFF, 0x00, 0x00)) + image.draw_string(0, 16, "To max: %0.2f" % to_max, color=(0xFF, 0x00, 0x00)) cimage = image.compressed(quality=90) - client.send("\r\n--openmv\r\n" \ - "Content-Type: image/jpeg\r\n"\ - "Content-Length:"+str(cimage.size())+"\r\n\r\n") + client.send( + "\r\n--openmv\r\n" + "Content-Type: image/jpeg\r\n" + "Content-Length:" + str(cimage.size()) + "\r\n\r\n" + ) client.send(cimage) client.close() diff --git a/scripts/examples/09-OpenMV-Boards/01-WiFi-Shield/mqtt_pub.py b/scripts/examples/09-OpenMV-Boards/01-WiFi-Shield/mqtt_pub.py index edfeb9197..3893f20fe 100644 --- a/scripts/examples/09-OpenMV-Boards/01-WiFi-Shield/mqtt_pub.py +++ b/scripts/examples/09-OpenMV-Boards/01-WiFi-Shield/mqtt_pub.py @@ -11,8 +11,8 @@ import time import network from mqtt import MQTTClient -SSID='' # Network SSID -KEY='' # Network key +SSID = "" # Network SSID +KEY = "" # Network key # Init wlan module and connect to network print("Trying to connect... (may take a while)...") @@ -26,6 +26,6 @@ print(wlan.ifconfig()) client = MQTTClient("openmv", "test.mosquitto.org", port=1883) client.connect() -while (True): +while True: client.publish("openmv/test", "Hello World!") time.sleep_ms(1000) diff --git a/scripts/examples/09-OpenMV-Boards/01-WiFi-Shield/mqtt_sub.py b/scripts/examples/09-OpenMV-Boards/01-WiFi-Shield/mqtt_sub.py index 22022b529..1118c71d9 100644 --- a/scripts/examples/09-OpenMV-Boards/01-WiFi-Shield/mqtt_sub.py +++ b/scripts/examples/09-OpenMV-Boards/01-WiFi-Shield/mqtt_sub.py @@ -11,8 +11,8 @@ import time import network from mqtt import MQTTClient -SSID='' # Network SSID -KEY='' # Network key +SSID = "" # Network SSID +KEY = "" # Network key # Init wlan module and connect to network print("Trying to connect... (may take a while)...") @@ -26,13 +26,15 @@ print(wlan.ifconfig()) client = MQTTClient("openmv", "test.mosquitto.org", port=1883) client.connect() + def callback(topic, msg): print(topic, msg) + # must set callback first client.set_callback(callback) client.subscribe("openmv/test") -while (True): - client.check_msg() # poll for messages. +while True: + client.check_msg() # poll for messages. time.sleep_ms(1000) diff --git a/scripts/examples/09-OpenMV-Boards/01-WiFi-Shield/ntp.py b/scripts/examples/09-OpenMV-Boards/01-WiFi-Shield/ntp.py index 41150aea6..abf5b7cb8 100644 --- a/scripts/examples/09-OpenMV-Boards/01-WiFi-Shield/ntp.py +++ b/scripts/examples/09-OpenMV-Boards/01-WiFi-Shield/ntp.py @@ -7,10 +7,10 @@ import usocket import ustruct import utime -SSID='' # Network SSID -KEY='' # Network key +SSID = "" # Network SSID +KEY = "" # Network key -TIMESTAMP = 2208988800+946684800 +TIMESTAMP = 2208988800 + 946684800 # Init wlan module and connect to network print("Trying to connect... 
(may take a while)...") @@ -28,9 +28,9 @@ client = usocket.socket(usocket.AF_INET, usocket.SOCK_DGRAM) addr = usocket.getaddrinfo("pool.ntp.org", 123)[0][4] # Send query -client.sendto('\x1b' + 47 * '\0', addr) +client.sendto("\x1b" + 47 * "\0", addr) data, address = client.recvfrom(1024) # Print time t = ustruct.unpack(">IIIIIIIIIIII", data)[10] - TIMESTAMP -print ("Year:%d Month:%d Day:%d Time: %d:%d:%d" % (utime.localtime(t)[0:6])) +print("Year:%d Month:%d Day:%d Time: %d:%d:%d" % (utime.localtime(t)[0:6])) diff --git a/scripts/examples/09-OpenMV-Boards/01-WiFi-Shield/scan.py b/scripts/examples/09-OpenMV-Boards/01-WiFi-Shield/scan.py index fbd5a8762..fc92fd9d4 100644 --- a/scripts/examples/09-OpenMV-Boards/01-WiFi-Shield/scan.py +++ b/scripts/examples/09-OpenMV-Boards/01-WiFi-Shield/scan.py @@ -8,9 +8,9 @@ import network wlan = network.WINC() print("\nFirmware version:", wlan.fw_version()) -while (True): +while True: scan_result = wlan.scan() for ap in scan_result: - print("Channel:%d RSSI:%d Auth:%d BSSID:%s SSID:%s"%(ap)) + print("Channel:%d RSSI:%d Auth:%d BSSID:%s SSID:%s" % (ap)) print() time.sleep_ms(1000) diff --git a/scripts/examples/09-OpenMV-Boards/01-WiFi-Shield/static_ip.py b/scripts/examples/09-OpenMV-Boards/01-WiFi-Shield/static_ip.py index 8c5afd761..987d4dce1 100644 --- a/scripts/examples/09-OpenMV-Boards/01-WiFi-Shield/static_ip.py +++ b/scripts/examples/09-OpenMV-Boards/01-WiFi-Shield/static_ip.py @@ -7,17 +7,17 @@ import usocket import ustruct import utime -SSID='' # Network SSID -KEY='' # Network key +SSID = "" # Network SSID +KEY = "" # Network key -TIMESTAMP = 2208988800+946684800 +TIMESTAMP = 2208988800 + 946684800 # Init wlan module and connect to network print("Trying to connect... (may take a while)...") wlan = network.WINC() # ifconfig must be called before connect() -wlan.ifconfig(('192.168.1.200', '255.255.255.0', '192.168.1.1', '192.168.1.1')) +wlan.ifconfig(("192.168.1.200", "255.255.255.0", "192.168.1.1", "192.168.1.1")) wlan.connect(SSID, key=KEY, security=wlan.WPA_PSK) # Create new socket @@ -27,9 +27,9 @@ client = usocket.socket(usocket.AF_INET, usocket.SOCK_DGRAM) addr = usocket.getaddrinfo("pool.ntp.org", 123)[0][4] # Send query -client.sendto('\x1b' + 47 * '\0', addr) +client.sendto("\x1b" + 47 * "\0", addr) data, address = client.recvfrom(1024) # Print time t = ustruct.unpack(">IIIIIIIIIIII", data)[10] - TIMESTAMP -print ("Year:%d Month:%d Day:%d Time: %d:%d:%d" % (utime.localtime(t)[0:6])) +print("Year:%d Month:%d Day:%d Time: %d:%d:%d" % (utime.localtime(t)[0:6])) diff --git a/scripts/examples/09-OpenMV-Boards/02-LCD-Shield/lcd.py b/scripts/examples/09-OpenMV-Boards/02-LCD-Shield/lcd.py index cc70a210f..8516a6419 100644 --- a/scripts/examples/09-OpenMV-Boards/02-LCD-Shield/lcd.py +++ b/scripts/examples/09-OpenMV-Boards/02-LCD-Shield/lcd.py @@ -7,10 +7,10 @@ import sensor import lcd -sensor.reset() # Initialize the camera sensor. -sensor.set_pixformat(sensor.RGB565) # or sensor.GRAYSCALE -sensor.set_framesize(sensor.QQVGA2) # Special 128x160 framesize for LCD Shield. -lcd.init() # Initialize the lcd screen. +sensor.reset() # Initialize the camera sensor. +sensor.set_pixformat(sensor.RGB565) # or sensor.GRAYSCALE +sensor.set_framesize(sensor.QQVGA2) # Special 128x160 framesize for LCD Shield. +lcd.init() # Initialize the lcd screen. -while(True): - lcd.display(sensor.snapshot()) # Take a picture and display the image. +while True: + lcd.display(sensor.snapshot()) # Take a picture and display the image. 
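For context on the hunk above: the LCD shield example reduces to a snapshot-and-display loop because lcd.display() pushes a complete RGB565 frame to the shield on every call. A minimal sketch of the same loop with an FPS overlay, in the style of the other examples in this patch (the overlay position and format string are illustrative, not part of any shipped example):

import sensor
import time
import lcd

sensor.reset()  # Initialize the camera sensor.
sensor.set_pixformat(sensor.RGB565)
sensor.set_framesize(sensor.QQVGA2)  # Special 128x160 framesize for LCD Shield.
lcd.init()  # Initialize the lcd screen.
clock = time.clock()

while True:
    clock.tick()
    img = sensor.snapshot()
    img.draw_string(0, 0, "%.1f fps" % clock.fps())  # illustrative overlay
    lcd.display(img)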
diff --git a/scripts/examples/09-OpenMV-Boards/03-Servo-Shield/main.py b/scripts/examples/09-OpenMV-Boards/03-Servo-Shield/main.py index 124af8b1e..b3a9727f1 100644 --- a/scripts/examples/09-OpenMV-Boards/03-Servo-Shield/main.py +++ b/scripts/examples/09-OpenMV-Boards/03-Servo-Shield/main.py @@ -11,7 +11,7 @@ import time from servo import Servos from machine import I2C, Pin -i2c = I2C(sda=Pin('P5'), scl=Pin('P4')) +i2c = I2C(sda=Pin("P5"), scl=Pin("P4")) servo = Servos(i2c, address=0x40, freq=50, min_us=650, max_us=2800, degrees=180) while True: diff --git a/scripts/examples/09-OpenMV-Boards/03-Servo-Shield/pca9685.py b/scripts/examples/09-OpenMV-Boards/03-Servo-Shield/pca9685.py index 6cdab3d93..a31ead605 100644 --- a/scripts/examples/09-OpenMV-Boards/03-Servo-Shield/pca9685.py +++ b/scripts/examples/09-OpenMV-Boards/03-Servo-Shield/pca9685.py @@ -1,6 +1,7 @@ import utime import ustruct + class PCA9685: def __init__(self, i2c, address=0x40): self.i2c = i2c @@ -14,25 +15,25 @@ class PCA9685: return self.i2c.readfrom_mem(self.address, address, 1)[0] def reset(self): - self._write(0x00, 0x00) # Mode1 + self._write(0x00, 0x00) # Mode1 def freq(self, freq=None): if freq is None: - return int(25000000.0 / 4096 / (self._read(0xfe) - 0.5)) + return int(25000000.0 / 4096 / (self._read(0xFE) - 0.5)) prescale = int(25000000.0 / 4096.0 / freq + 0.5) - old_mode = self._read(0x00) # Mode 1 - self._write(0x00, (old_mode & 0x7F) | 0x10) # Mode 1, sleep - self._write(0xfe, prescale) # Prescale - self._write(0x00, old_mode) # Mode 1 + old_mode = self._read(0x00) # Mode 1 + self._write(0x00, (old_mode & 0x7F) | 0x10) # Mode 1, sleep + self._write(0xFE, prescale) # Prescale + self._write(0x00, old_mode) # Mode 1 utime.sleep_us(5) - self._write(0x00, old_mode | 0xa1) # Mode 1, autoincrement on + self._write(0x00, old_mode | 0xA1) # Mode 1, autoincrement on def pwm(self, index, on=None, off=None): if on is None or off is None: data = self.i2c.readfrom_mem(self.address, 0x06 + 4 * index, 4) - return ustruct.unpack(' 200, # Blue - lambda r, g, b: r > 200 and g < 70 and b < 70, # Red - lambda r, g, b: r > 200 and g < 70 and b > 200, # Purple - lambda r, g, b: r < 70 and g > 200 and b < 70, # Green - lambda r, g, b: r < 70 and g > 200 and b > 200, # Aqua - lambda r, g, b: r > 200 and g > 200 and b < 70, # Yellow - lambda r, g, b: r > 200 and g > 200 and b > 200] # White +t = [ + lambda r, g, b: r < 70 and g < 70 and b < 70, # Black + lambda r, g, b: r < 70 and g < 70 and b > 200, # Blue + lambda r, g, b: r > 200 and g < 70 and b < 70, # Red + lambda r, g, b: r > 200 and g < 70 and b > 200, # Purple + lambda r, g, b: r < 70 and g > 200 and b < 70, # Green + lambda r, g, b: r < 70 and g > 200 and b > 200, # Aqua + lambda r, g, b: r > 200 and g > 200 and b < 70, # Yellow + lambda r, g, b: r > 200 and g > 200 and b > 200, +] # White # color bars are inverted for OV7725 -if (sensor.get_id() == sensor.OV7725): +if sensor.get_id() == sensor.OV7725: t = t[::-1] # 320x240 image with 8 color bars each one is approx 40 pixels. @@ -44,13 +45,15 @@ if (sensor.get_id() == sensor.OV7725): # values of 10 sample pixels from the center of each color bar. 
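# (At 320x240 each of the 8 bars is about 40 pixels wide, so bar i is centered
# at x = 40*i + 20; sampling at y=120 keeps the probe on the vertical center
# line of the frame.)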
for i in range(0, 8): avg = (0, 0, 0) - idx = 40*i+20 # center of colorbars - for off in range(0, 10): # avg 10 pixels - rgb = image.get_pixel(idx+off, 120) + idx = 40 * i + 20 # center of colorbars + for off in range(0, 10): # avg 10 pixels + rgb = image.get_pixel(idx + off, 120) avg = tuple(map(sum, zip(avg, rgb))) - if not t[i](avg[0]/10, avg[1]/10, avg[2]/10): - raise Exception("COLOR BARS TEST FAILED. " - "BAR#(%d): RGB(%d,%d,%d)"%(i+1, avg[0]/10, avg[1]/10, avg[2]/10)) + if not t[i](avg[0] / 10, avg[1] / 10, avg[2] / 10): + raise Exception( + "COLOR BARS TEST FAILED. " + "BAR#(%d): RGB(%d,%d,%d)" % (i + 1, avg[0] / 10, avg[1] / 10, avg[2] / 10) + ) print("COLOR BARS TEST PASSED...") diff --git a/scripts/examples/09-OpenMV-Boards/99-Tests/fps.py b/scripts/examples/09-OpenMV-Boards/99-Tests/fps.py index cb3ec7e30..5917c36c8 100644 --- a/scripts/examples/09-OpenMV-Boards/99-Tests/fps.py +++ b/scripts/examples/09-OpenMV-Boards/99-Tests/fps.py @@ -2,14 +2,14 @@ import sensor import time -sensor.reset() # Initialize the camera sensor. -sensor.set_framesize(sensor.QQVGA) # or sensor.QQVGA (or others) -sensor.set_pixformat(sensor.RGB565) # or sensor.GRAYSCALE -sensor.set_colorbar(True) # Enable colorbars output +sensor.reset() # Initialize the camera sensor. +sensor.set_framesize(sensor.QQVGA) # or sensor.QQVGA (or others) +sensor.set_pixformat(sensor.RGB565) # or sensor.GRAYSCALE +sensor.set_colorbar(True) # Enable colorbars output -clock = time.clock() # Tracks FPS. +clock = time.clock() # Tracks FPS. for i in range(0, 600): - clock.tick() # Track elapsed milliseconds between snapshots(). - sensor.snapshot() # Capture snapshot. + clock.tick() # Track elapsed milliseconds between snapshots(). + sensor.snapshot() # Capture snapshot. print("FPS:", clock.fps()) diff --git a/scripts/examples/09-OpenMV-Boards/99-Tests/selftest.py b/scripts/examples/09-OpenMV-Boards/99-Tests/selftest.py index 0250e19b0..a356e0775 100644 --- a/scripts/examples/09-OpenMV-Boards/99-Tests/selftest.py +++ b/scripts/examples/09-OpenMV-Boards/99-Tests/selftest.py @@ -4,24 +4,25 @@ # of the factory. Every OpenMV Cam should pass this test. 
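# (The selftest first checks the internal ADC rails, expecting VBAT near 3.3V
# and VREF near 1.2V, each within 0.1V, and then verifies the sensor's
# color-bar output.)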
import sensor -import time import pyb + def test_int_adc(): - adc = pyb.ADCAll(12) + adc = pyb.ADCAll(12) # Test VBAT vbat = adc.read_core_vbat() - vbat_diff = abs(vbat-3.3) - if (vbat_diff > 0.1): - raise Exception('INTERNAL ADC TEST FAILED VBAT=%fv'%vbat) + vbat_diff = abs(vbat - 3.3) + if vbat_diff > 0.1: + raise Exception("INTERNAL ADC TEST FAILED VBAT=%fv" % vbat) # Test VREF vref = adc.read_core_vref() - vref_diff = abs(vref-1.2) - if (vref_diff > 0.1): - raise Exception('INTERNAL ADC TEST FAILED VREF=%fv'%vref) + vref_diff = abs(vref - 1.2) + if vref_diff > 0.1: + raise Exception("INTERNAL ADC TEST FAILED VREF=%fv" % vref) adc = None - print('INTERNAL ADC TEST PASSED...') + print("INTERNAL ADC TEST PASSED...") + def test_color_bars(): sensor.reset() @@ -42,38 +43,43 @@ def test_color_bars(): for i in range(0, 100): image = sensor.snapshot() - #color bars thresholds - t = [lambda r, g, b: r < 70 and g < 70 and b < 70, # Black - lambda r, g, b: r < 70 and g < 70 and b > 200, # Blue - lambda r, g, b: r > 200 and g < 70 and b < 70, # Red - lambda r, g, b: r > 200 and g < 70 and b > 200, # Purple - lambda r, g, b: r < 70 and g > 200 and b < 70, # Green - lambda r, g, b: r < 70 and g > 200 and b > 200, # Aqua - lambda r, g, b: r > 200 and g > 200 and b < 70, # Yellow - lambda r, g, b: r > 200 and g > 200 and b > 200] # White + # color bars thresholds + t = [ + lambda r, g, b: r < 70 and g < 70 and b < 70, # Black + lambda r, g, b: r < 70 and g < 70 and b > 200, # Blue + lambda r, g, b: r > 200 and g < 70 and b < 70, # Red + lambda r, g, b: r > 200 and g < 70 and b > 200, # Purple + lambda r, g, b: r < 70 and g > 200 and b < 70, # Green + lambda r, g, b: r < 70 and g > 200 and b > 200, # Aqua + lambda r, g, b: r > 200 and g > 200 and b < 70, # Yellow + lambda r, g, b: r > 200 and g > 200 and b > 200, + ] # White # color bars are inverted for OV7725 - if (sensor.get_id() == sensor.OV7725): + if sensor.get_id() == sensor.OV7725: t = t[::-1] - #320x240 image with 8 color bars each one is approx 40 pixels. - #we start from the center of the frame buffer, and average the - #values of 10 sample pixels from the center of each color bar. + # 320x240 image with 8 color bars each one is approx 40 pixels. + # we start from the center of the frame buffer, and average the + # values of 10 sample pixels from the center of each color bar. for i in range(0, 8): avg = (0, 0, 0) - idx = 40*i+20 #center of colorbars - for off in range(0, 10): #avg 10 pixels - rgb = image.get_pixel(idx+off, 120) + idx = 40 * i + 20 # center of colorbars + for off in range(0, 10): # avg 10 pixels + rgb = image.get_pixel(idx + off, 120) avg = tuple(map(sum, zip(avg, rgb))) - if not t[i](avg[0]/10, avg[1]/10, avg[2]/10): - raise Exception('COLOR BARS TEST FAILED.' - 'BAR#(%d): RGB(%d,%d,%d)'%(i+1, avg[0]/10, avg[1]/10, avg[2]/10)) + if not t[i](avg[0] / 10, avg[1] / 10, avg[2] / 10): + raise Exception( + "COLOR BARS TEST FAILED." + "BAR#(%d): RGB(%d,%d,%d)" + % (i + 1, avg[0] / 10, avg[1] / 10, avg[2] / 10) + ) - print('COLOR BARS TEST PASSED...') + print("COLOR BARS TEST PASSED...") -if __name__ == '__main__': - print('') + +if __name__ == "__main__": + print("") test_int_adc() test_color_bars() - diff --git a/scripts/examples/09-OpenMV-Boards/99-Tests/unittests.py b/scripts/examples/09-OpenMV-Boards/99-Tests/unittests.py index 1fd2fbd3b..31058e69f 100644 --- a/scripts/examples/09-OpenMV-Boards/99-Tests/unittests.py +++ b/scripts/examples/09-OpenMV-Boards/99-Tests/unittests.py @@ -1,25 +1,26 @@ # OpenMV Unit Tests. 
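# (Each script under unittest/script is exec()'d and must define a
# unittest(data_dir, temp_dir) function; returning False or raising marks the
# test FAILED, unless the exception text contains "unavailable", which marks
# it DISABLED.)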
# import os -import sensor import gc -TEST_DIR = "unittest" -TEMP_DIR = "unittest/temp" -DATA_DIR = "unittest/data" -SCRIPT_DIR = "unittest/script" +TEST_DIR = "unittest" +TEMP_DIR = "unittest/temp" +DATA_DIR = "unittest/data" +SCRIPT_DIR = "unittest/script" if not (TEST_DIR in os.listdir("")): - raise Exception('Unittest dir not found!') + raise Exception("Unittest dir not found!") print("") test_failed = False + def print_result(test, result): - s = "Unittest (%s)"%(test) - padding = "."*(60-len(s)) + s = "Unittest (%s)" % (test) + padding = "." * (60 - len(s)) print(s + padding + result) + for test in sorted(os.listdir(SCRIPT_DIR)): if test.endswith(".py"): test_result = "PASSED" @@ -27,14 +28,14 @@ for test in sorted(os.listdir(SCRIPT_DIR)): try: exec(open(test_path).read()) gc.collect() - if unittest(DATA_DIR, TEMP_DIR) == False: + if unittest(DATA_DIR, TEMP_DIR) is False: raise Exception() except Exception as e: if "unavailable" in str(e): - test_result = "DISABLED" + test_result = "DISABLED" else: test_failed = True - test_result = "FAILED" + test_result = "FAILED" print_result(test, test_result) if test_failed: diff --git a/scripts/examples/09-OpenMV-Boards/main.py b/scripts/examples/09-OpenMV-Boards/main.py index eb113fee7..9de30e084 100644 --- a/scripts/examples/09-OpenMV-Boards/main.py +++ b/scripts/examples/09-OpenMV-Boards/main.py @@ -7,11 +7,11 @@ import time import pyb -led = pyb.LED(3) # Red LED = 1, Green LED = 2, Blue LED = 3, IR LEDs = 4. -usb = pyb.USB_VCP() # This is a serial port object that allows you to +led = pyb.LED(3) # Red LED = 1, Green LED = 2, Blue LED = 3, IR LEDs = 4. +usb = pyb.USB_VCP() # This is a serial port object that allows you to # communicate with your computer. While it is not open the code below runs. -while(not usb.isconnected()): +while not usb.isconnected(): led.on() time.sleep_ms(150) led.off() @@ -21,9 +21,9 @@ while(not usb.isconnected()): led.off() time.sleep_ms(600) -led = pyb.LED(2) # Switch to using the green LED. +led = pyb.LED(2) # Switch to using the green LED. -while(usb.isconnected()): +while usb.isconnected(): led.on() time.sleep_ms(150) led.off() diff --git a/scripts/examples/10-Arduino-Boards/Nano-33-BLE-Sense/00-Board-Control/blinky.py b/scripts/examples/10-Arduino-Boards/Nano-33-BLE-Sense/00-Board-Control/blinky.py index 9e3188ed9..000d401bf 100644 --- a/scripts/examples/10-Arduino-Boards/Nano-33-BLE-Sense/00-Board-Control/blinky.py +++ b/scripts/examples/10-Arduino-Boards/Nano-33-BLE-Sense/00-Board-Control/blinky.py @@ -8,7 +8,7 @@ led_green = LED(2) led_blue = LED(3) led_yellow = LED(4) -while (True): +while True: led_blue.on() time.sleep_ms(250) led_blue.off() @@ -26,4 +26,3 @@ while (True): led_yellow.off() time.sleep_ms(500) - diff --git a/scripts/examples/10-Arduino-Boards/Nano-33-BLE-Sense/00-Board-Control/i2c_scanner.py b/scripts/examples/10-Arduino-Boards/Nano-33-BLE-Sense/00-Board-Control/i2c_scanner.py index 9e04878c7..958a3bcb2 100644 --- a/scripts/examples/10-Arduino-Boards/Nano-33-BLE-Sense/00-Board-Control/i2c_scanner.py +++ b/scripts/examples/10-Arduino-Boards/Nano-33-BLE-Sense/00-Board-Control/i2c_scanner.py @@ -6,14 +6,13 @@ # LSM9DS1 0x1E # LSM9DS1 0x6B # APDS9960 0x39 -import time from machine import Pin, I2C -i2c_list = [None, None] +i2c_list = [None, None] i2c_list[0] = I2C(0, scl=Pin(2), sda=Pin(31)) i2c_list[1] = I2C(1, scl=Pin(15), sda=Pin(14)) for bus in range(0, 2): - print("\nScanning bus %d..."%(bus)) + print("\nScanning bus %d..." % (bus)) for addr in i2c_list[bus].scan(): - print("Found device at addres %d:0x%x" %(bus, addr)) + print("Found device at address %d:0x%x" % (bus, addr)) diff --git a/scripts/examples/10-Arduino-Boards/Nano-33-BLE-Sense/01-Sensors/apds9960/ambient.py b/scripts/examples/10-Arduino-Boards/Nano-33-BLE-Sense/01-Sensors/apds9960/ambient.py index 84e067462..b624dd36b 100644 --- a/scripts/examples/10-Arduino-Boards/Nano-33-BLE-Sense/01-Sensors/apds9960/ambient.py +++ b/scripts/examples/10-Arduino-Boards/Nano-33-BLE-Sense/01-Sensors/apds9960/ambient.py @@ -1,6 +1,5 @@ from time import sleep_ms from machine import Pin, I2C -from apds9960.const import * from apds9960 import uAPDS9960 as APDS9960 bus = I2C(1, sda=Pin(13), scl=Pin(14)) diff --git a/scripts/examples/10-Arduino-Boards/Nano-33-BLE-Sense/01-Sensors/apds9960/gesture.py b/scripts/examples/10-Arduino-Boards/Nano-33-BLE-Sense/01-Sensors/apds9960/gesture.py index 04191f491..c90b3ab15 100644 --- a/scripts/examples/10-Arduino-Boards/Nano-33-BLE-Sense/01-Sensors/apds9960/gesture.py +++ b/scripts/examples/10-Arduino-Boards/Nano-33-BLE-Sense/01-Sensors/apds9960/gesture.py @@ -28,4 +28,3 @@ while True: if apds.isGestureAvailable(): motion = apds.readGesture() print("Gesture={}".format(dirs.get(motion, "unknown"))) - diff --git a/scripts/examples/10-Arduino-Boards/Nano-33-BLE-Sense/01-Sensors/apds9960/proximity.py b/scripts/examples/10-Arduino-Boards/Nano-33-BLE-Sense/01-Sensors/apds9960/proximity.py index a4e185002..c70df1845 100644 --- a/scripts/examples/10-Arduino-Boards/Nano-33-BLE-Sense/01-Sensors/apds9960/proximity.py +++ b/scripts/examples/10-Arduino-Boards/Nano-33-BLE-Sense/01-Sensors/apds9960/proximity.py @@ -1,7 +1,6 @@ from time import sleep_ms from machine import Pin, I2C -from apds9960.const import * from apds9960 import uAPDS9960 as APDS9960 bus = I2C(1, sda=Pin(13), scl=Pin(14)) diff --git a/scripts/examples/10-Arduino-Boards/Nano-33-BLE-Sense/01-Sensors/hts221.py b/scripts/examples/10-Arduino-Boards/Nano-33-BLE-Sense/01-Sensors/hts221.py index f813fb2e0..a0eea49d1 100644 --- a/scripts/examples/10-Arduino-Boards/Nano-33-BLE-Sense/01-Sensors/hts221.py +++ b/scripts/examples/10-Arduino-Boards/Nano-33-BLE-Sense/01-Sensors/hts221.py @@ -5,8 +5,8 @@ from machine import Pin, I2C bus = I2C(1, scl=Pin(15), sda=Pin(14)) hts = hts221.HTS221(bus) -while (True): - rH = hts.humidity() +while True: + rH = hts.humidity() temp = hts.temperature() - print ("rH: %.2f%% T: %.2fC" %(rH, temp)) + print("rH: %.2f%% T: %.2fC" % (rH, temp)) time.sleep_ms(100) diff --git a/scripts/examples/10-Arduino-Boards/Nano-33-BLE-Sense/01-Sensors/lps22.py b/scripts/examples/10-Arduino-Boards/Nano-33-BLE-Sense/01-Sensors/lps22.py index 448df986e..1fa269c35 100644 --- a/scripts/examples/10-Arduino-Boards/Nano-33-BLE-Sense/01-Sensors/lps22.py +++ b/scripts/examples/10-Arduino-Boards/Nano-33-BLE-Sense/01-Sensors/lps22.py @@ -5,8 +5,8 @@ from machine import Pin, I2C bus = I2C(1, scl=Pin(15), sda=Pin(14)) lps = lps22h.LPS22H(bus) -while (True): +while True: pressure = lps.pressure() temperature = lps.temperature() - print("Pressure: %.2f hPa Temperature: %.2f C"%(pressure, temperature)) + print("Pressure: %.2f hPa Temperature: %.2f C" % (pressure, temperature)) time.sleep_ms(100) diff --git a/scripts/examples/10-Arduino-Boards/Nano-33-BLE-Sense/01-Sensors/lsm9ds1.py b/scripts/examples/10-Arduino-Boards/Nano-33-BLE-Sense/01-Sensors/lsm9ds1.py index efb9be31c..89e7e4adb 100644 --- a/scripts/examples/10-Arduino-Boards/Nano-33-BLE-Sense/01-Sensors/lsm9ds1.py +++
b/scripts/examples/10-Arduino-Boards/Nano-33-BLE-Sense/01-Sensors/lsm9ds1.py @@ -5,10 +5,10 @@ from machine import Pin, I2C bus = I2C(1, scl=Pin(15), sda=Pin(14)) lsm = lsm9ds1.LSM9DS1(bus) -while (True): - #for g,a in lsm.iter_accel_gyro(): print(g,a) # using fifo - print('Accelerometer: x:{:>8.3f} y:{:>8.3f} z:{:>8.3f}'.format(*lsm.read_accel())) - print('Magnetometer: x:{:>8.3f} y:{:>8.3f} z:{:>8.3f}'.format(*lsm.read_magnet())) - print('Gyroscope: x:{:>8.3f} y:{:>8.3f} z:{:>8.3f}'.format(*lsm.read_gyro())) +while True: + # for g,a in lsm.iter_accel_gyro(): print(g,a) # using fifo + print("Accelerometer: x:{:>8.3f} y:{:>8.3f} z:{:>8.3f}".format(*lsm.read_accel())) + print("Magnetometer: x:{:>8.3f} y:{:>8.3f} z:{:>8.3f}".format(*lsm.read_magnet())) + print("Gyroscope: x:{:>8.3f} y:{:>8.3f} z:{:>8.3f}".format(*lsm.read_gyro())) print("") time.sleep_ms(500) diff --git a/scripts/examples/10-Arduino-Boards/Nano-33-BLE-Sense/02-Audio/audio_fft.py b/scripts/examples/10-Arduino-Boards/Nano-33-BLE-Sense/02-Audio/audio_fft.py index a53693dcc..acd12e29e 100644 --- a/scripts/examples/10-Arduino-Boards/Nano-33-BLE-Sense/02-Audio/audio_fft.py +++ b/scripts/examples/10-Arduino-Boards/Nano-33-BLE-Sense/02-Audio/audio_fft.py @@ -1,52 +1,63 @@ import image import audio -import time from ulab import numpy as np -from ulab import scipy as sp from ulab import utils CHANNELS = 1 -SIZE = 256//(2*CHANNELS) +SIZE = 256 // (2 * CHANNELS) raw_buf = None -fb = image.Image(SIZE+50, SIZE, image.RGB565, copy_to_fb=True) +fb = image.Image(SIZE + 50, SIZE, image.RGB565, copy_to_fb=True) audio.init(channels=CHANNELS, frequency=16000, gain_db=80, highpass=0.9883) + def audio_callback(buf): # NOTE: do Not call any function that allocates memory. global raw_buf - if (raw_buf == None): + if raw_buf is None: raw_buf = buf + # Start audio streaming audio.start_streaming(audio_callback) + def draw_fft(img, fft_buf): fft_buf = (fft_buf / max(fft_buf)) * SIZE fft_buf = np.log10(fft_buf + 1) * 20 color = (0xFF, 0x0F, 0x00) for i in range(0, SIZE): - img.draw_line(i, SIZE, i, SIZE-int(fft_buf[i]), color, 1) + img.draw_line(i, SIZE, i, SIZE - int(fft_buf[i]), color, 1) + def draw_audio_bar(img, level, offset): - blk_size = SIZE//10 + blk_size = SIZE // 10 color = (0xFF, 0x00, 0xF0) - blk_space = (blk_size//4) - for i in range(0, int(round(level/10))): - fb.draw_rectangle(SIZE+offset, SIZE - ((i+1)*blk_size) + blk_space, 20, blk_size - blk_space, color, 1, True) + blk_space = blk_size // 4 + for i in range(0, int(round(level / 10))): + fb.draw_rectangle( + SIZE + offset, + SIZE - ((i + 1) * blk_size) + blk_space, + 20, + blk_size - blk_space, + color, + 1, + True, + ) -while (True): - if (raw_buf != None): + +while True: + if raw_buf is not None: pcm_buf = np.frombuffer(raw_buf, dtype=np.int16) raw_buf = None if CHANNELS == 1: fft_buf = utils.spectrogram(pcm_buf) - l_lvl = int((np.mean(abs(pcm_buf[1::2])) / 32768)*100) + l_lvl = int((np.mean(abs(pcm_buf[1::2])) / 32768) * 100) else: fft_buf = utils.spectrogram(pcm_buf[0::2]) - l_lvl = int((np.mean(abs(pcm_buf[1::2])) / 32768)*100) - r_lvl = int((np.mean(abs(pcm_buf[0::2])) / 32768)*100) + l_lvl = int((np.mean(abs(pcm_buf[1::2])) / 32768) * 100) + r_lvl = int((np.mean(abs(pcm_buf[0::2])) / 32768) * 100) fb.clear() draw_fft(fb, fft_buf) diff --git a/scripts/examples/10-Arduino-Boards/Nano-33-BLE-Sense/03-Bluetooth/ble_blinky.py b/scripts/examples/10-Arduino-Boards/Nano-33-BLE-Sense/03-Bluetooth/ble_blinky.py index 6c684aea1..198d5d0c5 100644 --- 
a/scripts/examples/10-Arduino-Boards/Nano-33-BLE-Sense/03-Bluetooth/ble_blinky.py +++ b/scripts/examples/10-Arduino-Boards/Nano-33-BLE-Sense/03-Bluetooth/ble_blinky.py @@ -4,6 +4,7 @@ import time from board import LED from ubluepy import Service, Characteristic, UUID, Peripheral, constants + def event_handler(id, handle, data): global periph global service @@ -15,12 +16,13 @@ def event_handler(id, handle, data): elif id == constants.EVT_GATTS_WRITE: LED(1).on() if int(data[0]) else LED(1).off() + # start off with LED(1) off LED(1).off() notif_enabled = False uuid_service = UUID("0x1523") -uuid_led = UUID("0x1525") +uuid_led = UUID("0x1525") service = Service(uuid_service) char_led = Characteristic(uuid_led, props=Characteristic.PROP_WRITE) @@ -31,5 +33,5 @@ periph.addService(service) periph.setConnectionHandler(event_handler) periph.advertise(device_name="Nano Blinky", services=[service]) -while (True): +while True: time.sleep_ms(500) diff --git a/scripts/examples/10-Arduino-Boards/Nano-33-BLE-Sense/03-Bluetooth/ble_scan.py b/scripts/examples/10-Arduino-Boards/Nano-33-BLE-Sense/03-Bluetooth/ble_scan.py index ccacdc2d5..bce99a67d 100644 --- a/scripts/examples/10-Arduino-Boards/Nano-33-BLE-Sense/03-Bluetooth/ble_scan.py +++ b/scripts/examples/10-Arduino-Boards/Nano-33-BLE-Sense/03-Bluetooth/ble_scan.py @@ -1,12 +1,14 @@ import time from ubluepy import Scanner, constants + def bytes_to_str(bytes): string = "" for b in bytes: string += chr(b) return string + def get_device_names(scan_entries): dev_names = [] print(len(scan_entries)) @@ -18,6 +20,7 @@ def get_device_names(scan_entries): dev_names.append((e, bytes_to_str(s[2]))) return dev_names + def find_device_by_name(name): s = Scanner() scan_res = s.scan(1000) @@ -26,7 +29,8 @@ def find_device_by_name(name): if name == dev[1]: return dev[0] -while (True): + +while True: res = find_device_by_name("micr") if res: print("address:", res.addr()) diff --git a/scripts/examples/10-Arduino-Boards/Nano-33-BLE-Sense/03-Bluetooth/ble_temperature.py b/scripts/examples/10-Arduino-Boards/Nano-33-BLE-Sense/03-Bluetooth/ble_temperature.py index 1bf7093e0..7b3cb3575 100644 --- a/scripts/examples/10-Arduino-Boards/Nano-33-BLE-Sense/03-Bluetooth/ble_temperature.py +++ b/scripts/examples/10-Arduino-Boards/Nano-33-BLE-Sense/03-Bluetooth/ble_temperature.py @@ -6,6 +6,7 @@ from board import LED from machine import Pin, I2C from ubluepy import Service, Characteristic, UUID, Peripheral, constants + def event_handler(id, handle, data): global periph, service, notif_enabled @@ -24,15 +25,16 @@ def event_handler(id, handle, data): else: notif_enabled = False + # start off with LED(1) off LED(1).off() notif_enabled = False uuid_service = UUID("0x181A") # Environmental Sensing service -uuid_temp = UUID("0x2A6E") # Temperature characteristic +uuid_temp = UUID("0x2A6E") # Temperature characteristic service = Service(uuid_service) -temp_props = Characteristic.PROP_READ|Characteristic.PROP_NOTIFY +temp_props = Characteristic.PROP_READ | Characteristic.PROP_NOTIFY temp_attrs = Characteristic.ATTR_CCCD temp_char = Characteristic(uuid_temp, props=temp_props, attrs=temp_attrs) service.addCharacteristic(temp_char) @@ -45,8 +47,8 @@ periph.advertise(device_name="Temperature Sensor", services=[service]) bus = I2C(1, scl=Pin(15), sda=Pin(14)) hts = hts221.HTS221(bus) -while (True): +while True: if notif_enabled: - temp = int(hts.temperature()*100) + temp = int(hts.temperature() * 100) temp_char.write(bytearray([temp & 0xFF, temp >> 8])) - time.sleep_ms(100) + time.sleep_ms(100) diff 
--git a/scripts/examples/10-Arduino-Boards/Nano-33-BLE-Sense/04-Thermal/thermal_camera.py b/scripts/examples/10-Arduino-Boards/Nano-33-BLE-Sense/04-Thermal/thermal_camera.py index 90b2b5dae..2ad32e4f0 100644 --- a/scripts/examples/10-Arduino-Boards/Nano-33-BLE-Sense/04-Thermal/thermal_camera.py +++ b/scripts/examples/10-Arduino-Boards/Nano-33-BLE-Sense/04-Thermal/thermal_camera.py @@ -7,36 +7,40 @@ import image import time import fir -drawing_hint = image.BICUBIC # or image.BILINEAR or 0 (nearest neighbor) +drawing_hint = image.BICUBIC # or image.BILINEAR or 0 (nearest neighbor) # Initialize the thermal sensor fir.init() w = fir.width() h = fir.height() -if (fir.type() == fir.FIR_MLX90621): +if fir.type() == fir.FIR_MLX90621: w = w * 5 h = h * 5 -elif (fir.type() == fir.FIR_MLX90640): +elif fir.type() == fir.FIR_MLX90640: w = w * 5 h = h * 5 -elif (fir.type() == fir.FIR_MLX90641): +elif fir.type() == fir.FIR_MLX90641: w = w * 5 h = h * 5 -elif (fir.type() == fir.FIR_AMG8833): +elif fir.type() == fir.FIR_AMG8833: w = w * 10 h = h * 10 # FPS clock clock = time.clock() -while (True): +while True: clock.tick() try: - img = fir.snapshot(x_size=w, y_size=h, - color_palette=fir.PALETTE_IRONBOW, hint=drawing_hint, - copy_to_fb=True) + img = fir.snapshot( + x_size=w, + y_size=h, + color_palette=fir.PALETTE_IRONBOW, + hint=drawing_hint, + copy_to_fb=True, + ) except OSError: continue diff --git a/scripts/examples/10-Arduino-Boards/Nano-RP2040/00-Board-Control/blinky.py b/scripts/examples/10-Arduino-Boards/Nano-RP2040/00-Board-Control/blinky.py index 6a4c54232..aa9098f92 100644 --- a/scripts/examples/10-Arduino-Boards/Nano-RP2040/00-Board-Control/blinky.py +++ b/scripts/examples/10-Arduino-Boards/Nano-RP2040/00-Board-Control/blinky.py @@ -7,7 +7,7 @@ from machine import Pin # other than the RGB LED connected to Nina WiFi module. led = Pin(6, Pin.OUT) -while (True): +while True: led.on() time.sleep_ms(250) led.off() diff --git a/scripts/examples/10-Arduino-Boards/Nano-RP2040/00-Board-Control/i2c_scanner.py b/scripts/examples/10-Arduino-Boards/Nano-RP2040/00-Board-Control/i2c_scanner.py index 7a07fd3f6..e396043b8 100644 --- a/scripts/examples/10-Arduino-Boards/Nano-RP2040/00-Board-Control/i2c_scanner.py +++ b/scripts/examples/10-Arduino-Boards/Nano-RP2040/00-Board-Control/i2c_scanner.py @@ -5,14 +5,14 @@ # ATECC608A 0x60 # LSM6DSOX 0x6A -import time -from machine import Pin, I2C +from machine import Pin +from machine import I2C -i2c_list = [None, None] +i2c_list = [None, None] i2c_list[0] = I2C(0, scl=Pin(13), sda=Pin(12), freq=100_000) i2c_list[1] = I2C(1, scl=Pin(7), sda=Pin(6), freq=100_000) for bus in range(0, 2): - print("\nScanning bus %d..."%(bus)) + print("\nScanning bus %d..." 
% (bus)) for addr in i2c_list[bus].scan(): - print("Found device at addres %d:0x%x" %(bus, addr)) + print("Found device at address %d:0x%x" % (bus, addr)) diff --git a/scripts/examples/10-Arduino-Boards/Nano-RP2040/01-Sensors/lsm6dsox_basic.py b/scripts/examples/10-Arduino-Boards/Nano-RP2040/01-Sensors/lsm6dsox_basic.py index 29146b0f8..a196d9046 100644 --- a/scripts/examples/10-Arduino-Boards/Nano-RP2040/01-Sensors/lsm6dsox_basic.py +++ b/scripts/examples/10-Arduino-Boards/Nano-RP2040/01-Sensors/lsm6dsox_basic.py @@ -3,10 +3,11 @@ import time from lsm6dsox import LSM6DSOX from machine import Pin, I2C + lsm = LSM6DSOX(I2C(0, scl=Pin(13), sda=Pin(12))) -while (True): - print('Accelerometer: x:{:>8.3f} y:{:>8.3f} z:{:>8.3f}'.format(*lsm.read_accel())) - print('Gyroscope: x:{:>8.3f} y:{:>8.3f} z:{:>8.3f}'.format(*lsm.read_gyro())) +while True: + print("Accelerometer: x:{:>8.3f} y:{:>8.3f} z:{:>8.3f}".format(*lsm.read_accel())) + print("Gyroscope: x:{:>8.3f} y:{:>8.3f} z:{:>8.3f}".format(*lsm.read_gyro())) print("") time.sleep_ms(100) diff --git a/scripts/examples/10-Arduino-Boards/Nano-RP2040/01-Sensors/lsm6dsox_mlc.py b/scripts/examples/10-Arduino-Boards/Nano-RP2040/01-Sensors/lsm6dsox_mlc.py index 105b81159..e441b3101 100644 --- a/scripts/examples/10-Arduino-Boards/Nano-RP2040/01-Sensors/lsm6dsox_mlc.py +++ b/scripts/examples/10-Arduino-Boards/Nano-RP2040/01-Sensors/lsm6dsox_mlc.py @@ -4,18 +4,20 @@ # NOTE: The pre-trained models (UCF files) for the examples can be found here: # https://github.com/STMicroelectronics/STMems_Machine_Learning_Core/tree/master/application_examples/lsm6dsox -import time +from machine import Pin +from machine import I2C from lsm6dsox import LSM6DSOX -from machine import Pin, I2C -INT_MODE = True # Run in interrupt mode. -INT_FLAG = False # Set True on interrupt. +INT_MODE = True # Run in interrupt mode. +INT_FLAG = False # Set True on interrupt. + def imu_int_handler(pin): global INT_FLAG INT_FLAG = True -if (INT_MODE == True): + +if INT_MODE is True: int_pin = Pin(24, mode=Pin.IN, pull=Pin.PULL_UP) int_pin.irq(handler=imu_int_handler, trigger=Pin.IRQ_RISING) @@ -23,24 +25,26 @@ i2c = I2C(0, scl=Pin(13), sda=Pin(12)) # Vibration detection example UCF_FILE = "lsm6dsox_vibration_monitoring.ucf" -UCF_LABELS = {0:"no vibration", 1:"low vibration", 2:"high vibration"} +UCF_LABELS = {0: "no vibration", 1: "low vibration", 2: "high vibration"} # NOTE: Selected data rate and scale must match the MLC data rate and scale. -lsm = LSM6DSOX(i2c, gyro_odr=26, accel_odr=26, gyro_scale=2000, accel_scale=4, ucf=UCF_FILE) +lsm = LSM6DSOX( + i2c, gyro_odr=26, accel_odr=26, gyro_scale=2000, accel_scale=4, ucf=UCF_FILE +) # Head gestures example -#UCF_FILE = "lsm6dsox_head_gestures.ucf" -#UCF_LABELS = {0:"Nod", 1:"Shake", 2:"Stationary", 3:"Swing", 4:"Walk"} +# UCF_FILE = "lsm6dsox_head_gestures.ucf" +# UCF_LABELS = {0:"Nod", 1:"Shake", 2:"Stationary", 3:"Swing", 4:"Walk"} # NOTE: Selected data rate and scale must match the MLC data rate and scale. 
-#lsm = LSM6DSOX(i2c, gyro_odr=26, accel_odr=26, gyro_scale=250, accel_scale=2, ucf=UCF_FILE) +# lsm = LSM6DSOX(i2c, gyro_odr=26, accel_odr=26, gyro_scale=250, accel_scale=2, ucf=UCF_FILE) print("MLC configured...") -while (True): - if (INT_MODE): - if (INT_FLAG): - INT_FLAG=False +while True: + if INT_MODE: + if INT_FLAG: + INT_FLAG = False print(UCF_LABELS[lsm.read_mlc_output()[0]]) else: buf = lsm.read_mlc_output() - if (buf != None): + if buf is not None: print(UCF_LABELS[buf[0]]) diff --git a/scripts/examples/10-Arduino-Boards/Nano-RP2040/03-Audio/audio_fft.py b/scripts/examples/10-Arduino-Boards/Nano-RP2040/03-Audio/audio_fft.py index 5b4225d50..0e623edba 100644 --- a/scripts/examples/10-Arduino-Boards/Nano-RP2040/03-Audio/audio_fft.py +++ b/scripts/examples/10-Arduino-Boards/Nano-RP2040/03-Audio/audio_fft.py @@ -1,8 +1,6 @@ import image import audio -import time from ulab import numpy as np -from ulab import scipy as sp from ulab import utils CHANNELS = 1 @@ -12,49 +10,64 @@ SCALE = 1 SIZE = (N_SAMPLES * SCALE) // CHANNELS raw_buf = None -fb = image.Image(SIZE+(50*SCALE), SIZE, image.RGB565, copy_to_fb=True) +fb = image.Image(SIZE + (50 * SCALE), SIZE, image.RGB565, copy_to_fb=True) audio.init(channels=CHANNELS, frequency=FREQUENCY, gain_db=16, overflow=False) + def audio_callback(buf): # NOTE: do Not call any function that allocates memory. global raw_buf - if (raw_buf == None): + if raw_buf is None: raw_buf = buf + # Start audio streaming audio.start_streaming(audio_callback) + def draw_fft(img, fft_buf): fft_buf = (fft_buf / max(fft_buf)) * SIZE fft_buf = np.log10(fft_buf + 1) * 20 color = (0xFF, 0x0F, 0x00) for i in range(0, len(fft_buf)): - img.draw_line(i*SCALE, SIZE, i*SCALE, SIZE-int(fft_buf[i]) * SCALE, color, SCALE) + img.draw_line( + i * SCALE, SIZE, i * SCALE, SIZE - int(fft_buf[i]) * SCALE, color, SCALE + ) + def draw_audio_bar(img, level, offset): - blk_size = (SIZE//10) + blk_size = SIZE // 10 color = (0xFF, 0x00, 0xF0) - blk_space = (blk_size//4) - for i in range(0, int(round(level/10))): - fb.draw_rectangle(SIZE+offset, SIZE - ((i+1)*blk_size) + blk_space, 20 * SCALE, blk_size - blk_space, color, 1, True) + blk_space = blk_size // 4 + for i in range(0, int(round(level / 10))): + fb.draw_rectangle( + SIZE + offset, + SIZE - ((i + 1) * blk_size) + blk_space, + 20 * SCALE, + blk_size - blk_space, + color, + 1, + True, + ) -while (True): - if (raw_buf != None): + +while True: + if raw_buf is not None: pcm_buf = np.frombuffer(raw_buf, dtype=np.int16) raw_buf = None if CHANNELS == 1: fft_buf = utils.spectrogram(pcm_buf) - l_lvl = int((np.mean(abs(pcm_buf[1::2])) / 32768)*100) + l_lvl = int((np.mean(abs(pcm_buf[1::2])) / 32768) * 100) else: fft_buf = utils.spectrogram(pcm_buf[0::2]) - l_lvl = int((np.mean(abs(pcm_buf[1::2])) / 32768)*100) - r_lvl = int((np.mean(abs(pcm_buf[0::2])) / 32768)*100) + l_lvl = int((np.mean(abs(pcm_buf[1::2])) / 32768) * 100) + r_lvl = int((np.mean(abs(pcm_buf[0::2])) / 32768) * 100) fb.clear() draw_fft(fb, fft_buf) draw_audio_bar(fb, l_lvl, 0) - draw_audio_bar(fb, l_lvl, 25*SCALE) + draw_audio_bar(fb, l_lvl, 25 * SCALE) if CHANNELS == 2: draw_audio_bar(fb, r_lvl, 25 * SCALE) fb.flush() diff --git a/scripts/examples/10-Arduino-Boards/Nano-RP2040/03-WiFi/ap_mode.py b/scripts/examples/10-Arduino-Boards/Nano-RP2040/03-WiFi/ap_mode.py index 805b5c6ce..e501b4e2d 100644 --- a/scripts/examples/10-Arduino-Boards/Nano-RP2040/03-WiFi/ap_mode.py +++ b/scripts/examples/10-Arduino-Boards/Nano-RP2040/03-WiFi/ap_mode.py @@ -3,14 +3,11 @@ # This example shows 
how to use WiFi in Access Point mode. import network import socket -import sys -import time -import gc -SSID ='OPENMV_AP' # Network SSID -KEY ='1234567890' # Network key (must be 10 chars) -HOST = '' # Use first available interface -PORT = 8080 # Arbitrary non-privileged port +SSID = "OPENMV_AP" # Network SSID +KEY = "1234567890" # Network key (must be 10 chars) +HOST = "" # Use first available interface +PORT = 8080 # Arbitrary non-privileged port # Init wlan module and connect to network wlan = network.WLAN(network.AP_IF) @@ -18,6 +15,7 @@ wlan.active(True) wlan.config(essid=SSID, key=KEY, security=wlan.WEP, channel=2) print("AP mode started. SSID: {} IP: {}".format(SSID, wlan.ifconfig()[0])) + def recvall(sock, n): # Helper function to recv n bytes or return None if EOF is hit data = bytearray() @@ -28,15 +26,16 @@ def recvall(sock, n): data.extend(packet) return data + def start_streaming(server): - print ('Waiting for connections..') + print("Waiting for connections..") client, addr = server.accept() # set client socket timeout to 5s client.settimeout(5.0) - print ('Connected to ' + addr[0] + ':' + str(addr[1])) + print("Connected to " + addr[0] + ":" + str(addr[1])) - while (True): + while True: try: # Read data from client data = recvall(client, 1024) @@ -47,7 +46,8 @@ def start_streaming(server): client.close() break -while (True): + +while True: try: server = socket.socket(socket.AF_INET, socket.SOCK_STREAM) # Bind and listen @@ -56,7 +56,7 @@ while (True): # Set server socket to blocking server.setblocking(True) - while (True): + while True: start_streaming(server) except OSError as e: server.close() diff --git a/scripts/examples/10-Arduino-Boards/Nano-RP2040/03-WiFi/http_client.py b/scripts/examples/10-Arduino-Boards/Nano-RP2040/03-WiFi/http_client.py index 5d12aba5c..e4edf62a5 100644 --- a/scripts/examples/10-Arduino-Boards/Nano-RP2040/03-WiFi/http_client.py +++ b/scripts/examples/10-Arduino-Boards/Nano-RP2040/03-WiFi/http_client.py @@ -4,8 +4,8 @@ import network import socket # AP info -SSID='' # Network SSID -KEY='' # Network key +SSID = "" # Network SSID +KEY = "" # Network key PORT = 80 HOST = "www.google.com" @@ -32,7 +32,7 @@ client.connect(addr) client.settimeout(3.0) # Send HTTP request and recv response -client.send("GET / HTTP/1.1\r\nHost: %s\r\n\r\n"%(HOST)) +client.send("GET / HTTP/1.1\r\nHost: %s\r\n\r\n" % (HOST)) print(client.recv(1024)) # Close socket diff --git a/scripts/examples/10-Arduino-Boards/Nano-RP2040/03-WiFi/ntp.py b/scripts/examples/10-Arduino-Boards/Nano-RP2040/03-WiFi/ntp.py index 5a8a69e08..72a0f20fa 100644 --- a/scripts/examples/10-Arduino-Boards/Nano-RP2040/03-WiFi/ntp.py +++ b/scripts/examples/10-Arduino-Boards/Nano-RP2040/03-WiFi/ntp.py @@ -8,8 +8,8 @@ import ustruct import utime # AP info -SSID='' # Network SSID -KEY='' # Network key +SSID = "" # Network SSID +KEY = "" # Network key TIMESTAMP = 2208988800 @@ -26,15 +26,15 @@ print(wlan.ifconfig()) # Create new socket client = usocket.socket(usocket.AF_INET, usocket.SOCK_DGRAM) client.bind(("", 8080)) -#client.settimeout(3.0) +# client.settimeout(3.0) # Get addr info via DNS addr = usocket.getaddrinfo("pool.ntp.org", 123)[0][4] # Send query -client.sendto('\x1b' + 47 * '\0', addr) +client.sendto("\x1b" + 47 * "\0", addr) data, address = client.recvfrom(1024) # Print time t = ustruct.unpack(">IIIIIIIIIIII", data)[10] - TIMESTAMP -print ("Year:%d Month:%d Day:%d Time: %d:%d:%d" % (utime.localtime(t)[0:6])) +print("Year:%d Month:%d Day:%d Time: %d:%d:%d" % (utime.localtime(t)[0:6])) diff --git 
a/scripts/examples/10-Arduino-Boards/Nano-RP2040/03-WiFi/scan.py b/scripts/examples/10-Arduino-Boards/Nano-RP2040/03-WiFi/scan.py index 0689a160e..fa83758da 100644 --- a/scripts/examples/10-Arduino-Boards/Nano-RP2040/03-WiFi/scan.py +++ b/scripts/examples/10-Arduino-Boards/Nano-RP2040/03-WiFi/scan.py @@ -9,10 +9,12 @@ wlan = network.WLAN(network.STA_IF) wlan.active(True) print("Scanning...") -while (True): +while True: scan_result = wlan.scan() for ap in scan_result: - print("SSID: %s BSSID: %s Channel: %d RSSI: %d Auth: %d" - %(ap[0], ":".join(["%X"%i for i in ap[1]]), ap[2], ap[3], ap[4])) + print( + "SSID: %s BSSID: %s Channel: %d RSSI: %d Auth: %d" + % (ap[0], ":".join(["%X" % i for i in ap[1]]), ap[2], ap[3], ap[4]) + ) print() time.sleep_ms(1000) diff --git a/scripts/examples/10-Arduino-Boards/Nano-RP2040/04-Bluetooth/ble_blinky.py b/scripts/examples/10-Arduino-Boards/Nano-RP2040/04-Bluetooth/ble_blinky.py index d03306d92..bdc174b57 100644 --- a/scripts/examples/10-Arduino-Boards/Nano-RP2040/04-Bluetooth/ble_blinky.py +++ b/scripts/examples/10-Arduino-Boards/Nano-RP2040/04-Bluetooth/ble_blinky.py @@ -3,8 +3,6 @@ # Use nRFConnect app from the App store, connect to the Nano and write 1/0 to control the LED. import bluetooth -import random -import struct import time from ble_advertising import advertising_payload from machine import Pin @@ -23,7 +21,11 @@ _FLAG_INDICATE = const(0x0020) _SERVICE_UUID = bluetooth.UUID(0x1523) _LED_CHAR_UUID = (bluetooth.UUID(0x1525), _FLAG_WRITE) -_LED_SERVICE = (_SERVICE_UUID, (_LED_CHAR_UUID,),) +_LED_SERVICE = ( + _SERVICE_UUID, + (_LED_CHAR_UUID,), +) + class BLETemperature: def __init__(self, ble, name="NANO RP2040"): @@ -47,13 +49,14 @@ class BLETemperature: self._advertise() elif event == _IRQ_GATTS_WRITE: Pin(LED_PIN, Pin.OUT).value(int(self._ble.gatts_read(data[-1])[0])) - + def _advertise(self, interval_us=500000): self._ble.gap_advertise(interval_us, adv_data=self._payload) + if __name__ == "__main__": ble = bluetooth.BLE() temp = BLETemperature(ble) - + while True: time.sleep_ms(1000) diff --git a/scripts/examples/10-Arduino-Boards/Nano-RP2040/04-Bluetooth/ble_temperature.py b/scripts/examples/10-Arduino-Boards/Nano-RP2040/04-Bluetooth/ble_temperature.py index b55fe4680..c79fbebe5 100644 --- a/scripts/examples/10-Arduino-Boards/Nano-RP2040/04-Bluetooth/ble_temperature.py +++ b/scripts/examples/10-Arduino-Boards/Nano-RP2040/04-Bluetooth/ble_temperature.py @@ -45,7 +45,9 @@ class BLETemperature: ((self._handle,),) = self._ble.gatts_register_services((_ENV_SENSE_SERVICE,)) self._connections = set() self._payload = advertising_payload( - name=name, services=[_ENV_SENSE_UUID], appearance=_ADV_APPEARANCE_GENERIC_THERMOMETER + name=name, + services=[_ENV_SENSE_UUID], + appearance=_ADV_APPEARANCE_GENERIC_THERMOMETER, ) self._advertise() diff --git a/scripts/examples/10-Arduino-Boards/Nano-RP2040/04-Bluetooth/temp_sensor_aioble.py b/scripts/examples/10-Arduino-Boards/Nano-RP2040/04-Bluetooth/temp_sensor_aioble.py index 536b603f8..cadbf0060 100644 --- a/scripts/examples/10-Arduino-Boards/Nano-RP2040/04-Bluetooth/temp_sensor_aioble.py +++ b/scripts/examples/10-Arduino-Boards/Nano-RP2040/04-Bluetooth/temp_sensor_aioble.py @@ -1,7 +1,3 @@ -import sys - -sys.path.append("") - from micropython import const import uasyncio as asyncio @@ -63,4 +59,5 @@ async def main(): t2 = asyncio.create_task(peripheral_task()) await asyncio.gather(t1, t2) + asyncio.run(main()) diff --git a/scripts/examples/10-Arduino-Boards/Nano-RP2040/05-Thermal/thermal_camera.py 
b/scripts/examples/10-Arduino-Boards/Nano-RP2040/05-Thermal/thermal_camera.py index 968c39b84..2b38d9aec 100644 --- a/scripts/examples/10-Arduino-Boards/Nano-RP2040/05-Thermal/thermal_camera.py +++ b/scripts/examples/10-Arduino-Boards/Nano-RP2040/05-Thermal/thermal_camera.py @@ -7,24 +7,28 @@ import image import time import fir -IMAGE_SCALE = 5 # Higher scaling uses more memory. -drawing_hint = image.BICUBIC # or image.BILINEAR or 0 (nearest neighbor) +IMAGE_SCALE = 5 # Higher scaling uses more memory. +drawing_hint = image.BICUBIC # or image.BILINEAR or 0 (nearest neighbor) # Initialize the thermal sensor -fir.init() #Auto-detects the connected sensor. -w = fir.width() * IMAGE_SCALE +fir.init() # Auto-detects the connected sensor. +w = fir.width() * IMAGE_SCALE h = fir.height() * IMAGE_SCALE # FPS clock clock = time.clock() -while (True): +while True: clock.tick() try: - img = fir.snapshot(x_size=w, y_size=h, - color_palette=fir.PALETTE_IRONBOW, hint=drawing_hint, - copy_to_fb=True) + img = fir.snapshot( + x_size=w, + y_size=h, + color_palette=fir.PALETTE_IRONBOW, + hint=drawing_hint, + copy_to_fb=True, + ) except OSError: continue diff --git a/scripts/examples/10-Arduino-Boards/Nicla-Vision/00-Board-Control/adc_read_ext_channel.py b/scripts/examples/10-Arduino-Boards/Nicla-Vision/00-Board-Control/adc_read_ext_channel.py index 20fec14b3..4d49c9472 100644 --- a/scripts/examples/10-Arduino-Boards/Nicla-Vision/00-Board-Control/adc_read_ext_channel.py +++ b/scripts/examples/10-Arduino-Boards/Nicla-Vision/00-Board-Control/adc_read_ext_channel.py @@ -7,7 +7,7 @@ from pyb import ADC adc = ADC("A0") -while(True): +while True: # The ADC has 12-bits of resolution for 4096 values. print("ADC = %fv" % ((adc.read() * 3.3) / 4095)) time.sleep_ms(100) diff --git a/scripts/examples/10-Arduino-Boards/Nicla-Vision/00-Board-Control/adc_read_int_channel.py b/scripts/examples/10-Arduino-Boards/Nicla-Vision/00-Board-Control/adc_read_int_channel.py index 446d86a0e..50fdb4b12 100644 --- a/scripts/examples/10-Arduino-Boards/Nicla-Vision/00-Board-Control/adc_read_int_channel.py +++ b/scripts/examples/10-Arduino-Boards/Nicla-Vision/00-Board-Control/adc_read_int_channel.py @@ -2,8 +2,10 @@ # # This example shows how to read internal ADC channels. -import time import pyb -adc = pyb.ADCAll(12) -print("VREF = %.1fv VBAT = %.1fv Temp = %d" % (adc.read_core_vref(), adc.read_core_vbat(), adc.read_core_temp())) +adc = pyb.ADCAll(12) +print( + "VREF = %.1fv VBAT = %.1fv Temp = %d" + % (adc.read_core_vref(), adc.read_core_vbat(), adc.read_core_temp()) +) diff --git a/scripts/examples/10-Arduino-Boards/Nicla-Vision/00-Board-Control/blinky.py b/scripts/examples/10-Arduino-Boards/Nicla-Vision/00-Board-Control/blinky.py index 495387562..4d3dc2a7d 100644 --- a/scripts/examples/10-Arduino-Boards/Nicla-Vision/00-Board-Control/blinky.py +++ b/scripts/examples/10-Arduino-Boards/Nicla-Vision/00-Board-Control/blinky.py @@ -7,7 +7,7 @@ from machine import Pin # other than the RGB LED connected to Nina WiFi module. 
led = Pin("LED_BLUE", Pin.OUT) -while (True): +while True: led.on() time.sleep_ms(250) led.off() diff --git a/scripts/examples/10-Arduino-Boards/Nicla-Vision/00-Board-Control/can.py b/scripts/examples/10-Arduino-Boards/Nicla-Vision/00-Board-Control/can.py index c5048dc38..f5ab81a55 100644 --- a/scripts/examples/10-Arduino-Boards/Nicla-Vision/00-Board-Control/can.py +++ b/scripts/examples/10-Arduino-Boards/Nicla-Vision/00-Board-Control/can.py @@ -12,18 +12,18 @@ TRANSMITTER = True can = CAN(1, CAN.NORMAL, baudrate=125_000, sample_point=75) # NOTE: uncomment to set bit timing manually, for example: -#can.init(CAN.NORMAL, prescaler=32, sjw=1, bs1=8, bs2=3) +# can.init(CAN.NORMAL, prescaler=32, sjw=1, bs1=8, bs2=3) can.restart() -if (TRANSMITTER): - while (True): +if TRANSMITTER: + while True: # Send message with id 1 - can.send('Hello', 1) + can.send("Hello", 1) time.sleep_ms(1000) else: # Runs on the receiving node. - if (omv.board_type() == 'H7'): # FDCAN + if omv.board_type() == "H7": # FDCAN # Set a filter to receive messages with id=1 -> 4 # Filter index, mode (RANGE, DUAL or MASK), FIFO (0 or 1), params can.setfilter(0, CAN.RANGE, 0, (1, 4)) @@ -32,6 +32,6 @@ else: # Filter index, mode (LIST16, etc..), FIFO (0 or 1), params can.setfilter(0, CAN.LIST16, 0, (1, 2, 3, 4)) - while (True): + while True: # Receive messages on FIFO 0 print(can.recv(0, timeout=10000)) diff --git a/scripts/examples/10-Arduino-Boards/Nicla-Vision/00-Board-Control/cpufreq_scaling.py b/scripts/examples/10-Arduino-Boards/Nicla-Vision/00-Board-Control/cpufreq_scaling.py index e707e7618..b2a650492 100644 --- a/scripts/examples/10-Arduino-Boards/Nicla-Vision/00-Board-Control/cpufreq_scaling.py +++ b/scripts/examples/10-Arduino-Boards/Nicla-Vision/00-Board-Control/cpufreq_scaling.py @@ -6,22 +6,27 @@ import image import time import cpufreq -sensor.reset() # Reset and initialize the sensor. +sensor.reset() # Reset and initialize the sensor. sensor.set_pixformat(sensor.GRAYSCALE) # Set pixel format to RGB565 (or GRAYSCALE) -sensor.set_framesize(sensor.QVGA) # Set frame size to QVGA (320x240) -clock = time.clock() # Create a clock object to track the FPS. +sensor.set_framesize(sensor.QVGA) # Set frame size to QVGA (320x240) +clock = time.clock() # Create a clock object to track the FPS. + def test_image_processing(): for i in range(0, 50): - clock.tick() # Update the FPS clock. - img = sensor.snapshot() # Take a picture and return the image. + clock.tick() # Update the FPS clock. + img = sensor.snapshot() # Take a picture and return the image. img.find_edges(image.EDGE_CANNY, threshold=(50, 80)) + print("\nFrequency Scaling Test...") for f in cpufreq.get_supported_frequencies(): - print("Testing CPU Freq: %dMHz..." %(f)) + print("Testing CPU Freq: %dMHz..." 
% (f)) cpufreq.set_frequency(f) clock.reset() test_image_processing() freqs = cpufreq.get_current_frequencies() - print("CPU Freq:%dMHz HCLK:%dMhz PCLK1:%dMhz PCLK2:%dMhz FPS:%.2f" %(freqs[0], freqs[1], freqs[2], freqs[3], clock.fps())) + print( + "CPU Freq:%dMHz HCLK:%dMHz PCLK1:%dMHz PCLK2:%dMHz FPS:%.2f" + % (freqs[0], freqs[1], freqs[2], freqs[3], clock.fps()) + ) diff --git a/scripts/examples/10-Arduino-Boards/Nicla-Vision/00-Board-Control/i2c_control.py b/scripts/examples/10-Arduino-Boards/Nicla-Vision/00-Board-Control/i2c_control.py index 2d0ca10d9..4307a9f07 100644 --- a/scripts/examples/10-Arduino-Boards/Nicla-Vision/00-Board-Control/i2c_control.py +++ b/scripts/examples/10-Arduino-Boards/Nicla-Vision/00-Board-Control/i2c_control.py @@ -7,13 +7,14 @@ from pyb import I2C i2c = I2C(1, I2C.MASTER) -mem = i2c.mem_read(256, 0x50, 0) # The eeprom slave address is 0x50. +mem = i2c.mem_read(256, 0x50, 0) # The eeprom slave address is 0x50. print("\n[") for i in range(16): - print("\t[", end='') + print("\t[", end="") for j in range(16): - print("%03d" % mem[(i*16)+j], end='') - if j != 15: print(", ", end='') + print("%03d" % mem[(i * 16) + j], end="") + if j != 15: + print(", ", end="") print("]," if i != 15 else "]") print("]") diff --git a/scripts/examples/10-Arduino-Boards/Nicla-Vision/00-Board-Control/led_control.py b/scripts/examples/10-Arduino-Boards/Nicla-Vision/00-Board-Control/led_control.py index 81a25b302..49b97f5be 100644 --- a/scripts/examples/10-Arduino-Boards/Nicla-Vision/00-Board-Control/led_control.py +++ b/scripts/examples/10-Arduino-Boards/Nicla-Vision/00-Board-Control/led_control.py @@ -4,19 +4,27 @@ import time from pyb import LED -red_led = LED(1) +red_led = LED(1) green_led = LED(2) -blue_led = LED(3) +blue_led = LED(3) + def led_control(x): - if (x&1)==0: red_led.off() - elif (x&1)==1: red_led.on() - if (x&2)==0: green_led.off() - elif (x&2)==2: green_led.on() - if (x&4)==0: blue_led.off() - elif (x&4)==4: blue_led.on() + if (x & 1) == 0: + red_led.off() + elif (x & 1) == 1: + red_led.on() + if (x & 2) == 0: + green_led.off() + elif (x & 2) == 2: + green_led.on() + if (x & 4) == 0: + blue_led.off() + elif (x & 4) == 4: + blue_led.on() -while(True): + +while True: for i in range(16): led_control(i) time.sleep_ms(500) diff --git a/scripts/examples/10-Arduino-Boards/Nicla-Vision/00-Board-Control/pin_control.py b/scripts/examples/10-Arduino-Boards/Nicla-Vision/00-Board-Control/pin_control.py index 91a241b89..af838f134 100644 --- a/scripts/examples/10-Arduino-Boards/Nicla-Vision/00-Board-Control/pin_control.py +++ b/scripts/examples/10-Arduino-Boards/Nicla-Vision/00-Board-Control/pin_control.py @@ -6,8 +6,8 @@ from pyb import Pin # Connect a switch to pin 0 that will pull it low when the switch is closed. # Pin 1 will then light up. 
-pin0 = Pin('GPIO1', Pin.IN, Pin.PULL_UP) -pin1 = Pin('GPIO2', Pin.OUT_PP, Pin.PULL_NONE) +pin0 = Pin("GPIO1", Pin.IN, Pin.PULL_UP) +pin1 = Pin("GPIO2", Pin.OUT_PP, Pin.PULL_NONE) -while(True): +while True: pin1.value(not pin0.value()) diff --git a/scripts/examples/10-Arduino-Boards/Nicla-Vision/00-Board-Control/pwm_control.py b/scripts/examples/10-Arduino-Boards/Nicla-Vision/00-Board-Control/pwm_control.py index 8c67c0d42..ff9c1d7c3 100644 --- a/scripts/examples/10-Arduino-Boards/Nicla-Vision/00-Board-Control/pwm_control.py +++ b/scripts/examples/10-Arduino-Boards/Nicla-Vision/00-Board-Control/pwm_control.py @@ -5,26 +5,28 @@ import time from pyb import Pin, Timer -class PWM(): + +class PWM: def __init__(self, pin, tim, ch): self.pin = pin self.tim = tim - self.ch = ch; + self.ch = ch + pwms = { - 'PWM1' : PWM('PE12', 1, 1), - 'PWM2' : PWM('PE11', 1, 2), -# 'PWM3' : PWM('PA9', 1, 2), - 'PWM3' : PWM('PA10', 1, 3), - 'PWM4' : PWM('PE14', 1, 4), - 'PWM5' : PWM('PB8', 4, 3), - 'PWM6' : PWM('PB9', 4, 4), + "PWM1": PWM("PE12", 1, 1), + "PWM2": PWM("PE11", 1, 2), + # 'PWM3' : PWM('PA9', 1, 2), + "PWM3": PWM("PA10", 1, 3), + "PWM4": PWM("PE14", 1, 4), + "PWM5": PWM("PB8", 4, 3), + "PWM6": PWM("PB9", 4, 4), } # Generate a 1KHz square wave with 50% cycle on the following PWM. for k, pwm in pwms.items(): - tim = Timer(pwm.tim, freq=1000) # Frequency in Hz - ch = tim.channel(pwm.ch, Timer.PWM, pin=Pin(pwm.pin), pulse_width_percent=50) + tim = Timer(pwm.tim, freq=1000) # Frequency in Hz + ch = tim.channel(pwm.ch, Timer.PWM, pin=Pin(pwm.pin), pulse_width_percent=50) -while (True): +while True: time.sleep_ms(1000) diff --git a/scripts/examples/10-Arduino-Boards/Nicla-Vision/00-Board-Control/rtc.py b/scripts/examples/10-Arduino-Boards/Nicla-Vision/00-Board-Control/rtc.py index cfd2bbefc..f82bec7e7 100644 --- a/scripts/examples/10-Arduino-Boards/Nicla-Vision/00-Board-Control/rtc.py +++ b/scripts/examples/10-Arduino-Boards/Nicla-Vision/00-Board-Control/rtc.py @@ -7,6 +7,6 @@ from pyb import RTC rtc = RTC() rtc.datetime((2013, 7, 9, 2, 0, 0, 0, 0)) -while (True): +while True: print(rtc.datetime()) time.sleep_ms(1000) diff --git a/scripts/examples/10-Arduino-Boards/Nicla-Vision/00-Board-Control/spi_control.py b/scripts/examples/10-Arduino-Boards/Nicla-Vision/00-Board-Control/spi_control.py index e37b1e520..b370dbb49 100644 --- a/scripts/examples/10-Arduino-Boards/Nicla-Vision/00-Board-Control/spi_control.py +++ b/scripts/examples/10-Arduino-Boards/Nicla-Vision/00-Board-Control/spi_control.py @@ -7,13 +7,14 @@ import sensor import time from pyb import Pin, SPI -cs = Pin("GPIO1", Pin.OUT_OD) +cs = Pin("GPIO1", Pin.OUT_OD) rst = Pin("GPIO2", Pin.OUT_PP) -rs = Pin("GPIO3", Pin.OUT_PP) +rs = Pin("GPIO3", Pin.OUT_PP) # NOTE: The SPI clock frequency will not always be the requested frequency. The hardware only supports # frequencies that are the bus frequency divided by a prescaler (which can be 2, 4, 8, 16, 32, 64, 128 or 256). -spi = SPI(4, SPI.MASTER, baudrate=int(1000000000/66), polarity=0, phase=0) +spi = SPI(4, SPI.MASTER, baudrate=int(1000000000 / 66), polarity=0, phase=0) + def write_command_byte(c): cs.low() @@ -21,16 +22,20 @@ def write_command_byte(c): spi.send(c) cs.high() + def write_data_byte(c): cs.low() rs.high() spi.send(c) cs.high() + def write_command(c, *data): write_command_byte(c) if data: - for d in data: write_data_byte(d) + for d in data: + write_data_byte(d) + def write_image(img): cs.low() @@ -38,13 +43,14 @@ def write_image(img): spi.send(img) cs.high() + # Reset the LCD. 
rst.low() time.sleep_ms(100) rst.high() time.sleep_ms(100) -write_command(0x11) # Sleep Exit +write_command(0x11) # Sleep Exit time.sleep_ms(120) # Memory Data Access Control @@ -57,18 +63,18 @@ write_command(0x3A, 0x05) # Display On write_command(0x29) -sensor.reset() # Initialize the camera sensor. +sensor.reset() # Initialize the camera sensor. sensor.set_pixformat(sensor.RGB565) sensor.set_framesize(sensor.QQVGA2) -sensor.skip_frames(time = 2000) # Let new settings take affect. -clock = time.clock() # Tracks FPS. +sensor.skip_frames(time=2000) # Let new settings take effect. +clock = time.clock() # Tracks FPS. -while(True): - clock.tick() # Track elapsed milliseconds between snapshots(). - img = sensor.snapshot() # Take a picture and return the image. +while True: + clock.tick() # Track elapsed milliseconds between snapshots(). + img = sensor.snapshot() # Take a picture and return the image. - write_command(0x2C) # Write image command... + write_command(0x2C) # Write image command... write_image(img) - print(clock.fps()) # Note: Your OpenMV Cam runs about half as fast while + print(clock.fps()) # Note: Your OpenMV Cam runs about half as fast while # connected to your computer. The FPS should increase once disconnected. diff --git a/scripts/examples/10-Arduino-Boards/Nicla-Vision/00-Board-Control/timer_control.py b/scripts/examples/10-Arduino-Boards/Nicla-Vision/00-Board-Control/timer_control.py index 5ccdbe99f..5954c2f0f 100644 --- a/scripts/examples/10-Arduino-Boards/Nicla-Vision/00-Board-Control/timer_control.py +++ b/scripts/examples/10-Arduino-Boards/Nicla-Vision/00-Board-Control/timer_control.py @@ -3,17 +3,20 @@ # This example shows how to use a timer for callbacks. import time -from pyb import Pin, Timer, LED +from pyb import LED +from pyb import Timer + +blue_led = LED(3) -blue_led = LED(3) # we will receive the timer object when being called # Note: functions that allocate memory are Not allowed in callbacks -def tick(timer): blue_led.toggle() - -tim = Timer(2, freq=1) # create a timer object using timer 2 - trigger at 1Hz -tim.callback(tick) # set the callback to our tick function -while (True): + +tim = Timer(2, freq=1) # create a timer object using timer 2 - trigger at 1Hz +tim.callback(tick) # set the callback to our tick function + +while True: time.sleep_ms(1000) diff --git a/scripts/examples/10-Arduino-Boards/Nicla-Vision/00-Board-Control/uart_control.py b/scripts/examples/10-Arduino-Boards/Nicla-Vision/00-Board-Control/uart_control.py index 7eff664f0..d8ec6b2f2 100644 --- a/scripts/examples/10-Arduino-Boards/Nicla-Vision/00-Board-Control/uart_control.py +++ b/scripts/examples/10-Arduino-Boards/Nicla-Vision/00-Board-Control/uart_control.py @@ -8,6 +8,6 @@ from pyb import UART # Init UART object. uart = UART(4, 19200) -while(True): +while True: uart.write("Hello World!\r") time.sleep_ms(1000) diff --git a/scripts/examples/10-Arduino-Boards/Nicla-Vision/00-Board-Control/usb_hid.py b/scripts/examples/10-Arduino-Boards/Nicla-Vision/00-Board-Control/usb_hid.py index 54d2627a6..f1b77aba5 100644 --- a/scripts/examples/10-Arduino-Boards/Nicla-Vision/00-Board-Control/usb_hid.py +++ b/scripts/examples/10-Arduino-Boards/Nicla-Vision/00-Board-Control/usb_hid.py @@ -7,10 +7,10 @@ # # Add the following script to boot.py: # -##import pyb #(UNCOMMENT THIS LINE!) -##pyb.usb_mode('VCP+HID') # serial device + mouse (UNCOMMENT THIS LINE!) 
-##pyb.usb_mode('VCP+MSC') # serial device + storage device (default) -##pyb.usb_mode('VCP+HID', hid=pyb.hid_keyboard) # serial device + keyboard +# import pyb +# pyb.usb_mode('VCP+HID') # serial device + mouse (UNCOMMENT THIS LINE!) +# pyb.usb_mode('VCP+MSC') # serial device + storage device (default) +# pyb.usb_mode('VCP+HID', hid=pyb.hid_keyboard) # serial device + keyboard # # Copy boot.py to the root of the uSD card and restart the camera, it should now # act as a serial device and a mouse. @@ -25,7 +25,7 @@ import time hid = pyb.USB_HID() -while(True): +while True: # x, y and scroll # move 10 pixels to the right hid.send((0, 10, 0, 0)) diff --git a/scripts/examples/10-Arduino-Boards/Nicla-Vision/00-Board-Control/usb_vcp.py b/scripts/examples/10-Arduino-Boards/Nicla-Vision/00-Board-Control/usb_vcp.py index 47a0aeba3..7c1c4fa53 100644 --- a/scripts/examples/10-Arduino-Boards/Nicla-Vision/00-Board-Control/usb_vcp.py +++ b/scripts/examples/10-Arduino-Boards/Nicla-Vision/00-Board-Control/usb_vcp.py @@ -16,24 +16,23 @@ # size = struct.unpack('8.3f} y:{:>8.3f} z:{:>8.3f}'.format(*lsm.read_accel())) - print('Gyroscope: x:{:>8.3f} y:{:>8.3f} z:{:>8.3f}'.format(*lsm.read_gyro())) +while True: + print("Accelerometer: x:{:>8.3f} y:{:>8.3f} z:{:>8.3f}".format(*lsm.read_accel())) + print("Gyroscope: x:{:>8.3f} y:{:>8.3f} z:{:>8.3f}".format(*lsm.read_gyro())) print("") time.sleep_ms(100) diff --git a/scripts/examples/10-Arduino-Boards/Nicla-Vision/01-Sensors/lsm6dsox_mlc.py b/scripts/examples/10-Arduino-Boards/Nicla-Vision/01-Sensors/lsm6dsox_mlc.py index 96110acb9..46995b2b3 100644 --- a/scripts/examples/10-Arduino-Boards/Nicla-Vision/01-Sensors/lsm6dsox_mlc.py +++ b/scripts/examples/10-Arduino-Boards/Nicla-Vision/01-Sensors/lsm6dsox_mlc.py @@ -4,43 +4,52 @@ # NOTE: The pre-trained models (UCF files) for the examples can be found here: # https://github.com/STMicroelectronics/STMems_Machine_Learning_Core/tree/master/application_examples/lsm6dsox -import time +from machine import Pin +from machine import SPI from lsm6dsox import LSM6DSOX -from machine import I2C, SPI, Pin -INT_MODE = True # Run in interrupt mode. -INT_FLAG = False # Set True on interrupt. +INT_MODE = True # Run in interrupt mode. +INT_FLAG = False # Set True on interrupt. + def imu_int_handler(pin): global INT_FLAG INT_FLAG = True -if (INT_MODE == True): - int_pin = Pin('PA1', mode=Pin.IN, pull=Pin.PULL_UP) + +if INT_MODE is True: + int_pin = Pin("PA1", mode=Pin.IN, pull=Pin.PULL_UP) int_pin.irq(handler=imu_int_handler, trigger=Pin.IRQ_RISING) # Vibration detection example UCF_FILE = "lsm6dsox_vibration_monitoring.ucf" -UCF_LABELS = {0:"no vibration", 1:"low vibration", 2:"high vibration"} +UCF_LABELS = {0: "no vibration", 1: "low vibration", 2: "high vibration"} # NOTE: Selected data rate and scale must match the MLC data rate and scale. -lsm = LSM6DSOX(SPI(5), cs_pin=Pin("PF6", Pin.OUT_PP, Pin.PULL_UP), - gyro_odr=26, accel_odr=26, gyro_scale=2000, accel_scale=4, ucf=UCF_FILE) +lsm = LSM6DSOX( + SPI(5), + cs_pin=Pin("PF6", Pin.OUT_PP, Pin.PULL_UP), + gyro_odr=26, + accel_odr=26, + gyro_scale=2000, + accel_scale=4, + ucf=UCF_FILE, +) # Head gestures example -#UCF_FILE = "lsm6dsox_head_gestures.ucf" -#UCF_LABELS = {0:"Nod", 1:"Shake", 2:"Stationary", 3:"Swing", 4:"Walk"} +# UCF_FILE = "lsm6dsox_head_gestures.ucf" +# UCF_LABELS = {0:"Nod", 1:"Shake", 2:"Stationary", 3:"Swing", 4:"Walk"} # NOTE: Selected data rate and scale must match the MLC data rate and scale. 
-#lsm = LSM6DSOX(SPI(5), cs_pin=Pin("PF6", Pin.OUT_PP, Pin.PULL_UP), +# lsm = LSM6DSOX(SPI(5), cs_pin=Pin("PF6", Pin.OUT_PP, Pin.PULL_UP), # gyro_odr=26, accel_odr=26, gyro_scale=250, accel_scale=2, ucf=UCF_FILE) print("MLC configured...") -while (True): - if (INT_MODE): - if (INT_FLAG): - INT_FLAG=False +while True: + if INT_MODE: + if INT_FLAG: + INT_FLAG = False print(UCF_LABELS[lsm.read_mlc_output()[0]]) else: buf = lsm.read_mlc_output() - if (buf != None): + if buf is not None: print(UCF_LABELS[buf[0]]) diff --git a/scripts/examples/10-Arduino-Boards/Nicla-Vision/01-Sensors/vl53l1x_tof.py b/scripts/examples/10-Arduino-Boards/Nicla-Vision/01-Sensors/vl53l1x_tof.py index e8ef75464..6d4ec5a96 100644 --- a/scripts/examples/10-Arduino-Boards/Nicla-Vision/01-Sensors/vl53l1x_tof.py +++ b/scripts/examples/10-Arduino-Boards/Nicla-Vision/01-Sensors/vl53l1x_tof.py @@ -8,4 +8,3 @@ tof = VL53L1X(I2C(2)) while True: print(f"Distance: {tof.read()}mm") time.sleep_ms(50) - diff --git a/scripts/examples/10-Arduino-Boards/Nicla-Vision/02-Audio/audio_fft.py b/scripts/examples/10-Arduino-Boards/Nicla-Vision/02-Audio/audio_fft.py index 3571701a5..50c5e4fef 100644 --- a/scripts/examples/10-Arduino-Boards/Nicla-Vision/02-Audio/audio_fft.py +++ b/scripts/examples/10-Arduino-Boards/Nicla-Vision/02-Audio/audio_fft.py @@ -1,52 +1,63 @@ import image import audio -import time from ulab import numpy as np -from ulab import scipy as sp from ulab import utils CHANNELS = 1 -SIZE = 256//(2*CHANNELS) +SIZE = 256 // (2 * CHANNELS) raw_buf = None -fb = image.Image(SIZE+50, SIZE, image.RGB565, copy_to_fb=True) +fb = image.Image(SIZE + 50, SIZE, image.RGB565, copy_to_fb=True) audio.init(channels=CHANNELS, frequency=16000, gain_db=24, highpass=0.9883) + def audio_callback(buf): # NOTE: do Not call any function that allocates memory. 
global raw_buf - if (raw_buf == None): + if raw_buf is None: raw_buf = buf + # Start audio streaming audio.start_streaming(audio_callback) + def draw_fft(img, fft_buf): fft_buf = (fft_buf / max(fft_buf)) * SIZE fft_buf = np.log10(fft_buf + 1) * 20 color = (0xFF, 0x0F, 0x00) for i in range(0, SIZE): - img.draw_line(i, SIZE, i, SIZE-int(fft_buf[i]), color, 1) + img.draw_line(i, SIZE, i, SIZE - int(fft_buf[i]), color, 1) + def draw_audio_bar(img, level, offset): - blk_size = SIZE//10 + blk_size = SIZE // 10 color = (0xFF, 0x00, 0xF0) - blk_space = (blk_size//4) - for i in range(0, int(round(level/10))): - fb.draw_rectangle(SIZE+offset, SIZE - ((i+1)*blk_size) + blk_space, 20, blk_size - blk_space, color, 1, True) + blk_space = blk_size // 4 + for i in range(0, int(round(level / 10))): + fb.draw_rectangle( + SIZE + offset, + SIZE - ((i + 1) * blk_size) + blk_space, + 20, + blk_size - blk_space, + color, + 1, + True, + ) -while (True): - if (raw_buf != None): + +while True: + if raw_buf is not None: pcm_buf = np.frombuffer(raw_buf, dtype=np.int16) raw_buf = None if CHANNELS == 1: fft_buf = utils.spectrogram(pcm_buf) - l_lvl = int((np.mean(abs(pcm_buf[1::2])) / 32768)*100) + l_lvl = int((np.mean(abs(pcm_buf[1::2])) / 32768) * 100) else: fft_buf = utils.spectrogram(pcm_buf[0::2]) - l_lvl = int((np.mean(abs(pcm_buf[1::2])) / 32768)*100) - r_lvl = int((np.mean(abs(pcm_buf[0::2])) / 32768)*100) + l_lvl = int((np.mean(abs(pcm_buf[1::2])) / 32768) * 100) + r_lvl = int((np.mean(abs(pcm_buf[0::2])) / 32768) * 100) fb.clear() draw_fft(fb, fft_buf) diff --git a/scripts/examples/10-Arduino-Boards/Nicla-Vision/02-Audio/micro_speech.py b/scripts/examples/10-Arduino-Boards/Nicla-Vision/02-Audio/micro_speech.py index 782ace69f..0cead2c1d 100644 --- a/scripts/examples/10-Arduino-Boards/Nicla-Vision/02-Audio/micro_speech.py +++ b/scripts/examples/10-Arduino-Boards/Nicla-Vision/02-Audio/micro_speech.py @@ -8,26 +8,29 @@ import time import tf import micro_speech import pyb -labels = ['Silence', 'Unknown', 'Yes', 'No'] -led_red = pyb.LED(1) +labels = ["Silence", "Unknown", "Yes", "No"] + +led_red = pyb.LED(1) led_green = pyb.LED(2) -model = tf.load('/model.tflite') +model = tf.load("/model.tflite") speech = micro_speech.MicroSpeech() audio.init(channels=1, frequency=16000, gain=24, highpass=0.9883) # Start audio streaming audio.start_streaming(speech.audio_callback) -while (True): +while True: # Run micro-speech without a timeout and filter detections by label index. 
- idx = speech.listen(model, timeout=0, threshold = 0.78, filter=[2, 3]) + idx = speech.listen(model, timeout=0, threshold=0.78, filter=[2, 3]) led = led_green if idx == 2 else led_red print(labels[idx]) for i in range(0, 4): - led.on(); time.sleep_ms(25) - led.off(); time.sleep_ms(25) + led.on() + time.sleep_ms(25) + led.off() + time.sleep_ms(25) # Stop streaming audio.stop_streaming() diff --git a/scripts/examples/10-Arduino-Boards/Nicla-Vision/03-WiFi/connect.py b/scripts/examples/10-Arduino-Boards/Nicla-Vision/03-WiFi/connect.py index 41b114a83..443784736 100644 --- a/scripts/examples/10-Arduino-Boards/Nicla-Vision/03-WiFi/connect.py +++ b/scripts/examples/10-Arduino-Boards/Nicla-Vision/03-WiFi/connect.py @@ -4,8 +4,8 @@ import network -SSID='' # Network SSID -KEY='' # Network key +SSID = "" # Network SSID +KEY = "" # Network key # Init wlan module and connect to network wlan = network.WLAN(network.STA_IF) @@ -13,7 +13,7 @@ wlan.active(True) wlan.connect(SSID, KEY) while not wlan.isconnected(): - print("Trying to connect to \"{:s}\"...".format(SSID)) + print('Trying to connect to "{:s}"...'.format(SSID)) time.sleep_ms(1000) # We should have a valid IP now via DHCP diff --git a/scripts/examples/10-Arduino-Boards/Nicla-Vision/03-WiFi/dns.py b/scripts/examples/10-Arduino-Boards/Nicla-Vision/03-WiFi/dns.py index 6ae7b68ac..13d1c2864 100644 --- a/scripts/examples/10-Arduino-Boards/Nicla-Vision/03-WiFi/dns.py +++ b/scripts/examples/10-Arduino-Boards/Nicla-Vision/03-WiFi/dns.py @@ -5,8 +5,8 @@ import network import usocket -SSID='' # Network SSID -KEY='' # Network key +SSID = "" # Network SSID +KEY = "" # Network key # Init wlan module and connect to network wlan = network.WLAN(network.STA_IF) @@ -14,7 +14,7 @@ wlan.active(True) wlan.connect(SSID, KEY) while not wlan.isconnected(): - print("Trying to connect to \"{:s}\"...".format(SSID)) + print('Trying to connect to "{:s}"...'.format(SSID)) time.sleep_ms(1000) # We should have a valid IP now via DHCP diff --git a/scripts/examples/10-Arduino-Boards/Nicla-Vision/03-WiFi/http_client.py b/scripts/examples/10-Arduino-Boards/Nicla-Vision/03-WiFi/http_client.py index 56861fac4..63986d875 100644 --- a/scripts/examples/10-Arduino-Boards/Nicla-Vision/03-WiFi/http_client.py +++ b/scripts/examples/10-Arduino-Boards/Nicla-Vision/03-WiFi/http_client.py @@ -4,8 +4,8 @@ import network import socket # AP info -SSID='' # Network SSID -KEY='' # Network key +SSID = "" # Network SSID +KEY = "" # Network key PORT = 80 HOST = "www.google.com" @@ -16,7 +16,7 @@ wlan.active(True) wlan.connect(SSID, KEY) while not wlan.isconnected(): - print("Trying to connect to \"{:s}\"...".format(SSID)) + print('Trying to connect to "{:s}"...'.format(SSID)) time.sleep_ms(1000) # We should have a valid IP now via DHCP @@ -34,7 +34,7 @@ client.connect(addr) client.settimeout(3.0) # Send HTTP request and recv response -client.send("GET / HTTP/1.1\r\nHost: %s\r\n\r\n"%(HOST)) +client.send("GET / HTTP/1.1\r\nHost: %s\r\n\r\n" % (HOST)) print(client.recv(1024)) # Close socket diff --git a/scripts/examples/10-Arduino-Boards/Nicla-Vision/03-WiFi/http_client_ssl.py b/scripts/examples/10-Arduino-Boards/Nicla-Vision/03-WiFi/http_client_ssl.py index bc4d0ad58..74287c961 100644 --- a/scripts/examples/10-Arduino-Boards/Nicla-Vision/03-WiFi/http_client_ssl.py +++ b/scripts/examples/10-Arduino-Boards/Nicla-Vision/03-WiFi/http_client_ssl.py @@ -4,8 +4,8 @@ import socket import ussl # AP info -SSID='' # Network SSID -KEY='' # Network key +SSID = "" # Network SSID +KEY = "" # Network key PORT = 443 
HOST = "www.google.com" @@ -16,7 +16,7 @@ wlan.active(True) wlan.connect(SSID, KEY) while not wlan.isconnected(): - print("Trying to connect to \"{:s}\"...".format(SSID)) + print('Trying to connect to "{:s}"...'.format(SSID)) time.sleep_ms(1000) # We should have a valid IP now via DHCP @@ -42,7 +42,7 @@ request += "HOST: %s\r\n" request += "User-Agent: Mozilla/5.0\r\n" request += "Connection: keep-alive\r\n\r\n" # Add more headers if needed. -client.write(request%(HOST)+"\r\n") +client.write(request % (HOST) + "\r\n") response = client.read(1024) for l in response.split(b"\r\n"): diff --git a/scripts/examples/10-Arduino-Boards/Nicla-Vision/03-WiFi/mjpeg_streamer.py b/scripts/examples/10-Arduino-Boards/Nicla-Vision/03-WiFi/mjpeg_streamer.py index 5a42655fc..5b0344cc5 100644 --- a/scripts/examples/10-Arduino-Boards/Nicla-Vision/03-WiFi/mjpeg_streamer.py +++ b/scripts/examples/10-Arduino-Boards/Nicla-Vision/03-WiFi/mjpeg_streamer.py @@ -7,11 +7,10 @@ import sensor import time import network import socket -import sys -SSID='' # Network SSID -KEY='' # Network key -HOST ='' # Use first available interface +SSID = "" # Network SSID +KEY = "" # Network key +HOST = "" # Use first available interface PORT = 8080 # Arbitrary non-privileged port # Init sensor @@ -25,7 +24,7 @@ wlan.active(True) wlan.connect(SSID, KEY) while not wlan.isconnected(): - print("Trying to connect to \"{:s}\"...".format(SSID)) + print('Trying to connect to "{:s}"...'.format(SSID)) time.sleep_ms(1000) # We should have a valid IP now via DHCP @@ -42,43 +41,49 @@ s.listen(5) # Set server socket to blocking s.setblocking(True) + def start_streaming(s): - print ('Waiting for connections..') + print("Waiting for connections..") client, addr = s.accept() # set client socket timeout to 5s client.settimeout(5.0) - print ('Connected to ' + addr[0] + ':' + str(addr[1])) + print("Connected to " + addr[0] + ":" + str(addr[1])) # Read request from client data = client.recv(1024) # Should parse client request here # Send multipart header - client.sendall("HTTP/1.1 200 OK\r\n" \ - "Server: OpenMV\r\n" \ - "Content-Type: multipart/x-mixed-replace;boundary=openmv\r\n" \ - "Cache-Control: no-cache\r\n" \ - "Pragma: no-cache\r\n\r\n") + client.sendall( + "HTTP/1.1 200 OK\r\n" + "Server: OpenMV\r\n" + "Content-Type: multipart/x-mixed-replace;boundary=openmv\r\n" + "Cache-Control: no-cache\r\n" + "Pragma: no-cache\r\n\r\n" + ) # FPS clock clock = time.clock() # Start streaming images # NOTE: Disable IDE preview to increase streaming FPS. - while (True): - clock.tick() # Track elapsed milliseconds between snapshots(). + while True: + clock.tick() # Track elapsed milliseconds between snapshots(). 
frame = sensor.snapshot() cframe = frame.compressed(quality=35) - header = "\r\n--openmv\r\n" \ - "Content-Type: image/jpeg\r\n"\ - "Content-Length:"+str(cframe.size())+"\r\n\r\n" + header = ( + "\r\n--openmv\r\n" + "Content-Type: image/jpeg\r\n" + "Content-Length:" + str(cframe.size()) + "\r\n\r\n" + ) client.sendall(header) client.sendall(cframe) print(clock.fps()) -while (True): + +while True: try: start_streaming(s) except OSError as e: print("socket error: ", e) - #sys.print_exception(e) + # sys.print_exception(e) diff --git a/scripts/examples/10-Arduino-Boards/Nicla-Vision/03-WiFi/mqtt_pub.py b/scripts/examples/10-Arduino-Boards/Nicla-Vision/03-WiFi/mqtt_pub.py index fb0652d54..b32ff0dce 100644 --- a/scripts/examples/10-Arduino-Boards/Nicla-Vision/03-WiFi/mqtt_pub.py +++ b/scripts/examples/10-Arduino-Boards/Nicla-Vision/03-WiFi/mqtt_pub.py @@ -11,8 +11,8 @@ import time import network from mqtt import MQTTClient -SSID='' # Network SSID -KEY='' # Network key +SSID = "" # Network SSID +KEY = "" # Network key # Init wlan module and connect to network wlan = network.WLAN(network.STA_IF) @@ -20,7 +20,7 @@ wlan.active(True) wlan.connect(SSID, KEY) while not wlan.isconnected(): - print("Trying to connect to \"{:s}\"...".format(SSID)) + print('Trying to connect to "{:s}"...'.format(SSID)) time.sleep_ms(1000) # We should have a valid IP now via DHCP @@ -29,6 +29,6 @@ print("WiFi Connected ", wlan.ifconfig()) client = MQTTClient("openmv", "test.mosquitto.org", port=1883) client.connect() -while (True): +while True: client.publish("openmv/test", "Hello World!") time.sleep_ms(1000) diff --git a/scripts/examples/10-Arduino-Boards/Nicla-Vision/03-WiFi/mqtt_sub.py b/scripts/examples/10-Arduino-Boards/Nicla-Vision/03-WiFi/mqtt_sub.py index d8bf96b36..4fb3645c6 100644 --- a/scripts/examples/10-Arduino-Boards/Nicla-Vision/03-WiFi/mqtt_sub.py +++ b/scripts/examples/10-Arduino-Boards/Nicla-Vision/03-WiFi/mqtt_sub.py @@ -11,8 +11,8 @@ import time import network from mqtt import MQTTClient -SSID='' # Network SSID -KEY='' # Network key +SSID = "" # Network SSID +KEY = "" # Network key # Init wlan module and connect to network wlan = network.WLAN(network.STA_IF) @@ -20,7 +20,7 @@ wlan.active(True) wlan.connect(SSID, KEY) while not wlan.isconnected(): - print("Trying to connect to \"{:s}\"...".format(SSID)) + print('Trying to connect to "{:s}"...'.format(SSID)) time.sleep_ms(1000) # We should have a valid IP now via DHCP @@ -29,13 +29,15 @@ print("WiFi Connected ", wlan.ifconfig()) client = MQTTClient("openmv", "test.mosquitto.org", port=1883) client.connect() + def callback(topic, msg): print(topic, msg) + # must set callback first client.set_callback(callback) client.subscribe("openmv/test") -while (True): - client.check_msg() # poll for messages. +while True: + client.check_msg() # poll for messages. time.sleep_ms(1000) diff --git a/scripts/examples/10-Arduino-Boards/Nicla-Vision/03-WiFi/ntp.py b/scripts/examples/10-Arduino-Boards/Nicla-Vision/03-WiFi/ntp.py index a8f42123d..65edea931 100644 --- a/scripts/examples/10-Arduino-Boards/Nicla-Vision/03-WiFi/ntp.py +++ b/scripts/examples/10-Arduino-Boards/Nicla-Vision/03-WiFi/ntp.py @@ -7,10 +7,10 @@ import socket import ustruct import utime -SSID='' # Network SSID -KEY='' # Network key +SSID = "" # Network SSID +KEY = "" # Network key -TIMESTAMP = 2208988800+946684800 +TIMESTAMP = 2208988800 + 946684800 # Init wlan module and connect to network print("Trying to connect... 
(This may take a while)...") @@ -19,7 +19,7 @@ wlan.active(True) wlan.connect(SSID, KEY) while not wlan.isconnected(): - print("Trying to connect to \"{:s}\"...".format(SSID)) + print('Trying to connect to "{:s}"...'.format(SSID)) time.sleep_ms(1000) # We should have a valid IP now via DHCP @@ -32,9 +32,9 @@ client = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) addr = socket.getaddrinfo("pool.ntp.org", 123)[0][4] # Send query -client.sendto('\x1b' + 47 * '\0', addr) +client.sendto("\x1b" + 47 * "\0", addr) data, address = client.recvfrom(1024) # Print time t = ustruct.unpack(">IIIIIIIIIIII", data)[10] - TIMESTAMP -print ("Year:%d Month:%d Day:%d Time: %d:%d:%d" % (utime.localtime(t)[0:6])) +print("Year:%d Month:%d Day:%d Time: %d:%d:%d" % (utime.localtime(t)[0:6])) diff --git a/scripts/examples/10-Arduino-Boards/Nicla-Vision/03-WiFi/scan.py b/scripts/examples/10-Arduino-Boards/Nicla-Vision/03-WiFi/scan.py index b02249620..a566be764 100644 --- a/scripts/examples/10-Arduino-Boards/Nicla-Vision/03-WiFi/scan.py +++ b/scripts/examples/10-Arduino-Boards/Nicla-Vision/03-WiFi/scan.py @@ -9,10 +9,12 @@ wlan = network.WLAN(network.STA_IF) wlan.active(True) print("Scanning...") -while (True): +while True: scan_result = wlan.scan() for ap in scan_result: - print("SSID: %s BSSID: %s Channel: %d RSSI: %d Auth: %d" - %(ap[0], ":".join(["%X"%i for i in ap[1]]), ap[2], ap[3], ap[4])) + print( + "SSID: %s BSSID: %s Channel: %d RSSI: %d Auth: %d" + % (ap[0], ":".join(["%X" % i for i in ap[1]]), ap[2], ap[3], ap[4]) + ) print() time.sleep_ms(1000) diff --git a/scripts/examples/10-Arduino-Boards/Nicla-Vision/03-WiFi/static_ip.py b/scripts/examples/10-Arduino-Boards/Nicla-Vision/03-WiFi/static_ip.py index 775e05960..20730398a 100644 --- a/scripts/examples/10-Arduino-Boards/Nicla-Vision/03-WiFi/static_ip.py +++ b/scripts/examples/10-Arduino-Boards/Nicla-Vision/03-WiFi/static_ip.py @@ -7,21 +7,21 @@ import socket import ustruct import utime -SSID='' # Network SSID -KEY='' # Network key +SSID = "" # Network SSID +KEY = "" # Network key -TIMESTAMP = 2208988800+946684800 +TIMESTAMP = 2208988800 + 946684800 # Init wlan module and connect to network wlan = network.WLAN(network.STA_IF) wlan.active(True) # ifconfig must be called before connect() -wlan.ifconfig(('192.168.1.200', '255.255.255.0', '192.168.1.1', '192.168.1.1')) +wlan.ifconfig(("192.168.1.200", "255.255.255.0", "192.168.1.1", "192.168.1.1")) wlan.connect(SSID, KEY) while not wlan.isconnected(): - print("Trying to connect to \"{:s}\"...".format(SSID)) + print('Trying to connect to "{:s}"...'.format(SSID)) time.sleep_ms(1000) # Create new socket @@ -31,9 +31,9 @@ client = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) addr = socket.getaddrinfo("pool.ntp.org", 123)[0][4] # Send query -client.sendto('\x1b' + 47 * '\0', addr) +client.sendto("\x1b" + 47 * "\0", addr) data, address = client.recvfrom(1024) # Print time t = ustruct.unpack(">IIIIIIIIIIII", data)[10] - TIMESTAMP -print ("Year:%d Month:%d Day:%d Time: %d:%d:%d" % (utime.localtime(t)[0:6])) +print("Year:%d Month:%d Day:%d Time: %d:%d:%d" % (utime.localtime(t)[0:6])) diff --git a/scripts/examples/10-Arduino-Boards/Nicla-Vision/04-Bluetooth/ble_temperature.py b/scripts/examples/10-Arduino-Boards/Nicla-Vision/04-Bluetooth/ble_temperature.py index 7a48d0c91..90d0b3372 100644 --- a/scripts/examples/10-Arduino-Boards/Nicla-Vision/04-Bluetooth/ble_temperature.py +++ b/scripts/examples/10-Arduino-Boards/Nicla-Vision/04-Bluetooth/ble_temperature.py @@ -43,7 +43,9 @@ class BLETemperature: 
((self._handle,),) = self._ble.gatts_register_services((_ENV_SENSE_SERVICE,)) self._connections = set() self._payload = advertising_payload( - name=name, services=[_ENV_SENSE_UUID], appearance=_ADV_APPEARANCE_GENERIC_THERMOMETER + name=name, + services=[_ENV_SENSE_UUID], + appearance=_ADV_APPEARANCE_GENERIC_THERMOMETER, ) self._advertise() diff --git a/scripts/examples/10-Arduino-Boards/Nicla-Vision/05-Low-Power/extint_wakeup.py b/scripts/examples/10-Arduino-Boards/Nicla-Vision/05-Low-Power/extint_wakeup.py index e27bde794..b8fca6386 100644 --- a/scripts/examples/10-Arduino-Boards/Nicla-Vision/05-Low-Power/extint_wakeup.py +++ b/scripts/examples/10-Arduino-Boards/Nicla-Vision/05-Low-Power/extint_wakeup.py @@ -6,9 +6,11 @@ import pyb import machine from pyb import Pin, ExtInt + def callback(line): pass + led = pyb.LED(3) pin = Pin("PG12", Pin.IN, Pin.PULL_UP) ext = ExtInt(pin, ExtInt.IRQ_FALLING, Pin.PULL_UP, callback) @@ -16,7 +18,7 @@ ext = ExtInt(pin, ExtInt.IRQ_FALLING, Pin.PULL_UP, callback) # Enter Stop Mode. Note the IDE will disconnect. machine.sleep() -while (True): +while True: led.on() time.sleep_ms(100) led.off() diff --git a/scripts/examples/10-Arduino-Boards/Nicla-Vision/05-Low-Power/stop_mode.py b/scripts/examples/10-Arduino-Boards/Nicla-Vision/05-Low-Power/stop_mode.py index e51b36b60..40b67a100 100644 --- a/scripts/examples/10-Arduino-Boards/Nicla-Vision/05-Low-Power/stop_mode.py +++ b/scripts/examples/10-Arduino-Boards/Nicla-Vision/05-Low-Power/stop_mode.py @@ -1,7 +1,6 @@ # Stop Mode Example # This example demonstrates using the low-power Stop Mode. -import time import pyb import machine diff --git a/scripts/examples/10-Arduino-Boards/Portenta-H7/00-Board-Control/adc_read_ext_channel.py b/scripts/examples/10-Arduino-Boards/Portenta-H7/00-Board-Control/adc_read_ext_channel.py index 8903afbbf..d487899c4 100644 --- a/scripts/examples/10-Arduino-Boards/Portenta-H7/00-Board-Control/adc_read_ext_channel.py +++ b/scripts/examples/10-Arduino-Boards/Portenta-H7/00-Board-Control/adc_read_ext_channel.py @@ -5,9 +5,9 @@ import time from pyb import ADC -adc = ADC("P6") # Must always be "P6". +adc = ADC("P6") # Must always be "P6". -while(True): +while True: # The ADC has 12-bits of resolution for 4096 values. print("ADC = %fv" % ((adc.read() * 3.3) / 4095)) time.sleep_ms(100) diff --git a/scripts/examples/10-Arduino-Boards/Portenta-H7/00-Board-Control/adc_read_int_channel.py b/scripts/examples/10-Arduino-Boards/Portenta-H7/00-Board-Control/adc_read_int_channel.py index 276a03cbb..0bf6fdae3 100644 --- a/scripts/examples/10-Arduino-Boards/Portenta-H7/00-Board-Control/adc_read_int_channel.py +++ b/scripts/examples/10-Arduino-Boards/Portenta-H7/00-Board-Control/adc_read_int_channel.py @@ -2,8 +2,10 @@ # # This example shows how to read internal ADC channels. 
-import time import pyb -adc = pyb.ADCAll(12) -print("VREF = %.1fv VREF = %.1fv Temp = %d" % (adc.read_core_vref(), adc.read_core_vbat(), adc.read_core_temp())) +adc = pyb.ADCAll(12) +print( + "VREF = %.1fv VBAT = %.1fv Temp = %d" + % (adc.read_core_vref(), adc.read_core_vbat(), adc.read_core_temp()) +) diff --git a/scripts/examples/10-Arduino-Boards/Portenta-H7/00-Board-Control/blinky.py b/scripts/examples/10-Arduino-Boards/Portenta-H7/00-Board-Control/blinky.py index 495387562..4d3dc2a7d 100644 --- a/scripts/examples/10-Arduino-Boards/Portenta-H7/00-Board-Control/blinky.py +++ b/scripts/examples/10-Arduino-Boards/Portenta-H7/00-Board-Control/blinky.py @@ -7,7 +7,7 @@ from machine import Pin # other than the RGB LED connected to Nina WiFi module. led = Pin("LED_BLUE", Pin.OUT) -while (True): +while True: led.on() time.sleep_ms(250) led.off() diff --git a/scripts/examples/10-Arduino-Boards/Portenta-H7/00-Board-Control/can.py b/scripts/examples/10-Arduino-Boards/Portenta-H7/00-Board-Control/can.py index d24b866e8..e00bc1462 100644 --- a/scripts/examples/10-Arduino-Boards/Portenta-H7/00-Board-Control/can.py +++ b/scripts/examples/10-Arduino-Boards/Portenta-H7/00-Board-Control/can.py @@ -12,18 +12,18 @@ TRANSMITTER = True can = CAN(2, CAN.NORMAL, baudrate=125_000, sample_point=75) # NOTE: uncomment to set bit timing manually, for example: -#can.init(CAN.NORMAL, prescaler=32, sjw=1, bs1=8, bs2=3) +# can.init(CAN.NORMAL, prescaler=32, sjw=1, bs1=8, bs2=3) can.restart() -if (TRANSMITTER): - while (True): +if TRANSMITTER: + while True: # Send message with id 1 - can.send('Hello', 1) + can.send("Hello", 1) time.sleep_ms(1000) else: # Runs on the receiving node. - if (omv.board_type() == 'H7'): # FDCAN + if omv.board_type() == "H7": # FDCAN # Set a filter to receive messages with id=1 -> 4 # Filter index, mode (RANGE, DUAL or MASK), FIFO (0 or 1), params can.setfilter(0, CAN.RANGE, 0, (1, 4)) @@ -32,6 +32,6 @@ else: # Filter index, mode (LIST16, etc..), FIFO (0 or 1), params can.setfilter(0, CAN.LIST16, 0, (1, 2, 3, 4)) - while (True): + while True: # Receive messages on FIFO 0 print(can.recv(0, timeout=10000)) diff --git a/scripts/examples/10-Arduino-Boards/Portenta-H7/00-Board-Control/cpufreq_scaling.py b/scripts/examples/10-Arduino-Boards/Portenta-H7/00-Board-Control/cpufreq_scaling.py index e707e7618..b2a650492 100644 --- a/scripts/examples/10-Arduino-Boards/Portenta-H7/00-Board-Control/cpufreq_scaling.py +++ b/scripts/examples/10-Arduino-Boards/Portenta-H7/00-Board-Control/cpufreq_scaling.py @@ -6,22 +6,27 @@ import image import time import cpufreq -sensor.reset() # Reset and initialize the sensor. +sensor.reset()  # Reset and initialize the sensor. sensor.set_pixformat(sensor.GRAYSCALE) # Set pixel format to RGB565 (or GRAYSCALE) -sensor.set_framesize(sensor.QVGA) # Set frame size to QVGA (320x240) -clock = time.clock() # Create a clock object to track the FPS. +sensor.set_framesize(sensor.QVGA)  # Set frame size to QVGA (320x240) +clock = time.clock()  # Create a clock object to track the FPS. + def test_image_processing(): for i in range(0, 50): - clock.tick() # Update the FPS clock. - img = sensor.snapshot() # Take a picture and return the image. + clock.tick()  # Update the FPS clock. + img = sensor.snapshot()  # Take a picture and return the image. img.find_edges(image.EDGE_CANNY, threshold=(50, 80)) + print("\nFrequency Scaling Test...") for f in cpufreq.get_supported_frequencies(): - print("Testing CPU Freq: %dMHz..." %(f)) + print("Testing CPU Freq: %dMHz..." 
% (f)) cpufreq.set_frequency(f) clock.reset() test_image_processing() freqs = cpufreq.get_current_frequencies() - print("CPU Freq:%dMHz HCLK:%dMhz PCLK1:%dMhz PCLK2:%dMhz FPS:%.2f" %(freqs[0], freqs[1], freqs[2], freqs[3], clock.fps())) + print( + "CPU Freq:%dMHz HCLK:%dMhz PCLK1:%dMhz PCLK2:%dMhz FPS:%.2f" + % (freqs[0], freqs[1], freqs[2], freqs[3], clock.fps()) + ) diff --git a/scripts/examples/10-Arduino-Boards/Portenta-H7/00-Board-Control/dac_write.py b/scripts/examples/10-Arduino-Boards/Portenta-H7/00-Board-Control/dac_write.py index c43a13c91..0ded38433 100644 --- a/scripts/examples/10-Arduino-Boards/Portenta-H7/00-Board-Control/dac_write.py +++ b/scripts/examples/10-Arduino-Boards/Portenta-H7/00-Board-Control/dac_write.py @@ -5,13 +5,13 @@ import time from pyb import DAC -dac = DAC("P6") # Must always be "P6". +dac = DAC("P6") # Must always be "P6". -while(True): +while True: # The DAC has 8-12 bits of resolution (default 8-bits). for i in range(256): dac.write(i) time.sleep_ms(20) for i in range(256): - dac.write(255-i) + dac.write(255 - i) time.sleep_ms(20) diff --git a/scripts/examples/10-Arduino-Boards/Portenta-H7/00-Board-Control/i2c_control.py b/scripts/examples/10-Arduino-Boards/Portenta-H7/00-Board-Control/i2c_control.py index a284cb2a0..dee72add6 100644 --- a/scripts/examples/10-Arduino-Boards/Portenta-H7/00-Board-Control/i2c_control.py +++ b/scripts/examples/10-Arduino-Boards/Portenta-H7/00-Board-Control/i2c_control.py @@ -6,14 +6,15 @@ from pyb import I2C -i2c = I2C(2, I2C.MASTER) # The i2c bus must always be 2. -mem = i2c.mem_read(256, 0x50, 0) # The eeprom slave address is 0x50. +i2c = I2C(2, I2C.MASTER) # The i2c bus must always be 2. +mem = i2c.mem_read(256, 0x50, 0) # The eeprom slave address is 0x50. print("\n[") for i in range(16): - print("\t[", end='') + print("\t[", end="") for j in range(16): - print("%03d" % mem[(i*16)+j], end='') - if j != 15: print(", ", end='') + print("%03d" % mem[(i * 16) + j], end="") + if j != 15: + print(", ", end="") print("]," if i != 15 else "]") print("]") diff --git a/scripts/examples/10-Arduino-Boards/Portenta-H7/00-Board-Control/led_control.py b/scripts/examples/10-Arduino-Boards/Portenta-H7/00-Board-Control/led_control.py index c58bfb5de..3df3eb09b 100644 --- a/scripts/examples/10-Arduino-Boards/Portenta-H7/00-Board-Control/led_control.py +++ b/scripts/examples/10-Arduino-Boards/Portenta-H7/00-Board-Control/led_control.py @@ -6,22 +6,32 @@ import time from pyb import LED -red_led = LED(1) +red_led = LED(1) green_led = LED(2) -blue_led = LED(3) -ir_led = LED(4) +blue_led = LED(3) +ir_led = LED(4) + def led_control(x): - if (x&1)==0: red_led.off() - elif (x&1)==1: red_led.on() - if (x&2)==0: green_led.off() - elif (x&2)==2: green_led.on() - if (x&4)==0: blue_led.off() - elif (x&4)==4: blue_led.on() - if (x&8)==0: ir_led.off() - elif (x&8)==8: ir_led.on() + if (x & 1) == 0: + red_led.off() + elif (x & 1) == 1: + red_led.on() + if (x & 2) == 0: + green_led.off() + elif (x & 2) == 2: + green_led.on() + if (x & 4) == 0: + blue_led.off() + elif (x & 4) == 4: + blue_led.on() + if (x & 8) == 0: + ir_led.off() + elif (x & 8) == 8: + ir_led.on() -while(True): + +while True: for i in range(16): led_control(i) time.sleep_ms(500) diff --git a/scripts/examples/10-Arduino-Boards/Portenta-H7/00-Board-Control/pin_control.py b/scripts/examples/10-Arduino-Boards/Portenta-H7/00-Board-Control/pin_control.py index dbcae5fcd..9e39524d7 100644 --- a/scripts/examples/10-Arduino-Boards/Portenta-H7/00-Board-Control/pin_control.py +++ 
b/scripts/examples/10-Arduino-Boards/Portenta-H7/00-Board-Control/pin_control.py @@ -6,8 +6,8 @@ from pyb import Pin # Connect a switch to pin 0 that will pull it low when the switch is closed. # Pin 1 will then light up. -pin0 = Pin('P0', Pin.IN, Pin.PULL_UP) -pin1 = Pin('P1', Pin.OUT_PP, Pin.PULL_NONE) +pin0 = Pin("P0", Pin.IN, Pin.PULL_UP) +pin1 = Pin("P1", Pin.OUT_PP, Pin.PULL_NONE) -while(True): +while True: pin1.value(not pin0.value()) diff --git a/scripts/examples/10-Arduino-Boards/Portenta-H7/00-Board-Control/pwm_control.py b/scripts/examples/10-Arduino-Boards/Portenta-H7/00-Board-Control/pwm_control.py index c7a52ce6e..13ea8cd81 100644 --- a/scripts/examples/10-Arduino-Boards/Portenta-H7/00-Board-Control/pwm_control.py +++ b/scripts/examples/10-Arduino-Boards/Portenta-H7/00-Board-Control/pwm_control.py @@ -13,26 +13,28 @@ import time from pyb import Pin, Timer -class PWM(): + +class PWM: def __init__(self, pin, tim, ch): self.pin = pin self.tim = tim - self.ch = ch; + self.ch = ch + pwms = { -# 'PWM1' : PWM('PA8', 1, 1), # TIM1 is Reserved. - 'PWM2' : PWM('PC6', 3, 1), - 'PWM3' : PWM('PC7', 3, 2), -# 'PWM4' : PWM('PG7', 0, 0), # HRTIM not supported. - 'PWM5' : PWM('PJ11', 8, 2), - 'PWM6' : PWM('PK1', 8, 3), - 'PWM7' : PWM('PH15', 8, 3), + # 'PWM1' : PWM('PA8', 1, 1), # TIM1 is Reserved. + "PWM2": PWM("PC6", 3, 1), + "PWM3": PWM("PC7", 3, 2), + # 'PWM4' : PWM('PG7', 0, 0), # HRTIM not supported. + "PWM5": PWM("PJ11", 8, 2), + "PWM6": PWM("PK1", 8, 3), + "PWM7": PWM("PH15", 8, 3), } # Generate a 1KHz square wave with 50% cycle on the following PWM. for k, pwm in pwms.items(): - tim = Timer(pwm.tim, freq=1000) # Frequency in Hz - ch = tim.channel(pwm.ch, Timer.PWM, pin=Pin(pwm.pin), pulse_width_percent=50) + tim = Timer(pwm.tim, freq=1000) # Frequency in Hz + ch = tim.channel(pwm.ch, Timer.PWM, pin=Pin(pwm.pin), pulse_width_percent=50) -while (True): +while True: time.sleep_ms(1000) diff --git a/scripts/examples/10-Arduino-Boards/Portenta-H7/00-Board-Control/rtc.py b/scripts/examples/10-Arduino-Boards/Portenta-H7/00-Board-Control/rtc.py index cfd2bbefc..f82bec7e7 100644 --- a/scripts/examples/10-Arduino-Boards/Portenta-H7/00-Board-Control/rtc.py +++ b/scripts/examples/10-Arduino-Boards/Portenta-H7/00-Board-Control/rtc.py @@ -7,6 +7,6 @@ from pyb import RTC rtc = RTC() rtc.datetime((2013, 7, 9, 2, 0, 0, 0, 0)) -while (True): +while True: print(rtc.datetime()) time.sleep_ms(1000) diff --git a/scripts/examples/10-Arduino-Boards/Portenta-H7/00-Board-Control/servo_control.py b/scripts/examples/10-Arduino-Boards/Portenta-H7/00-Board-Control/servo_control.py index 809fe3f7b..095aeed36 100644 --- a/scripts/examples/10-Arduino-Boards/Portenta-H7/00-Board-Control/servo_control.py +++ b/scripts/examples/10-Arduino-Boards/Portenta-H7/00-Board-Control/servo_control.py @@ -5,11 +5,11 @@ import time from pyb import Servo -s1 = Servo(1) # P7 -s2 = Servo(2) # P8 -s3 = Servo(3) # P9 +s1 = Servo(1) # P7 +s2 = Servo(2) # P8 +s3 = Servo(3) # P9 -while(True): +while True: for i in range(1000): s1.pulse_width(1000 + i) s2.pulse_width(1999 - i) diff --git a/scripts/examples/10-Arduino-Boards/Portenta-H7/00-Board-Control/spi_control.py b/scripts/examples/10-Arduino-Boards/Portenta-H7/00-Board-Control/spi_control.py index 1d368c779..cf5c732b6 100644 --- a/scripts/examples/10-Arduino-Boards/Portenta-H7/00-Board-Control/spi_control.py +++ b/scripts/examples/10-Arduino-Boards/Portenta-H7/00-Board-Control/spi_control.py @@ -8,14 +8,15 @@ import sensor import time from pyb import Pin, SPI -cs = Pin("P3", 
Pin.OUT_OD) +cs = Pin("P3", Pin.OUT_OD) rst = Pin("P7", Pin.OUT_PP) -rs = Pin("P8", Pin.OUT_PP) +rs = Pin("P8", Pin.OUT_PP) # The hardware SPI bus for your OpenMV Cam is always SPI bus 2. # NOTE: The SPI clock frequency will not always be the requested frequency. The hardware only supports # frequencies that are the bus frequency divided by a prescaler (which can be 2, 4, 8, 16, 32, 64, 128 or 256). -spi = SPI(2, SPI.MASTER, baudrate=int(1000000000/66), polarity=0, phase=0) +spi = SPI(2, SPI.MASTER, baudrate=int(1000000000 / 66), polarity=0, phase=0) + def write_command_byte(c): cs.low() @@ -23,16 +24,20 @@ def write_command_byte(c): spi.send(c) cs.high() + def write_data_byte(c): cs.low() rs.high() spi.send(c) cs.high() + def write_command(c, *data): write_command_byte(c) if data: - for d in data: write_data_byte(d) + for d in data: + write_data_byte(d) + def write_image(img): cs.low() @@ -40,13 +45,14 @@ def write_image(img): spi.send(img) cs.high() + # Reset the LCD. rst.low() time.sleep_ms(100) rst.high() time.sleep_ms(100) -write_command(0x11) # Sleep Exit +write_command(0x11)  # Sleep Exit time.sleep_ms(120) # Memory Data Access Control @@ -59,18 +65,18 @@ write_command(0x3A, 0x05) # Display On write_command(0x29) -sensor.reset() # Initialize the camera sensor. -sensor.set_pixformat(sensor.GRAYSCALE) # must be this -sensor.set_framesize(sensor.QQVGA2) # must be this -sensor.skip_frames(time = 2000) # Let new settings take affect. -clock = time.clock() # Tracks FPS. +sensor.reset()  # Initialize the camera sensor. +sensor.set_pixformat(sensor.GRAYSCALE)  # must be this +sensor.set_framesize(sensor.QQVGA2)  # must be this +sensor.skip_frames(time=2000)  # Let new settings take effect. +clock = time.clock()  # Tracks FPS. -while(True): - clock.tick() # Track elapsed milliseconds between snapshots(). - img = sensor.snapshot() # Take a picture and return the image. +while True: + clock.tick()  # Track elapsed milliseconds between snapshots(). + img = sensor.snapshot()  # Take a picture and return the image. - write_command(0x2C) # Write image command... + write_command(0x2C)  # Write image command... write_image(img) - print(clock.fps()) # Note: Your OpenMV Cam runs about half as fast while + print(clock.fps())  # Note: Your OpenMV Cam runs about half as fast while # connected to your computer. The FPS should increase once disconnected. diff --git a/scripts/examples/10-Arduino-Boards/Portenta-H7/00-Board-Control/timer_control.py b/scripts/examples/10-Arduino-Boards/Portenta-H7/00-Board-Control/timer_control.py index 5ccdbe99f..5954c2f0f 100644 --- a/scripts/examples/10-Arduino-Boards/Portenta-H7/00-Board-Control/timer_control.py +++ b/scripts/examples/10-Arduino-Boards/Portenta-H7/00-Board-Control/timer_control.py @@ -3,17 +3,20 @@ # This example shows how to use a timer for callbacks. 
import time -from pyb import Pin, Timer, LED +from pyb import LED +from pyb import Timer + +blue_led = LED(3) -blue_led = LED(3) # we will receive the timer object when being called # Note: functions that allocate memory are Not allowed in callbacks -def tick(timer): +def tick(timer): blue_led.toggle() - -tim = Timer(2, freq=1) # create a timer object using timer 2 - trigger at 1Hz -tim.callback(tick) # set the callback to our tick function -while (True): + +tim = Timer(2, freq=1)  # create a timer object using timer 2 - trigger at 1Hz +tim.callback(tick)  # set the callback to our tick function + +while True: time.sleep_ms(1000) diff --git a/scripts/examples/10-Arduino-Boards/Portenta-H7/00-Board-Control/timer_tests.py b/scripts/examples/10-Arduino-Boards/Portenta-H7/00-Board-Control/timer_tests.py index deee30bec..7152c87f9 100644 --- a/scripts/examples/10-Arduino-Boards/Portenta-H7/00-Board-Control/timer_tests.py +++ b/scripts/examples/10-Arduino-Boards/Portenta-H7/00-Board-Control/timer_tests.py @@ -3,20 +3,23 @@ # This example tests all the timers. import time -from pyb import Pin, Timer, LED +from pyb import LED +from pyb import Timer + +blue_led = LED(3) -blue_led = LED(3) # Note: functions that allocate memory are Not allowed in callbacks def tick(timer): blue_led.toggle() + print("") for i in range(1, 18): try: - print("Testing TIM%d... "%(i), end="") - tim = Timer(i, freq=10) # create a timer object using timer 4 - trigger at 1Hz - tim.callback(tick) # set the callback to our tick function + print("Testing TIM%d... " % (i), end="") + tim = Timer(i, freq=10)  # create a timer object using timer i - trigger at 10Hz + tim.callback(tick)  # set the callback to our tick function time.sleep_ms(1000) tim.deinit() except ValueError as e: diff --git a/scripts/examples/10-Arduino-Boards/Portenta-H7/00-Board-Control/uart_control.py b/scripts/examples/10-Arduino-Boards/Portenta-H7/00-Board-Control/uart_control.py index 54a4122e1..334ac4208 100644 --- a/scripts/examples/10-Arduino-Boards/Portenta-H7/00-Board-Control/uart_control.py +++ b/scripts/examples/10-Arduino-Boards/Portenta-H7/00-Board-Control/uart_control.py @@ -12,6 +12,6 @@ from pyb import UART # example see the BLE-Shield driver. uart = UART(3, 19200) -while(True): +while True: uart.write("Hello World!\r") time.sleep_ms(1000) diff --git a/scripts/examples/10-Arduino-Boards/Portenta-H7/00-Board-Control/usb_hid.py b/scripts/examples/10-Arduino-Boards/Portenta-H7/00-Board-Control/usb_hid.py index 54d2627a6..f1b77aba5 100644 --- a/scripts/examples/10-Arduino-Boards/Portenta-H7/00-Board-Control/usb_hid.py +++ b/scripts/examples/10-Arduino-Boards/Portenta-H7/00-Board-Control/usb_hid.py @@ -7,10 +7,10 @@ # # Add the following script to boot.py: # -##import pyb #(UNCOMMENT THIS LINE!) -##pyb.usb_mode('VCP+HID') # serial device + mouse (UNCOMMENT THIS LINE!) -##pyb.usb_mode('VCP+MSC') # serial device + storage device (default) -##pyb.usb_mode('VCP+HID', hid=pyb.hid_keyboard) # serial device + keyboard +# import pyb +# pyb.usb_mode('VCP+HID') # serial device + mouse (UNCOMMENT THIS LINE!) +# pyb.usb_mode('VCP+MSC') # serial device + storage device (default) +# pyb.usb_mode('VCP+HID', hid=pyb.hid_keyboard) # serial device + keyboard # # Copy boot.py to the root of the uSD card and restart the camera, it should now # act as a serial device and a mouse. 
@@ -25,7 +25,7 @@ import time hid = pyb.USB_HID() -while(True): +while True: # x, y and scroll # move 10 pixels to the right hid.send((0, 10, 0, 0)) diff --git a/scripts/examples/10-Arduino-Boards/Portenta-H7/00-Board-Control/usb_vcp.py b/scripts/examples/10-Arduino-Boards/Portenta-H7/00-Board-Control/usb_vcp.py index 9884e32ee..c3fcbc41e 100644 --- a/scripts/examples/10-Arduino-Boards/Portenta-H7/00-Board-Control/usb_vcp.py +++ b/scripts/examples/10-Arduino-Boards/Portenta-H7/00-Board-Control/usb_vcp.py @@ -16,24 +16,23 @@ # size = struct.unpack('IIIIIIIIIIII", data)[10] - TIMESTAMP -print ("Year:%d Month:%d Day:%d Time: %d:%d:%d" % (utime.localtime(t)[0:6])) +print("Year:%d Month:%d Day:%d Time: %d:%d:%d" % (utime.localtime(t)[0:6])) diff --git a/scripts/examples/10-Arduino-Boards/Portenta-H7/02-WiFi/scan.py b/scripts/examples/10-Arduino-Boards/Portenta-H7/02-WiFi/scan.py index b02249620..a566be764 100644 --- a/scripts/examples/10-Arduino-Boards/Portenta-H7/02-WiFi/scan.py +++ b/scripts/examples/10-Arduino-Boards/Portenta-H7/02-WiFi/scan.py @@ -9,10 +9,12 @@ wlan = network.WLAN(network.STA_IF) wlan.active(True) print("Scanning...") -while (True): +while True: scan_result = wlan.scan() for ap in scan_result: - print("SSID: %s BSSID: %s Channel: %d RSSI: %d Auth: %d" - %(ap[0], ":".join(["%X"%i for i in ap[1]]), ap[2], ap[3], ap[4])) + print( + "SSID: %s BSSID: %s Channel: %d RSSI: %d Auth: %d" + % (ap[0], ":".join(["%X" % i for i in ap[1]]), ap[2], ap[3], ap[4]) + ) print() time.sleep_ms(1000) diff --git a/scripts/examples/10-Arduino-Boards/Portenta-H7/02-WiFi/static_ip.py b/scripts/examples/10-Arduino-Boards/Portenta-H7/02-WiFi/static_ip.py index 775e05960..20730398a 100644 --- a/scripts/examples/10-Arduino-Boards/Portenta-H7/02-WiFi/static_ip.py +++ b/scripts/examples/10-Arduino-Boards/Portenta-H7/02-WiFi/static_ip.py @@ -7,21 +7,21 @@ import socket import ustruct import utime -SSID='' # Network SSID -KEY='' # Network key +SSID = "" # Network SSID +KEY = "" # Network key -TIMESTAMP = 2208988800+946684800 +TIMESTAMP = 2208988800 + 946684800 # Init wlan module and connect to network wlan = network.WLAN(network.STA_IF) wlan.active(True) # ifconfig must be called before connect() -wlan.ifconfig(('192.168.1.200', '255.255.255.0', '192.168.1.1', '192.168.1.1')) +wlan.ifconfig(("192.168.1.200", "255.255.255.0", "192.168.1.1", "192.168.1.1")) wlan.connect(SSID, KEY) while not wlan.isconnected(): - print("Trying to connect to \"{:s}\"...".format(SSID)) + print('Trying to connect to "{:s}"...'.format(SSID)) time.sleep_ms(1000) # Create new socket @@ -31,9 +31,9 @@ client = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) addr = socket.getaddrinfo("pool.ntp.org", 123)[0][4] # Send query -client.sendto('\x1b' + 47 * '\0', addr) +client.sendto("\x1b" + 47 * "\0", addr) data, address = client.recvfrom(1024) # Print time t = ustruct.unpack(">IIIIIIIIIIII", data)[10] - TIMESTAMP -print ("Year:%d Month:%d Day:%d Time: %d:%d:%d" % (utime.localtime(t)[0:6])) +print("Year:%d Month:%d Day:%d Time: %d:%d:%d" % (utime.localtime(t)[0:6])) diff --git a/scripts/examples/10-Arduino-Boards/Portenta-H7/03-Bluetooth/ble_temperature.py b/scripts/examples/10-Arduino-Boards/Portenta-H7/03-Bluetooth/ble_temperature.py index 7a48d0c91..90d0b3372 100644 --- a/scripts/examples/10-Arduino-Boards/Portenta-H7/03-Bluetooth/ble_temperature.py +++ b/scripts/examples/10-Arduino-Boards/Portenta-H7/03-Bluetooth/ble_temperature.py @@ -43,7 +43,9 @@ class BLETemperature: ((self._handle,),) = 
self._ble.gatts_register_services((_ENV_SENSE_SERVICE,)) self._connections = set() self._payload = advertising_payload( - name=name, services=[_ENV_SENSE_UUID], appearance=_ADV_APPEARANCE_GENERIC_THERMOMETER + name=name, + services=[_ENV_SENSE_UUID], + appearance=_ADV_APPEARANCE_GENERIC_THERMOMETER, ) self._advertise() diff --git a/scripts/examples/10-Arduino-Boards/Portenta-H7/04-LoRa/lora-example.py b/scripts/examples/10-Arduino-Boards/Portenta-H7/04-LoRa/lora-example.py index 7b26ef3e1..bb2f00baa 100644 --- a/scripts/examples/10-Arduino-Boards/Portenta-H7/04-LoRa/lora-example.py +++ b/scripts/examples/10-Arduino-Boards/Portenta-H7/04-LoRa/lora-example.py @@ -14,7 +14,7 @@ appKey = "12345678901234567890123456789012" try: lora.join_OTAA(appEui, appKey) # Or ABP: - #lora.join_ABP(devAddr, nwkSKey, appSKey, timeout=5000) + # lora.join_ABP(devAddr, nwkSKey, appSKey, timeout=5000) # You can catch individual errors like timeout, rx etc... except LoraErrorTimeout as e: print("Something went wrong; are you indoor? Move near a window and retry") @@ -35,8 +35,8 @@ except LoraErrorTimeout as e: print("ErrorTimeout:", e) # Read downlink messages -while (True): - if (lora.available()): +while True: + if lora.available(): data = lora.receive_data() if data: print("Port: " + data["port"]) diff --git a/scripts/examples/10-Arduino-Boards/Portenta-H7/05-Ethernet/eth_cable_test.py b/scripts/examples/10-Arduino-Boards/Portenta-H7/05-Ethernet/eth_cable_test.py index 8944c813c..9aa325d80 100644 --- a/scripts/examples/10-Arduino-Boards/Portenta-H7/05-Ethernet/eth_cable_test.py +++ b/scripts/examples/10-Arduino-Boards/Portenta-H7/05-Ethernet/eth_cable_test.py @@ -13,9 +13,9 @@ lan.config(low_power=False) # Delay for auto negotiation time.sleep(3.0) -while (True): +while True: print("Cable is", "connected." if lan.status() else "disconnected.") time.sleep(1.0) # Put Eth back in low-power mode if needed. -#lan.config(low_power=True) +# lan.config(low_power=True) diff --git a/scripts/examples/10-Arduino-Boards/Portenta-H7/05-Ethernet/http_client.py b/scripts/examples/10-Arduino-Boards/Portenta-H7/05-Ethernet/http_client.py index f1f414a2c..e8c4fd5bd 100644 --- a/scripts/examples/10-Arduino-Boards/Portenta-H7/05-Ethernet/http_client.py +++ b/scripts/examples/10-Arduino-Boards/Portenta-H7/05-Ethernet/http_client.py @@ -7,7 +7,7 @@ HOST = "www.google.com" lan = network.LAN() lan.active(True) -lan.ifconfig('dhcp') +lan.ifconfig("dhcp") # We should have a valid IP now via DHCP print(lan.ifconfig()) @@ -24,7 +24,7 @@ client.connect(addr) client.settimeout(3.0) # Send HTTP request and recv response -client.send("GET / HTTP/1.1\r\nHost: %s\r\n\r\n"%(HOST)) +client.send("GET / HTTP/1.1\r\nHost: %s\r\n\r\n" % (HOST)) print(client.recv(1024)) # Close socket diff --git a/scripts/examples/10-Arduino-Boards/Portenta-H7/05-Ethernet/http_client_ssl.py b/scripts/examples/10-Arduino-Boards/Portenta-H7/05-Ethernet/http_client_ssl.py index a246fa730..72d348d9c 100644 --- a/scripts/examples/10-Arduino-Boards/Portenta-H7/05-Ethernet/http_client_ssl.py +++ b/scripts/examples/10-Arduino-Boards/Portenta-H7/05-Ethernet/http_client_ssl.py @@ -8,7 +8,7 @@ HOST = "www.google.com" lan = network.LAN() lan.active(True) -lan.ifconfig('dhcp') +lan.ifconfig("dhcp") # We should have a valid IP now via DHCP print(lan.ifconfig()) @@ -31,7 +31,7 @@ request += "HOST: %s\r\n" request += "User-Agent: Mozilla/5.0\r\n" request += "Connection: keep-alive\r\n\r\n" # Add more headers if needed. 
-client.write(request%(HOST)+"\r\n") +client.write(request % (HOST) + "\r\n") response = client.read(1024) for l in response.split(b"\r\n"): diff --git a/scripts/examples/10-Arduino-Boards/Portenta-H7/05-Ethernet/peer_to_peer.py b/scripts/examples/10-Arduino-Boards/Portenta-H7/05-Ethernet/peer_to_peer.py index 8612baafc..acf0c701d 100644 --- a/scripts/examples/10-Arduino-Boards/Portenta-H7/05-Ethernet/peer_to_peer.py +++ b/scripts/examples/10-Arduino-Boards/Portenta-H7/05-Ethernet/peer_to_peer.py @@ -9,8 +9,8 @@ import time lan = network.LAN() lan.active(True) -lan.ifconfig(('192.168.1.102', '255.255.255.0', '192.168.1.1', '192.168.1.1')) +lan.ifconfig(("192.168.1.102", "255.255.255.0", "192.168.1.1", "192.168.1.1")) -while (True): +while True: # Nothing else to do. time.sleep(1.0) diff --git a/scripts/examples/10-Arduino-Boards/Portenta-H7/06-Low-Power/extint_wakeup.py b/scripts/examples/10-Arduino-Boards/Portenta-H7/06-Low-Power/extint_wakeup.py index a80f5ef4f..8a98522b8 100644 --- a/scripts/examples/10-Arduino-Boards/Portenta-H7/06-Low-Power/extint_wakeup.py +++ b/scripts/examples/10-Arduino-Boards/Portenta-H7/06-Low-Power/extint_wakeup.py @@ -6,9 +6,11 @@ import pyb import machine from pyb import Pin, ExtInt + def callback(line): pass + led = pyb.LED(3) pin = Pin("P5", Pin.IN, Pin.PULL_UP) ext = ExtInt(pin, ExtInt.IRQ_FALLING, Pin.PULL_UP, callback) @@ -16,7 +18,7 @@ ext = ExtInt(pin, ExtInt.IRQ_FALLING, Pin.PULL_UP, callback) # Enter Stop Mode. Note the IDE will disconnect. machine.sleep() -while (True): +while True: led.on() time.sleep_ms(100) led.off() diff --git a/scripts/examples/10-Arduino-Boards/Portenta-H7/06-Low-Power/himax_wakeup_on_motion_detection.py b/scripts/examples/10-Arduino-Boards/Portenta-H7/06-Low-Power/himax_wakeup_on_motion_detection.py index 45f3b7ab3..2f1014b50 100644 --- a/scripts/examples/10-Arduino-Boards/Portenta-H7/06-Low-Power/himax_wakeup_on_motion_detection.py +++ b/scripts/examples/10-Arduino-Boards/Portenta-H7/06-Low-Power/himax_wakeup_on_motion_detection.py @@ -2,7 +2,6 @@ # to wake up from low-power Stop Mode on motion detection interrupts. import sensor -import time import pyb import machine from pyb import Pin, ExtInt @@ -17,18 +16,20 @@ sensor.ioctl(sensor.IOCTL_HIMAX_MD_WINDOW, (0, 0, 320, 240)) sensor.ioctl(sensor.IOCTL_HIMAX_MD_CLEAR) sensor.ioctl(sensor.IOCTL_HIMAX_MD_ENABLE, True) + def on_motion(line): pass + led = pyb.LED(3) ext = ExtInt(Pin("PC15"), ExtInt.IRQ_RISING, Pin.PULL_DOWN, on_motion) -while(True): +while True: led.off() - sensor.ioctl(sensor.IOCTL_HIMAX_OSC_ENABLE, True) # Switch to internal OSC - sensor.ioctl(sensor.IOCTL_HIMAX_MD_CLEAR) # Clear MD flag - machine.sleep() # Enter low-power mode, will wake up on MD interrupt. + sensor.ioctl(sensor.IOCTL_HIMAX_OSC_ENABLE, True) # Switch to internal OSC + sensor.ioctl(sensor.IOCTL_HIMAX_MD_CLEAR) # Clear MD flag + machine.sleep() # Enter low-power mode, will wake up on MD interrupt. 
sensor.ioctl(sensor.IOCTL_HIMAX_OSC_ENABLE, False) # Switch back to MCLK led.on() - for i in range(0, 60): # Capture a few frames + for i in range(0, 60):  # Capture a few frames img = sensor.snapshot() diff --git a/scripts/examples/10-Arduino-Boards/Portenta-H7/06-Low-Power/sensor_sleep.py b/scripts/examples/10-Arduino-Boards/Portenta-H7/06-Low-Power/sensor_sleep.py index 33185f097..7983a27f8 100644 --- a/scripts/examples/10-Arduino-Boards/Portenta-H7/06-Low-Power/sensor_sleep.py +++ b/scripts/examples/10-Arduino-Boards/Portenta-H7/06-Low-Power/sensor_sleep.py @@ -3,10 +3,9 @@ # 40mA when enabled and it's automatically cleared when calling sensor reset(). import sensor -import time -sensor.reset() # Reset and initialize the sensor. -sensor.set_pixformat(sensor.GRAYSCALE) # Set pixel format to RGB565 (or GRAYSCALE) -sensor.set_framesize(sensor.QVGA) # Set frame size to QVGA (320x240) -sensor.skip_frames(time = 3000) # Capture frames for 3000ms. -sensor.sleep(True) # Enable sensor sleep mode (saves about 40mA). +sensor.reset()  # Reset and initialize the sensor. +sensor.set_pixformat(sensor.GRAYSCALE)  # Set pixel format to GRAYSCALE (or RGB565) +sensor.set_framesize(sensor.QVGA)  # Set frame size to QVGA (320x240) +sensor.skip_frames(time=3000)  # Capture frames for 3000ms. +sensor.sleep(True)  # Enable sensor sleep mode (saves about 40mA). diff --git a/scripts/examples/10-Arduino-Boards/Portenta-H7/06-Low-Power/stop_mode.py b/scripts/examples/10-Arduino-Boards/Portenta-H7/06-Low-Power/stop_mode.py index e51b36b60..40b67a100 100644 --- a/scripts/examples/10-Arduino-Boards/Portenta-H7/06-Low-Power/stop_mode.py +++ b/scripts/examples/10-Arduino-Boards/Portenta-H7/06-Low-Power/stop_mode.py @@ -1,7 +1,6 @@ # Stop Mode Example # This example demonstrates using the low-power Stop Mode. -import time import pyb import machine From 577277e50921216634635e0f8a1db0846c5ee59f Mon Sep 17 00:00:00 2001 From: iabdalkader Date: Wed, 5 Jul 2023 19:05:56 +0200 Subject: [PATCH 3/3] github: Update Python linter workflow. 
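Use recursive globs so Python files in nested example and library directories actually trigger the workflow, rename the build job to formatting-check to reflect what it does, install the dependencies from their new location under .github/workflows/, run both flake8 passes over scripts/examples/ in addition to scripts/libraries/, and pin flake8 and pytest so CI runs are reproducible.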
--- .github/workflows/python-linter.yml | 12 ++++++------ .github/workflows/requirements.txt | 4 ++-- 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/.github/workflows/python-linter.yml b/.github/workflows/python-linter.yml index f4f129074..75680c97d 100644 --- a/.github/workflows/python-linter.yml +++ b/.github/workflows/python-linter.yml @@ -12,11 +12,11 @@ on: branches: - 'master' paths: - - 'scripts/examples/*.py' - - 'scripts/libraries/*.py' + - 'scripts/examples/**.py' + - 'scripts/libraries/**.py' jobs: - build: + formatting-check: runs-on: ubuntu-latest strategy: fail-fast: false @@ -35,12 +35,12 @@ jobs: - name: '🛠 Install dependencies' run: | - pip install -r requirements.txt + pip install -r .github/workflows/requirements.txt flake8 --version pytest --version - name: '😾 Lint with flake8' run: | # stop the build if there are Python syntax errors or undefined names - flake8 --count --select=E9,F63,F7,F82 --ignore=F821 --show-source --statistics scripts/libraries/ - flake8 --count --max-complexity=15 --max-line-length=120 --ignore=F821,E722,E741,C901,E713,W605,E203,W503,F841,F403,F405 --statistics scripts/libraries/ + flake8 --count --select=E9,F63,F7,F82 --ignore=F821 --show-source --statistics scripts/libraries/ scripts/examples/ + flake8 --count --max-complexity=15 --max-line-length=120 --ignore=F821,E722,E741,C901,E713,W605,E203,W503,F841,F403,F405 --statistics scripts/libraries/ scripts/examples/ diff --git a/.github/workflows/requirements.txt b/.github/workflows/requirements.txt index 28ecacab6..074bb6d19 100644 --- a/.github/workflows/requirements.txt +++ b/.github/workflows/requirements.txt @@ -1,2 +1,2 @@ -flake8 -pytest +flake8==6.0.0 +pytest==7.4.0
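For reference, the updated checks can be reproduced locally before sending a patch. A minimal sketch, assuming a checkout of the repository root and a Python environment with pip on the PATH; the flake8 invocations are copied verbatim from the workflow's run steps above:

    pip install -r .github/workflows/requirements.txt
    flake8 --count --select=E9,F63,F7,F82 --ignore=F821 --show-source --statistics scripts/libraries/ scripts/examples/
    flake8 --count --max-complexity=15 --max-line-length=120 --ignore=F821,E722,E741,C901,E713,W605,E203,W503,F841,F403,F405 --statistics scripts/libraries/ scripts/examples/

The first pass fails only on Python syntax errors and undefined names; the second applies the project's relaxed style profile (max complexity 15, 120-character lines, and the ignore list shown) to both scripts/libraries/ and scripts/examples/.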