Mirror of https://github.com/openmv/openmv.git, synced 2025-11-04 14:49:50 +08:00
scripts/examples: Update examples.

- Fix audio scripts' init args.
- Update display example.
commit c5f2bc7e10
parent c7de325f82
Display example:

@@ -6,6 +6,7 @@
 import sensor
 import time
+import image
 import display
 from gt911 import GT911
 from machine import I2C
 
@@ -17,8 +18,6 @@ points_colors = ((255, 0, 0), (0, 255, 0), (0, 0, 255), (0, 255, 255), (255, 255
 sensor.reset()  # Reset and initialize the sensor.
 sensor.set_pixformat(sensor.RGB565)  # Set pixel format to RGB565 (or GRAYSCALE)
 sensor.set_framesize(sensor.VGA)  # Set frame size to QVGA (320x240)
-sensor.set_vflip(True)  # Flip image for the display
-# sensor.set_transpose(True)  # Hardware transpose will be slower at this resolution
 
 lcd = display.DSIDisplay(
     framesize=display.FWVGA, portrait=True, refresh=60, controller=display.ST7701()
@@ -57,10 +56,7 @@ while True:
         )
         touch_detected = False
 
-    # Rotate the image in place.
-    img.replace(transpose=True)
-
     # Draw the image on the display.
-    lcd.write(img, y=IMG_OFFSET)
+    lcd.write(img, y=IMG_OFFSET, hint=image.TRANSPOSE | image.VFLIP)
 
     print(clock.fps())
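To make the intent of the display change concrete, here is a minimal sketch of the updated example flow, assuming an OpenMV board with a DSI display. The IMG_OFFSET value, the clock setup, and the omitted touch handling are placeholders rather than the exact example code; the point is that the image is no longer flipped at the sensor or transposed in place, and the rotation/flip are instead passed to the display driver as render hints.

# Minimal sketch, assuming an OpenMV board with a DSI display.
# IMG_OFFSET and the snapshot loop are placeholders; the real example
# also draws touch points reported by the GT911 driver.
import sensor
import time
import image
import display

sensor.reset()                       # Reset and initialize the sensor.
sensor.set_pixformat(sensor.RGB565)  # RGB565 (or GRAYSCALE).
sensor.set_framesize(sensor.VGA)     # 640x480 frames.

lcd = display.DSIDisplay(
    framesize=display.FWVGA, portrait=True, refresh=60, controller=display.ST7701()
)

IMG_OFFSET = 0  # Placeholder; the example defines its own vertical offset.
clock = time.clock()

while True:
    clock.tick()
    img = sensor.snapshot()
    # Rotate and flip during the blit via hints instead of calling
    # sensor.set_vflip() and img.replace(transpose=True).
    lcd.write(img, y=IMG_OFFSET, hint=image.TRANSPOSE | image.VFLIP)
    print(clock.fps())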
Audio/micro_speech examples (the same hunk is applied in three scripts; file names are not shown in this view):

@@ -20,7 +20,7 @@ led_green = pyb.LED(2)
 
 model = tf.load("/model.tflite")
 speech = micro_speech.MicroSpeech()
-audio.init(channels=1, frequency=16000, gain=24, highpass=0.9883)
+audio.init(channels=1, frequency=16000, gain_db=24, highpass=0.9883)
 
 # Start audio streaming
 audio.start_streaming(speech.audio_callback)

@@ -20,7 +20,7 @@ led_green = pyb.LED(2)
 
 model = tf.load("/model.tflite")
 speech = micro_speech.MicroSpeech()
-audio.init(channels=1, frequency=16000, gain=24, highpass=0.9883)
+audio.init(channels=1, frequency=16000, gain_db=24, highpass=0.9883)
 
 # Start audio streaming
 audio.start_streaming(speech.audio_callback)

@@ -20,7 +20,7 @@ led_green = pyb.LED(2)
 
 model = tf.load("/model.tflite")
 speech = micro_speech.MicroSpeech()
-audio.init(channels=1, frequency=16000, gain=24, highpass=0.9883)
+audio.init(channels=1, frequency=16000, gain_db=24, highpass=0.9883)
 
 # Start audio streaming
 audio.start_streaming(speech.audio_callback)
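For reference, a minimal sketch of the updated audio init sequence, assuming the OpenMV audio, tf, and micro_speech modules used by these examples; everything here mirrors the diff except the comments, and the loaded model is consumed later by the recognition loop that the hunk does not show.

# Minimal sketch of the micro_speech setup after the keyword rename.
import audio
import tf
import micro_speech

model = tf.load("/model.tflite")     # Keyword-spotting model (used later in the example).
speech = micro_speech.MicroSpeech()

# The microphone gain keyword is now gain_db (gain in decibels) instead of gain.
audio.init(channels=1, frequency=16000, gain_db=24, highpass=0.9883)

# Stream audio frames into the keyword spotter.
audio.start_streaming(speech.audio_callback)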