Merge pull request #2236 from kwagyeman/kwabena/improve_image

modules/py_image: Add support for turning lists/tuples into Images.
Ibrahim Abdelkader 2024-06-22 16:20:38 +02:00 committed by GitHub
commit 1c6a18dd1d
2 changed files with 99 additions and 6 deletions
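
The change adds a constructor path that builds an Image directly from a Python list or tuple of numbers. A minimal sketch of the grayscale form, assuming an OpenMV build that includes this commit (the buffer contents and shape below are made up for illustration):

    import image

    # Four made-up values in the 0.0-1.0 range, e.g. one 2x2 model output channel.
    data = (0.0, 0.25, 0.5, 1.0)

    # shape=(height, width, channels); strides=(start, step) walks the flat sequence;
    # scale=(multiplier, offset) maps each value to value * 255 + 0, saturated to 0-255.
    img = image.Image(data, shape=(2, 2, 1), strides=(0, 1), scale=(255, 0))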

View File

@@ -10,6 +10,7 @@ import sensor
 import time
 import tf
 import math
+import image
 
 sensor.reset()                      # Reset and initialize the sensor.
 sensor.set_pixformat(sensor.RGB565) # Set pixel format to RGB565 (or GRAYSCALE)
@@ -40,17 +41,16 @@ colors = [ # Add more colors if you are detecting more than 7 types of classes
 # object. So, we will get those output images and then run find_blobs() on them to extract the
 # centroids. We will also run get_stats() on the detected blobs to determine their score.
 # The Non-Max-Suppression (NMS) object then filters out overlapping detections and maps their
-# position in the output image back to the original input image. The callback then returns a
+# position in the output image back to the original input image. The function then returns a
 # list per class, each of which contains a list of (rect, score) tuples representing the detected
 # objects.
-def fomo_callback(model, rect):
-    out = model.output[0]
+def fomo_post_process(model, output, rect):
     oh, ow, oc = model.output_shape
     nms = tf.NMS(ow, oh, rect)
     for i in range(oc):
-        img = out.get_image(i)
+        img = image.Image(output, shape=(oh, ow, 1), strides=(i, oc), scale=(255, 0))
         blobs = img.find_blobs(threshold_list, x_stride=1, area_threshold=1, pixels_threshold=1)
         for b in blobs:
             rect = b.rect()
@@ -67,7 +67,7 @@ while True:
     img = sensor.snapshot()
 
     for i, detection_list in enumerate(
-        net.predict(img, callback=fomo_callback)
+        fomo_post_process(net, net.predict(img), rect=(0, 0, img.width(), img.height()))
     ):
         if i == 0:
             continue  # background class
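
For reference, the new image.Image(output, shape=(oh, ow, 1), strides=(i, oc), scale=(255, 0)) call slices one class channel out of the flat model output. A rough pure-Python sketch of the same computation (the real work happens in C; channel_to_gray_bytes is a hypothetical name used only for illustration):

    def channel_to_gray_bytes(output, oh, ow, oc, i):
        # start=i, step=oc selects channel i from a flat (oh, ow, oc) buffer,
        # then maps each float to a saturated 8-bit pixel via value * 255 + 0.
        return bytes(
            min(max(round(output[i + j * oc] * 255 + 0), 0), 255)
            for j in range(oh * ow)
        )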

View File

@@ -6745,12 +6745,15 @@ mp_obj_t py_image_from_struct(image_t *img) {
 }
 
 mp_obj_t py_image_load_image(uint n_args, const mp_obj_t *pos_args, mp_map_t *kw_args) {
-    enum { ARG_height, ARG_pixformat, ARG_buffer, ARG_copy_to_fb };
+    enum { ARG_height, ARG_pixformat, ARG_buffer, ARG_copy_to_fb, ARG_shape, ARG_strides, ARG_scale };
     static const mp_arg_t allowed_args[] = {
         { MP_QSTR_height, MP_ARG_INT, {.u_int = -1} },
         { MP_QSTR_pixformat, MP_ARG_INT, {.u_int = -1} },
         { MP_QSTR_buffer, MP_ARG_OBJ | MP_ARG_KW_ONLY, {.u_rom_obj = MP_ROM_NONE} },
         { MP_QSTR_copy_to_fb, MP_ARG_BOOL | MP_ARG_KW_ONLY, {.u_bool = false} },
+        { MP_QSTR_shape, MP_ARG_OBJ | MP_ARG_KW_ONLY, {.u_rom_obj = MP_ROM_NONE} },
+        { MP_QSTR_strides, MP_ARG_OBJ | MP_ARG_KW_ONLY, {.u_rom_obj = MP_ROM_NONE} },
+        { MP_QSTR_scale, MP_ARG_OBJ | MP_ARG_KW_ONLY, {.u_rom_obj = MP_ROM_NONE} },
     };
 
     mp_arg_val_t args[MP_ARRAY_SIZE(allowed_args)];
@@ -6779,6 +6782,96 @@ mp_obj_t py_image_load_image(uint n_args, const mp_obj_t *pos_args, mp_map_t *kw_args) {
         #else
         mp_raise_msg(&mp_type_OSError, MP_ERROR_TEXT("Image I/O is not supported"));
         #endif // IMLIB_ENABLE_IMAGE_FILE_IO
+    } else if (MP_OBJ_IS_TYPE(pos_args[0], &mp_type_tuple) || MP_OBJ_IS_TYPE(pos_args[0], &mp_type_list)) {
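+        // shape=(height, width, channels): geometry plus pixel format; 1 channel
+        // selects GRAYSCALE and 3 selects RGB565.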
+        mp_obj_t *shape;
+        mp_obj_get_array_fixed_n(args[ARG_shape].u_obj, 3, &shape);
+        image.h = mp_obj_get_int(shape[0]);
+        PY_ASSERT_TRUE_MSG(image.h > 0, "Image height must be > 0");
+        image.w = mp_obj_get_int(shape[1]);
+        PY_ASSERT_TRUE_MSG(image.w > 0, "Image width must be > 0");
+        int channels = mp_obj_get_int(shape[2]);
+        if (channels == 1) {
+            image.pixfmt = PIXFORMAT_GRAYSCALE;
+        } else if (channels == 3) {
+            image.pixfmt = PIXFORMAT_RGB565;
+        } else {
+            mp_raise_ValueError(MP_ERROR_TEXT("Channels must be 1 or 3"));
+        }
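+        // strides=(start, step) for grayscale data, or ((start_r, start_g, start_b), step)
+        // for RGB data: start is the first index read and step is the index increment.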
+        mp_obj_t *strides;
+        mp_obj_get_array_fixed_n(args[ARG_strides].u_obj, 2, &strides);
+        int start = 0;
+        int start_r = 0;
+        int start_g = 0;
+        int start_b = 0;
+        if (channels == 1) {
+            start = mp_obj_get_int(strides[0]);
+            PY_ASSERT_TRUE_MSG(start >= 0, "Start must be >= 0");
+        } else {
+            mp_obj_t *rgb_strides;
+            mp_obj_get_array_fixed_n(strides[0], 3, &rgb_strides);
+            start_r = mp_obj_get_int(rgb_strides[0]);
+            PY_ASSERT_TRUE_MSG(start_r >= 0, "R Start must be >= 0");
+            start_g = mp_obj_get_int(rgb_strides[1]);
+            PY_ASSERT_TRUE_MSG(start_g >= 0, "G Start must be >= 0");
+            start_b = mp_obj_get_int(rgb_strides[2]);
+            PY_ASSERT_TRUE_MSG(start_b >= 0, "B Start must be >= 0");
+        }
+        int step = mp_obj_get_int(strides[1]);
+        PY_ASSERT_TRUE_MSG(step > 0, "Step must be > 0");
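+        // step_max is the offset of the last element each channel will read, so the
+        // source sequence must be longer than start + step_max for every channel.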
+        mp_obj_t *items;
+        size_t items_len;
+        mp_obj_get_array(pos_args[0], &items_len, &items);
+        int size = image.w * image.h;
+        int step_max = (size - 1) * step;
+        if (channels == 1) {
+            if (items_len <= (start + step_max)) {
+                mp_raise_ValueError(MP_ERROR_TEXT("Array too small"));
+            }
+        } else {
+            if ((items_len <= (start_r + step_max)) ||
+                (items_len <= (start_g + step_max)) ||
+                (items_len <= (start_b + step_max))) {
+                mp_raise_ValueError(MP_ERROR_TEXT("Array too small"));
+            }
+        }
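+        // scale=(multiplier, offset): each element maps to (value * multiplier) + offset,
+        // rounded and saturated to an unsigned 8-bit pixel value.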
+        mp_obj_t *scale;
+        mp_obj_get_array_fixed_n(args[ARG_scale].u_obj, 2, &scale);
+        float fscale = mp_obj_get_float(scale[0]);
+        float fadd = mp_obj_get_float(scale[1]);
+        if (args[ARG_copy_to_fb].u_bool) {
+            py_helper_set_to_framebuffer(&image);
+        } else {
+            image.data = xalloc(image_size(&image));
+        }
+        if (channels == 1) {
+            for (int i = 0; i < size; i++, start += step) {
+                ((uint8_t *) image.data)[i] = __USAT(fast_roundf((mp_obj_get_float(items[start]) * fscale) + fadd), 8);
+            }
+        } else {
+            for (int i = 0; i < size; i++, start_r += step, start_g += step, start_b += step) {
+                int r = __USAT(fast_roundf((mp_obj_get_float(items[start_r]) * fscale) + fadd), 8);
+                int g = __USAT(fast_roundf((mp_obj_get_float(items[start_g]) * fscale) + fadd), 8);
+                int b = __USAT(fast_roundf((mp_obj_get_float(items[start_b]) * fscale) + fadd), 8);
+                ((uint16_t *) image.data)[i] = COLOR_R8_G8_B8_TO_RGB565(r, g, b);
+            }
+        }
     } else {
         image.w = mp_obj_get_int(pos_args[0]);
         PY_ASSERT_TRUE_MSG(image.w > 0, "Image width must be > 0");
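
For three-channel data, the strides argument nests the per-channel start indices, as the code above shows. A minimal sketch of the RGB form, again assuming an OpenMV build that includes this commit (the buffer below is made up): for interleaved values laid out [r0, g0, b0, r1, g1, b1, ...], the red, green, and blue starts are 0, 1, and 2, and the step is 3:

    import image

    # One made-up 1x2 RGB image, interleaved and already in the 0-255 range.
    data = [255, 0, 0, 0, 255, 0]  # one red pixel, one green pixel

    # strides=((start_r, start_g, start_b), step); scale=(1, 0) leaves values unscaled.
    img = image.Image(data, shape=(1, 2, 3), strides=((0, 1, 2), 3), scale=(1, 0))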