diff --git a/scripts/examples/03-Machine-Learning/00-TensorFlow/tf_image_classification.py b/scripts/examples/03-Machine-Learning/00-TensorFlow/tf_image_classification.py
index d38133cbc..71f050745 100644
--- a/scripts/examples/03-Machine-Learning/00-TensorFlow/tf_image_classification.py
+++ b/scripts/examples/03-Machine-Learning/00-TensorFlow/tf_image_classification.py
@@ -1,5 +1,5 @@
 # This work is licensed under the MIT license.
-# Copyright (c) 2013-2023 OpenMV LLC. All rights reserved.
+# Copyright (c) 2013-2024 OpenMV LLC. All rights reserved.
 # https://github.com/openmv/openmv/blob/master/LICENSE
 #
 # TensorFlow Lite Mobilenet V1 Example
@@ -12,15 +12,11 @@
 # default model is not really usable for anything. You have to use transfer
 # learning to apply the model to a target problem by re-training the model.
 #
-# NOTE: This example only works on the OpenMV Cam H7 Pro (that has SDRAM) and better!
+# NOTE: This example only works on the OpenMV Cam H7 Plus (that has SDRAM) and better!
 # To get the models please see the CNN Network library in OpenMV IDE under
 # Tools -> Machine Vision. The labels are there too.
 # You should insert a microSD card into your camera and copy-paste the mobilenet_labels.txt
 # file and your chosen model into the root folder for this script to work.
-#
-# In this example we slide the detector window over the image and get a list
-# of activations. Note that use a CNN with a sliding window is extremely compute
-# expensive so for an exhaustive search do not expect the CNN to be real-time.
 
 import sensor
 import time
@@ -41,6 +37,8 @@ mobilenet = "mobilenet_v%s_%s_%s_quant.tflite" % (
     mobilenet_width,
     mobilenet_resolution,
 )
+
+net = tf.Model(mobilenet, load_to_fb=True)
 
 labels = [line.rstrip("\n") for line in open("mobilenet_labels.txt")]
 
 clock = time.clock()
@@ -49,31 +47,12 @@ while True:
     clock.tick()
 
     img = sensor.snapshot()
 
-    # net.classify() will run the network on an roi in the image (or on the whole image if the roi is not
-    # specified). A classification score output vector will be generated for each location. At each scale the
-    # detection window is moved around in the ROI using x_overlap (0-1) and y_overlap (0-1) as a guide.
-    # If you set the overlap to 0.5 then each detection window will overlap the previous one by 50%. Note
-    # the computational work load goes WAY up the more overlap. Finally, for multi-scale matching after
-    # sliding the network around in the x/y dimensions the detection window will shrink by scale_mul (0-1)
-    # down to min_scale (0-1). For example, if scale_mul is 0.5 the detection window will shrink by 50%.
-    # Note that at a lower scale there's even more area to search if x_overlap and y_overlap are small...
-
-    # default settings just do one detection... change them to search the image...
-    # Setting x_overlap=-1 forces the window to stay centered in the ROI in the x direction always. If
-    # y_overlap is not -1 the method will search in all vertical positions.
-    # Setting y_overlap=-1 forces the window to stay centered in the ROI in the y direction always. If
-    # x_overlap is not -1 the method will search in all horizontal positions.
-
-    for obj in tf.classify(
-        mobilenet, img, min_scale=1.0, scale_mul=0.5, x_overlap=0.0, y_overlap=0.0
-    ):
-        print("**********\nTop 5 Detections at [x=%d,y=%d,w=%d,h=%d]" % obj.rect())
-        img.draw_rectangle(obj.rect())
-        # This combines the labels and confidence values into a list of tuples
-        # and then sorts that list by the confidence values.
-        sorted_list = sorted(
-            zip(labels, obj.output()), key=lambda x: x[1], reverse=True
-        )
-        for i in range(5):
-            print("%s = %f" % (sorted_list[i][0], sorted_list[i][1]))
+    print("**********\nTop 5 Detections")
+    # This combines the labels and confidence values into a list of tuples
+    # and then sorts that list by the confidence values.
+    sorted_list = sorted(
+        zip(labels, net.predict(img)), key=lambda x: x[1], reverse=True
+    )
+    for i in range(5):
+        print("%s = %f" % (sorted_list[i][0], sorted_list[i][1]))
 
     print(clock.fps(), "fps")
diff --git a/scripts/examples/03-Machine-Learning/00-TensorFlow/tf_object_detection.py b/scripts/examples/03-Machine-Learning/00-TensorFlow/tf_object_detection.py
index 47bf7d4e2..4faf017f4 100644
--- a/scripts/examples/03-Machine-Learning/00-TensorFlow/tf_object_detection.py
+++ b/scripts/examples/03-Machine-Learning/00-TensorFlow/tf_object_detection.py
@@ -1,5 +1,5 @@
 # This work is licensed under the MIT license.
-# Copyright (c) 2013-2023 OpenMV LLC. All rights reserved.
+# Copyright (c) 2013-2024 OpenMV LLC. All rights reserved.
 # https://github.com/openmv/openmv/blob/master/LICENSE
 #
 # TensorFlow Lite Object Detection Example
@@ -14,16 +14,16 @@ import math
 
 sensor.reset()  # Reset and initialize the sensor.
 sensor.set_pixformat(sensor.RGB565)  # Set pixel format to RGB565 (or GRAYSCALE)
 sensor.set_framesize(sensor.QVGA)  # Set frame size to QVGA (320x240)
-sensor.set_windowing((240, 240))  # Set 240x240 window.
 sensor.skip_frames(time=2000)  # Let the camera adjust.
 
 min_confidence = 0.4
+threshold_list = [(math.ceil(min_confidence * 255), 255)]
 
 # Load built-in FOMO face detection model
-labels, net = tf.load_builtin_model("fomo_face_detection")
+labels, net = tf.Model("fomo_face_detection")
 
 # Alternatively, models can be loaded from the filesystem storage.
-# net = tf.load('', load_to_fb=True)
+# net = tf.Model('', load_to_fb=True)
 # labels = [line.rstrip('\n') for line in open("labels.txt")]
 
 colors = [  # Add more colors if you are detecting more than 7 types of classes at once.
@@ -36,18 +36,38 @@ colors = [  # Add more colors if you are detecting more than 7 types of classes
     (255, 255, 255),
 ]
 
+# FOMO outputs an image per class where each pixel in the image is the centroid of the trained
+# object. So, we will get those output images and then run find_blobs() on them to extract the
+# centroids. We will also run get_statistics() on the detected blobs to determine their score.
+# The Non-Max-Suppression (NMS) object then filters out overlapping detections and maps their
+# position in the output image back to the original input image. The callback then returns a
+# list per class, each of which contains a list of (rect, score) tuples representing the detected
+# objects.
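+#
+# For example, with min_confidence = 0.4 above, threshold_list works out to
+# [(math.ceil(0.4 * 255), 255)] == [(102, 255)], so only output pixels with a
+# confidence of at least ~40% survive find_blobs(), and a blob whose mean
+# level (l_mean) is, e.g., 178 maps back to a score of 178 / 255 ~= 0.70.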
+
+
+def fomo_callback(model, rect):
+    out = model.output[0]
+    oh, ow, oc = model.output_shape
+    nms = tf.NMS(ow, oh, rect)
+    for i in range(oc):
+        img = out.get_image(i)
+        blobs = img.find_blobs(threshold_list, x_stride=1, area_threshold=1, pixels_threshold=1)
+        for b in blobs:
+            rect = b.rect()
+            x, y, w, h = rect
+            score = img.get_statistics(thresholds=threshold_list, roi=rect).l_mean() / 255.0
+            nms.add_bounding_box(x, y, x + w, y + h, score, i)
+    return nms.get_bounding_boxes()
+
+
 clock = time.clock()
 while True:
     clock.tick()
 
     img = sensor.snapshot()
 
-    # detect() returns all objects found in the image (splitted out per class already)
-    # we skip class index 0, as that is the background, and then draw circles of the center
-    # of our objects
     for i, detection_list in enumerate(
-        net.detect(img, thresholds=[(math.ceil(min_confidence * 255), 255)])
+        net.predict(img, callback=fomo_callback)
     ):
         if i == 0:
             continue  # background class
@@ -55,11 +75,10 @@ while True:
             continue  # no detections for this class?
 
         print("********** %s **********" % labels[i])
-        for d in detection_list:
-            [x, y, w, h] = d.rect()
+        for (x, y, w, h), score in detection_list:
             center_x = math.floor(x + (w / 2))
             center_y = math.floor(y + (h / 2))
-            print(f"x {center_x}\ty {center_y}")
+            print(f"x {center_x}\ty {center_y}\tscore {score}")
             img.draw_circle((center_x, center_y, 12), color=colors[i], thickness=2)
 
     print(clock.fps(), "fps", end="\n")
diff --git a/src/omv/imlib/collections.c b/src/omv/imlib/collections.c
index 3465098d9..b614bc738 100644
--- a/src/omv/imlib/collections.c
+++ b/src/omv/imlib/collections.c
@@ -211,106 +211,91 @@ size_t list_size(list_t *ptr) {
     return ptr->size;
 }
 
-void list_push_front(list_t *ptr, void *data) {
-    list_lnk_t *tmp = (list_lnk_t *) xalloc(sizeof(list_lnk_t) + ptr->data_len);
-    memcpy(tmp->data, data, ptr->data_len);
-
-    if (ptr->size++) {
-        tmp->next = ptr->head;
-        tmp->prev = NULL;
-        ptr->head->prev = tmp;
-        ptr->head = tmp;
+static void list_link(list_t *dst, list_lnk_t *insert_before, list_lnk_t *lnk) {
+    if (!dst->size) {
+        lnk->next = NULL;
+        lnk->prev = NULL;
+        dst->head = lnk;
+        dst->tail = lnk;
+    } else if (dst->head == insert_before) {
+        lnk->next = insert_before;
+        lnk->prev = NULL;
+        insert_before->prev = lnk;
+        dst->head = lnk;
+    } else if (!insert_before) {
+        lnk->next = NULL;
+        lnk->prev = dst->tail;
+        dst->tail->next = lnk;
+        dst->tail = lnk;
     } else {
-        tmp->next = NULL;
-        tmp->prev = NULL;
-        ptr->head = tmp;
-        ptr->tail = tmp;
+        lnk->next = insert_before;
+        lnk->prev = insert_before->prev;
+        insert_before->prev->next = lnk;
+        insert_before->prev = lnk;
     }
+
+    dst->size += 1;
 }
 
-void list_push_back(list_t *ptr, void *data) {
-    list_lnk_t *tmp = (list_lnk_t *) xalloc(sizeof(list_lnk_t) + ptr->data_len);
-    memcpy(tmp->data, data, ptr->data_len);
-
-    if (ptr->size++) {
-        tmp->next = NULL;
-        tmp->prev = ptr->tail;
-        ptr->tail->next = tmp;
-        ptr->tail = tmp;
+static void list_unlink(list_t *src, list_lnk_t *lnk) {
+    if (src->head == lnk) {
+        if (lnk->next) {
+            lnk->next->prev = NULL;
+        }
+        src->head = lnk->next;
+    } else if (src->tail == lnk) {
+        if (lnk->prev) {
+            lnk->prev->next = NULL;
+        }
+        src->tail = lnk->prev;
     } else {
-        tmp->next = NULL;
-        tmp->prev = NULL;
-        ptr->head = tmp;
-        ptr->tail = tmp;
-    }
-}
-
-void list_pop_front(list_t *ptr, void *data) {
-    list_lnk_t *tmp = ptr->head;
-
-    if (data) {
-        memcpy(data, tmp->data, ptr->data_len);
+        lnk->prev->next = lnk->next;
+        lnk->next->prev = lnk->prev;
     }
 
-    if (tmp->next) {
-        tmp->next->prev = NULL;
-    }
-    ptr->head = tmp->next;
-    ptr->size -= 1;
-    xfree(tmp);
-}
-
-void list_pop_back(list_t *ptr, void *data) {
-    list_lnk_t *tmp = ptr->tail;
-
-    if (data) {
-        memcpy(data, tmp->data, ptr->data_len);
-    }
-
-    tmp->prev->next = NULL;
-    ptr->tail = tmp->prev;
-    ptr->size -= 1;
-    xfree(tmp);
-}
-
-void list_get(list_t *ptr, list_lnk_t *lnk, void *data) {
-    memcpy(data, lnk->data, ptr->data_len);
-}
-
-void list_set(list_t *ptr, list_lnk_t *lnk, void *data) {
-    memcpy(lnk->data, data, ptr->data_len);
+    src->size -= 1;
 }
 
 void list_insert(list_t *ptr, list_lnk_t *lnk, void *data) {
-    if (ptr->head == lnk) {
-        list_push_front(ptr, data);
-    } else if (!lnk) {
-        list_push_back(ptr, data);
-    } else {
-        list_lnk_t *tmp = (list_lnk_t *) xalloc(sizeof(list_lnk_t) + ptr->data_len);
-        memcpy(tmp->data, data, ptr->data_len);
+    list_lnk_t *tmp = (list_lnk_t *) xalloc(sizeof(list_lnk_t) + ptr->data_len);
+    memcpy(tmp->data, data, ptr->data_len);
+    list_link(ptr, lnk, tmp);
+}
 
-        tmp->next = lnk;
-        tmp->prev = lnk->prev;
-        lnk->prev->next = tmp;
-        lnk->prev = tmp;
-        ptr->size += 1;
-    }
+void list_push_front(list_t *ptr, void *data) {
+    list_insert(ptr, ptr->head, data);
+}
+
+void list_push_back(list_t *ptr, void *data) {
+    list_insert(ptr, NULL, data);
 }
 
 void list_remove(list_t *ptr, list_lnk_t *lnk, void *data) {
-    if (ptr->head == lnk) {
-        list_pop_front(ptr, data);
-    } else if (ptr->tail == lnk) {
-        list_pop_back(ptr, data);
-    } else {
-        if (data) {
-            memcpy(data, lnk->data, ptr->data_len);
-        }
-
-        lnk->prev->next = lnk->next;
-        lnk->next->prev = lnk->prev;
-        ptr->size -= 1;
-        xfree(lnk);
+    if (data) {
+        memcpy(data, lnk->data, ptr->data_len);
     }
+
+    list_unlink(ptr, lnk);
+    xfree(lnk);
+}
+
+void list_pop_front(list_t *ptr, void *data) {
+    list_remove(ptr, ptr->head, data);
+}
+
+void list_pop_back(list_t *ptr, void *data) {
+    list_remove(ptr, ptr->tail, data);
+}
+
+void list_move(list_t *dst, list_t *src, list_lnk_t *before, list_lnk_t *lnk) {
+    list_unlink(src, lnk);
+    list_link(dst, before, lnk);
+}
+
+void list_move_front(list_t *dst, list_t *src, list_lnk_t *lnk) {
+    list_move(dst, src, dst->head, lnk);
+}
+
+void list_move_back(list_t *dst, list_t *src, list_lnk_t *lnk) {
+    list_move(dst, src, NULL, lnk);
 }
diff --git a/src/omv/imlib/collections.h b/src/omv/imlib/collections.h
index cc536da1a..e02e51511 100644
--- a/src/omv/imlib/collections.h
+++ b/src/omv/imlib/collections.h
@@ -88,12 +88,15 @@ void list_copy(list_t *dst, list_t *src);
 void list_free(list_t *ptr);
 void list_clear(list_t *ptr);
 size_t list_size(list_t *ptr);
+void list_insert(list_t *ptr, list_lnk_t *lnk, void *data);
 void list_push_front(list_t *ptr, void *data);
 void list_push_back(list_t *ptr, void *data);
+void list_remove(list_t *ptr, list_lnk_t *lnk, void *data);
 void list_pop_front(list_t *ptr, void *data);
 void list_pop_back(list_t *ptr, void *data);
-void list_insert(list_t *ptr, list_lnk_t *lnk, void *data);
-void list_remove(list_t *ptr, list_lnk_t *lnk, void *data);
+void list_move(list_t *dst, list_t *src, list_lnk_t *before, list_lnk_t *lnk);
+void list_move_front(list_t *dst, list_t *src, list_lnk_t *lnk);
+void list_move_back(list_t *dst, list_t *src, list_lnk_t *lnk);
 #define list_for_each(iterator, list) \
     for (list_lnk_t *iterator = list->head; iterator != NULL; iterator = iterator->next)
 #define list_get_data(iterator) ((void *) iterator->data)
diff --git a/src/omv/imlib/imlib.h b/src/omv/imlib/imlib.h
index 3bb736729..903ef322c 100644
--- a/src/omv/imlib/imlib.h
+++ b/src/omv/imlib/imlib.h
@@ -145,6 +145,12 @@ typedef struct rectangle {
     int16_t h;
 } rectangle_t;
 
+typedef struct bounding_box_lnk_data {
+    rectangle_t rect;
+    float score;
+    int label_index;
+} bounding_box_lnk_data_t;
+
 void rectangle_init(rectangle_t *ptr, int x, int y, int w, int h);
 void rectangle_copy(rectangle_t *dst, rectangle_t *src);
 bool rectangle_equal_fast(rectangle_t *ptr0, rectangle_t *ptr1);
@@ -152,6 +158,9 @@ bool rectangle_overlap(rectangle_t *ptr0, rectangle_t *ptr1);
 void rectangle_intersected(rectangle_t *dst, rectangle_t *src);
 void rectangle_united(rectangle_t *dst, rectangle_t *src);
 float rectangle_iou(rectangle_t *r1, rectangle_t *r2);
+void rectangle_nms_add_bounding_box(list_t *bounding_boxes, bounding_box_lnk_data_t *box);
+int rectangle_nms_get_bounding_boxes(list_t *bounding_boxes, float threshold, float sigma);
+void rectangle_map_bounding_boxes(list_t *bounding_boxes, int window_w, int window_h, rectangle_t *roi);
 
 /////////////////
 // Color Stuff //
diff --git a/src/omv/imlib/rectangle.c b/src/omv/imlib/rectangle.c
index 9f3dfcec4..611995c50 100644
--- a/src/omv/imlib/rectangle.c
+++ b/src/omv/imlib/rectangle.c
@@ -128,3 +128,86 @@ float rectangle_iou(rectangle_t *r1, rectangle_t *r2) {
     int rect_union = (r1->w * r1->h) + (r2->w * r2->h) - rect_intersection;
     return ((float) rect_intersection) / ((float) rect_union);
 }
+
+// Adds a bounding box to the list of bounding boxes in descending order of score.
+void rectangle_nms_add_bounding_box(list_t *bounding_boxes, bounding_box_lnk_data_t *box) {
+    // Insertion sort bounding boxes by score.
+    list_lnk_t *it = bounding_boxes->head;
+    for (; it; it = it->next) {
+        if (box->score > ((bounding_box_lnk_data_t *) it->data)->score) {
+            list_insert(bounding_boxes, it, box);
+            break;
+        }
+    }
+
+    if (!it) {
+        list_push_back(bounding_boxes, box);
+    }
+}
+
+// Soft non-max suppress the list of bounding boxes. Returns the maximum label index of the new list.
+int rectangle_nms_get_bounding_boxes(list_t *bounding_boxes, float threshold, float sigma) {
+    // Soft non-max suppression with a Gaussian is used below, as this provides the best results.
+    // A Gaussian is used to apply a soft score penalty to overlapping boxes. On loop entry,
+    // "bounding_boxes" is sorted, but after each iteration, the next highest score must be picked
+    // again, given that the score penalty changes the order.
+    float sigma_scale = (sigma > 0.0f) ? (-1.0f / sigma) : 0.0f;
+
+    list_t nms_bounding_boxes;
+    list_init(&nms_bounding_boxes, sizeof(bounding_box_lnk_data_t));
+
+    int max_label_index = 0;
+
+    // The first detection has the highest score since the list is sorted.
+    list_lnk_t *max_it = bounding_boxes->head;
+    while (list_size(bounding_boxes)) {
+        bounding_box_lnk_data_t lnk_data;
+        memcpy(&lnk_data, max_it->data, bounding_boxes->data_len);
+        list_move_back(&nms_bounding_boxes, bounding_boxes, max_it);
+
+        float max_score = 0.0f;
+        for (list_lnk_t *it = bounding_boxes->head; it; ) {
+            bounding_box_lnk_data_t *lnk_data2 = list_get_data(it);
+
+            // Advance to next now as "it" will be invalid if we remove the current item.
+            list_lnk_t *old_it = it;
+            it = it->next;
+
+            float iou = rectangle_iou(&lnk_data.rect, &lnk_data2->rect);
+            // Do not use fast_expf() as it does not output 1 when its input is 0.
+            // This will cause the scores of non-overlapping bounding boxes to decay.
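+            // Illustrative numbers (not part of the original patch): with the
+            // default sigma of 0.1, a box overlapping the current winner with
+            // IoU 0.5 keeps expf(-0.25 / 0.1) ~= 8% of its score, while a
+            // disjoint box (IoU 0) keeps expf(0) == 1, i.e. all of it.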
+            lnk_data2->score *= expf(sigma_scale * iou * iou);
+
+            if (lnk_data2->score < threshold) {
+                list_remove(bounding_boxes, old_it, NULL);
+            } else if (lnk_data2->score > max_score) {
+                max_score = lnk_data2->score;
+                max_it = old_it;
+            }
+        }
+
+        // Find the maximum label index for the output list.
+        max_label_index = IM_MAX(lnk_data.label_index, max_label_index);
+    }
+
+    // Set the original list pointers to equal the new list.
+    memcpy(bounding_boxes, &nms_bounding_boxes, sizeof(list_t));
+    return max_label_index;
+}
+
+void rectangle_map_bounding_boxes(list_t *bounding_boxes, int window_w, int window_h, rectangle_t *roi) {
+    float x_scale = roi->w / ((float) window_w);
+    float y_scale = roi->h / ((float) window_h);
+    // MAX == KeepAspectRatioByExpanding - MIN == KeepAspectRatio
+    float scale = IM_MIN(x_scale, y_scale);
+    int x_offset = fast_floorf((roi->w - (window_w * scale)) / 2.0f) + roi->x;
+    int y_offset = fast_floorf((roi->h - (window_h * scale)) / 2.0f) + roi->y;
+
+    list_for_each(it, bounding_boxes) {
+        rectangle_t *rect = &((bounding_box_lnk_data_t *) it->data)->rect;
+        rect->x = fast_floorf((rect->x * scale) + x_offset);
+        rect->y = fast_floorf((rect->y * scale) + y_offset);
+        rect->w = fast_floorf(rect->w * scale);
+        rect->h = fast_floorf(rect->h * scale);
+    }
+}
diff --git a/src/omv/modules/py_tf.c b/src/omv/modules/py_tf.c
index 2b8504a1d..15c6dcf6b 100644
--- a/src/omv/modules/py_tf.c
+++ b/src/omv/modules/py_tf.c
@@ -27,7 +27,6 @@
 #define PY_TF_LOG_BUFFER_SIZE (512)
 #define PY_TF_GRAYSCALE_RANGE ((COLOR_GRAYSCALE_MAX) -(COLOR_GRAYSCALE_MIN))
 #define PY_TF_GRAYSCALE_MID (((PY_TF_GRAYSCALE_RANGE) +1) / 2)
-#define PY_TF_CLASSIFICATION_OBJ_SIZE (5)
 
 typedef enum {
     PY_TF_SCALE_NONE,
@@ -62,131 +61,18 @@ STATIC const char *py_tf_map_datatype(libtf_datatype_t datatype) {
     }
 }
 
-// TF Classification Object
-typedef struct py_tf_classification_obj {
-    mp_obj_base_t base;
-    mp_obj_t x, y, w, h, output;
-} py_tf_classification_obj_t;
-
-STATIC void py_tf_classification_print(const mp_print_t *print, mp_obj_t self_in, mp_print_kind_t kind) {
-    py_tf_classification_obj_t *self = self_in;
-    mp_printf(print,
-              "{\"x\":%d, \"y\":%d, \"w\":%d, \"h\":%d, \"output\":",
-              mp_obj_get_int(self->x),
-              mp_obj_get_int(self->y),
-              mp_obj_get_int(self->w),
-              mp_obj_get_int(self->h));
-    mp_obj_print_helper(print, self->output, kind);
-    mp_printf(print, "}");
-}
-
-STATIC mp_obj_t py_tf_classification_subscr(mp_obj_t self_in, mp_obj_t index, mp_obj_t value) {
-    if (value == MP_OBJ_SENTINEL) {
-        // load
-        py_tf_classification_obj_t *self = self_in;
-        if (MP_OBJ_IS_TYPE(index, &mp_type_slice)) {
-            mp_bound_slice_t slice;
-            if (!mp_seq_get_fast_slice_indexes(PY_TF_CLASSIFICATION_OBJ_SIZE, index, &slice)) {
-                mp_raise_msg(&mp_type_OSError, MP_ERROR_TEXT("only slices with step=1 (aka None) are supported"));
-            }
-            mp_obj_tuple_t *result = mp_obj_new_tuple(slice.stop - slice.start, NULL);
-            mp_seq_copy(result->items, &(self->x) + slice.start, result->len, mp_obj_t);
-            return result;
-        }
-        switch (mp_get_index(self->base.type, PY_TF_CLASSIFICATION_OBJ_SIZE, index, false)) {
-            case 0: return self->x;
-            case 1: return self->y;
-            case 2: return self->w;
-            case 3: return self->h;
-            case 4: return self->output;
-        }
-    }
-    return MP_OBJ_NULL; // op not supported
-}
-
-mp_obj_t py_tf_classification_rect(mp_obj_t self_in) {
-    return mp_obj_new_tuple(4, (mp_obj_t []) {((py_tf_classification_obj_t *) self_in)->x,
-                                              ((py_tf_classification_obj_t *) self_in)->y,
-                                              ((py_tf_classification_obj_t *) self_in)->w,
-                                              ((py_tf_classification_obj_t *) self_in)->h});
-}
-
-mp_obj_t py_tf_classification_x(mp_obj_t self_in) {
-    return ((py_tf_classification_obj_t *) self_in)->x;
-}
-mp_obj_t py_tf_classification_y(mp_obj_t self_in) {
-    return ((py_tf_classification_obj_t *) self_in)->y;
-}
-mp_obj_t py_tf_classification_w(mp_obj_t self_in) {
-    return ((py_tf_classification_obj_t *) self_in)->w;
-}
-mp_obj_t py_tf_classification_h(mp_obj_t self_in) {
-    return ((py_tf_classification_obj_t *) self_in)->h;
-}
-mp_obj_t py_tf_classification_output(mp_obj_t self_in) {
-    return ((py_tf_classification_obj_t *) self_in)->output;
-}
-
-STATIC MP_DEFINE_CONST_FUN_OBJ_1(py_tf_classification_rect_obj, py_tf_classification_rect);
-STATIC MP_DEFINE_CONST_FUN_OBJ_1(py_tf_classification_x_obj, py_tf_classification_x);
-STATIC MP_DEFINE_CONST_FUN_OBJ_1(py_tf_classification_y_obj, py_tf_classification_y);
-STATIC MP_DEFINE_CONST_FUN_OBJ_1(py_tf_classification_w_obj, py_tf_classification_w);
-STATIC MP_DEFINE_CONST_FUN_OBJ_1(py_tf_classification_h_obj, py_tf_classification_h);
-STATIC MP_DEFINE_CONST_FUN_OBJ_1(py_tf_classification_output_obj, py_tf_classification_output);
-
-STATIC const mp_rom_map_elem_t py_tf_classification_locals_dict_table[] = {
-    { MP_ROM_QSTR(MP_QSTR_rect), MP_ROM_PTR(&py_tf_classification_rect_obj) },
-    { MP_ROM_QSTR(MP_QSTR_x), MP_ROM_PTR(&py_tf_classification_x_obj) },
-    { MP_ROM_QSTR(MP_QSTR_y), MP_ROM_PTR(&py_tf_classification_y_obj) },
-    { MP_ROM_QSTR(MP_QSTR_w), MP_ROM_PTR(&py_tf_classification_w_obj) },
-    { MP_ROM_QSTR(MP_QSTR_h), MP_ROM_PTR(&py_tf_classification_h_obj) },
-    { MP_ROM_QSTR(MP_QSTR_output), MP_ROM_PTR(&py_tf_classification_output_obj) }
-};
-
-STATIC MP_DEFINE_CONST_DICT(py_tf_classification_locals_dict, py_tf_classification_locals_dict_table);
-
-MP_DEFINE_CONST_OBJ_TYPE(
-    py_tf_classification_type,
-    MP_QSTR_tf_classification,
-    MP_TYPE_FLAG_NONE,
-    print, py_tf_classification_print,
-    subscr, py_tf_classification_subscr,
-    locals_dict, &py_tf_classification_locals_dict
-    );
-
 // TF Model Output Object.
 typedef struct py_tf_model_output_obj {
     mp_obj_base_t base;
-    rectangle_t *roi;
     void *model_output;
     libtf_parameters_t *params;
-    // Pre-compute for lookup speed.
     size_t output_size;
-    mp_obj_t rect;
-    // Convenience stuff.
-    list_t bounding_boxes;
 } py_tf_model_output_obj_t;
 
-STATIC void py_tf_model_output_attr(mp_obj_t self_in, qstr attr, mp_obj_t *dest) {
-    py_tf_model_output_obj_t *self = MP_OBJ_TO_PTR(self_in);
-    if (dest[0] == MP_OBJ_NULL) {
-        // Load attribute.
-        switch (attr) {
-            case MP_QSTR_rect:
-                dest[0] = self->rect;
-                break;
-            default:
-                // Continue lookup in locals_dict.
-                dest[1] = MP_OBJ_SENTINEL;
-                break;
-        }
-    }
-}
-
 STATIC mp_obj_t py_tf_model_output_subscr(mp_obj_t self_in, mp_obj_t index, mp_obj_t value) {
     if (value == MP_OBJ_SENTINEL) {
         // load
-        py_tf_model_output_obj_t *self = self_in;
+        py_tf_model_output_obj_t *self = MP_OBJ_TO_PTR(self_in);
         void *model_output = self->model_output;
         libtf_parameters_t *params = self->params;
         if (MP_OBJ_IS_TYPE(index, &mp_type_slice)) {
@@ -234,162 +120,82 @@ STATIC mp_obj_t py_tf_model_output_subscr(mp_obj_t self_in, mp_obj_t index, mp_o
     return MP_OBJ_NULL; // op not supported
 }
 
-typedef struct py_tf_model_output_bounding_box_lnk_data {
-    rectangle_t rect;
-    float score;
-    int label_index;
-} py_tf_model_output_bounding_box_lnk_data_t;
-
-// This convenience function is designed to collect bounding boxes, allowing for a non-maximal
-// suppression of them later. It is not necessary to use this function to parse model output.
-// The use of mp_arg_parse_all() is deliberately avoided here to ensure this method remains fast.
-STATIC mp_obj_t py_tf_model_output_add_bounding_box(uint n_args, const mp_obj_t *pos_args) {
-    enum { ARG_self, ARG_xmin, ARG_ymin, ARG_xmax, ARG_ymax, ARG_score, ARG_label_index };
-    py_tf_model_output_obj_t *self_in = (py_tf_model_output_obj_t *) pos_args[ARG_self];
-
-    py_tf_model_output_bounding_box_lnk_data_t lnk_data;
-    lnk_data.score = mp_obj_get_float(pos_args[ARG_score]);
-
-    if ((lnk_data.score >= 0.0f) && (lnk_data.score <= 1.0f)) {
-        float xmin = IM_CLAMP(mp_obj_get_float(pos_args[ARG_xmin]), 0.0f, (float) (self_in->params->input_width));
-        float ymin = IM_CLAMP(mp_obj_get_float(pos_args[ARG_ymin]), 0.0f, (float) (self_in->params->input_height));
-        float xmax = IM_CLAMP(mp_obj_get_float(pos_args[ARG_xmax]), 0.0f, (float) (self_in->params->input_width));
-        float ymax = IM_CLAMP(mp_obj_get_float(pos_args[ARG_ymax]), 0.0f, (float) (self_in->params->input_height));
-
-        lnk_data.rect.w = fast_floorf(xmax - xmin);
-        lnk_data.rect.h = fast_floorf(ymax - ymin);
-
-        if ((lnk_data.rect.w > 0) && (lnk_data.rect.h > 0)) {
-            lnk_data.rect.x = fast_floorf(xmin);
-            lnk_data.rect.y = fast_floorf(ymin);
-            lnk_data.label_index = mp_obj_get_int(pos_args[ARG_label_index]);
-
-            // Insertion sort bounding boxes by score.
-            list_lnk_t *it = self_in->bounding_boxes.head;
-            for (; it; it = it->next) {
-                if (lnk_data.score > ((py_tf_model_output_bounding_box_lnk_data_t *) it->data)->score) {
-                    list_insert(&self_in->bounding_boxes, it, &lnk_data);
-                    break;
-                }
-            }
-
-            if (!it) {
-                list_push_back(&self_in->bounding_boxes, &lnk_data);
-            }
-        }
-    }
-
-    return mp_const_none;
-}
-STATIC MP_DEFINE_CONST_FUN_OBJ_VAR_BETWEEN(py_tf_model_output_add_bounding_box_obj, 7, 7,
-                                           py_tf_model_output_add_bounding_box);
-
-// This function performs non-maximal suppression on bounding boxes collected using the add_bounding_box
-// function and returns the final list of bounding boxes, mapped to the image ROI and separated by label
-// index into their own lists. Note that using this function to parse the model output is optional.
-STATIC mp_obj_t py_tf_model_output_get_bounding_boxes(uint n_args, const mp_obj_t *pos_args, mp_map_t *kw_args) {
-    enum { ARG_threshold, ARG_sigma };
+STATIC mp_obj_t py_tf_model_output_get_image(uint n_args, const mp_obj_t *pos_args, mp_map_t *kw_args) {
+    enum { ARG_channel, ARG_roi, ARG_scale };
     static const mp_arg_t allowed_args[] = {
-        { MP_QSTR_threshold, MP_ARG_OBJ | MP_ARG_KW_ONLY, {.u_rom_obj = MP_ROM_NONE } },
-        { MP_QSTR_sigma, MP_ARG_OBJ | MP_ARG_KW_ONLY, {.u_rom_obj = MP_ROM_NONE } },
+        { MP_QSTR_channel, MP_ARG_INT | MP_ARG_REQUIRED, {.u_int = 0} },
+        { MP_QSTR_roi, MP_ARG_OBJ | MP_ARG_KW_ONLY, {.u_rom_obj = MP_ROM_NONE} },
+        { MP_QSTR_scale, MP_ARG_INT | MP_ARG_KW_ONLY, {.u_int = PY_TF_SCALE_0_1} },
     };
 
+    // Parse args.
     mp_arg_val_t args[MP_ARRAY_SIZE(allowed_args)];
     mp_arg_parse_all(n_args - 1, pos_args + 1, kw_args, MP_ARRAY_SIZE(allowed_args), allowed_args, args);
 
-    py_tf_model_output_obj_t *self_in = (py_tf_model_output_obj_t *) pos_args[0];
-    float threshold = py_helper_arg_to_float(args[ARG_threshold].u_obj, 0.1f);
-    float sigma = py_helper_arg_to_float(args[ARG_sigma].u_obj, 0.1f);
+    py_tf_model_output_obj_t *self = MP_OBJ_TO_PTR(pos_args[0]);
 
-    // Soft non-max suppression with a Gaussian is used below, as this provides the best results.
-    // A Gaussian is used to apply a soft score penalty to overlapping boxes. On loop entry,
-    // "bounding_boxes" is sorted, but after each iteration, the next highest score must be picked
-    // again, given that the score penalty changes the order.
+    image_t temp = {.w = self->params->output_width, .h = self->params->output_height};
+    rectangle_t roi = py_helper_arg_to_roi(args[ARG_roi].u_obj, &temp);
 
-    float sigma_scale = (sigma > 0.0f) ? (-1.0f / sigma) : 0.0f;
+    image_t img = {
+        .w = roi.w,
+        .h = roi.h,
+        .pixfmt = PIXFORMAT_GRAYSCALE,
+        .pixels = xalloc(roi.w * roi.h)
+    };
 
-    list_t nms_bounding_boxes;
-    list_init(&nms_bounding_boxes, sizeof(py_tf_model_output_bounding_box_lnk_data_t));
+    int channel = args[ARG_channel].u_int;
 
-    int max_label = 0;
+    int shift = (self->params->output_datatype == LIBTF_DATATYPE_INT8) ? PY_TF_GRAYSCALE_MID : 0;
+    float fscale = 1.0f, fadd = 0.0f;
 
-    // The first detection has the higest score since the list is sorted.
-    list_lnk_t *max_it = self_in->bounding_boxes.head;
-    while (list_size(&self_in->bounding_boxes)) {
-        py_tf_model_output_bounding_box_lnk_data_t lnk_data;
-        list_remove(&self_in->bounding_boxes, max_it, &lnk_data);
-        list_push_back(&nms_bounding_boxes, &lnk_data);
+    switch (args[ARG_scale].u_int) {
+        case PY_TF_SCALE_0_1: // convert 0->1 to 0->255
+            fscale = 255.0f;
+            break;
+        case PY_TF_SCALE_S1_1: // convert -1->1 to 0->255
+            fscale = 127.5f;
+            fadd = 127.5f;
+            break;
+        case PY_TF_SCALE_S128_127: // convert -128->127 to 0->255
+            fadd = 128.0f;
+            break;
+        case PY_TF_SCALE_NONE: // convert 0->255 to 0->255
+        default:
+            break;
+    }
 
-        float max_score = 0.0f;
-        for (list_lnk_t *it = self_in->bounding_boxes.head; it; ) {
-            py_tf_model_output_bounding_box_lnk_data_t *lnk_data2 = list_get_data(it);
+    for (int y = 0; y < roi.h; y++) {
+        int row_index = (y + roi.y) * self->params->output_width * self->params->output_channels;
+        uint8_t *row_ptr = IMAGE_COMPUTE_GRAYSCALE_PIXEL_ROW_PTR(&img, y);
 
-            // Advance to next now as "it" will be invalid if we remove the current item.
-            list_lnk_t *old_it = it;
-            it = it->next;
+        for (int x = 0; x < roi.w; x++) {
+            int index = row_index + ((x + roi.x) * self->params->output_channels) + channel;
 
-            float iou = rectangle_iou(&lnk_data.rect, &lnk_data2->rect);
-            // Do not use fast_expf() as it does not output 1 when it's input is 0.
-            // This will cause the scores of non-overlapping bounding boxes to decay.
-            lnk_data2->score *= expf(sigma_scale * iou * iou);
-
-            if (lnk_data2->score < threshold) {
-                list_remove(&self_in->bounding_boxes, old_it, NULL);
-            } else if (lnk_data2->score > max_score) {
-                max_score = lnk_data2->score;
-                max_it = old_it;
+            if (self->params->output_datatype == LIBTF_DATATYPE_FLOAT) {
+                float mo = (((float *) self->model_output)[index] * fscale) + fadd;
+                IMAGE_PUT_GRAYSCALE_PIXEL_FAST(row_ptr, x, fast_floorf(mo));
+            } else {
+                uint8_t mo = ((uint8_t *) self->model_output)[index] ^ shift;
+                IMAGE_PUT_GRAYSCALE_PIXEL_FAST(row_ptr, x, mo);
             }
         }
-
-        // Find the maximum label index for the output list.
-        max_label = IM_MAX(lnk_data.label_index, max_label);
     }
 
-    memcpy(&self_in->bounding_boxes, &nms_bounding_boxes, sizeof(list_t));
-
-    // Create a list per class label.
-    mp_obj_t list = mp_obj_new_list(max_label + 1, NULL);
-    for (size_t i = 0; i <= max_label; i++) {
-        ((mp_obj_list_t *) list)->items[i] = mp_obj_new_list(0, NULL);
-    }
-
-    float x_scale = self_in->roi->w / ((float) self_in->params->input_width);
-    float y_scale = self_in->roi->h / ((float) self_in->params->input_height);
-    // MAX == KeepAspectRatioByExpanding - MIN == KeepAspectRatio
-    float scale = IM_MIN(x_scale, y_scale);
-    int x_offset = fast_floorf((self_in->roi->w - (self_in->params->input_width * scale)) / 2.0f) + self_in->roi->x;
-    int y_offset = fast_floorf((self_in->roi->h - (self_in->params->input_height * scale)) / 2.0f) + self_in->roi->y;
-
-    size_t len = list_size(&nms_bounding_boxes);
-    for (size_t i = 0; i < len; i++) {
-        py_tf_model_output_bounding_box_lnk_data_t lnk_data;
-        list_pop_front(&nms_bounding_boxes, &lnk_data);
-        py_tf_classification_obj_t *o = m_new_obj(py_tf_classification_obj_t);
-        o->base.type = &py_tf_classification_type;
-        o->x = mp_obj_new_int(fast_floorf(lnk_data.rect.x * scale) + x_offset);
-        o->y = mp_obj_new_int(fast_floorf(lnk_data.rect.y * scale) + y_offset);
-        o->w = mp_obj_new_int(fast_floorf(lnk_data.rect.w * scale));
-        o->h = mp_obj_new_int(fast_floorf(lnk_data.rect.h * scale));
-        o->output = mp_obj_new_float(lnk_data.score);
-        mp_obj_list_append(((mp_obj_list_t *) list)->items[lnk_data.label_index], o);
-    }
-
-    return list;
+    return py_image_from_struct(&img);
 }
-STATIC MP_DEFINE_CONST_FUN_OBJ_KW(py_tf_model_output_get_bounding_boxes_obj, 1, py_tf_model_output_get_bounding_boxes);
+STATIC MP_DEFINE_CONST_FUN_OBJ_KW(py_tf_model_output_get_image_obj, 1, py_tf_model_output_get_image);
 
-STATIC const mp_rom_map_elem_t py_tf_model_output_locals_table[] = {
-    { MP_ROM_QSTR(MP_QSTR_add_bounding_box), MP_ROM_PTR(&py_tf_model_output_add_bounding_box_obj) },
-    { MP_ROM_QSTR(MP_QSTR_get_bounding_boxes), MP_ROM_PTR(&py_tf_model_output_get_bounding_boxes_obj) },
+STATIC const mp_rom_map_elem_t py_tf_model_output_locals_dict_table[] = {
+    { MP_ROM_QSTR(MP_QSTR_get_image), MP_ROM_PTR(&py_tf_model_output_get_image_obj) },
 };
 
-STATIC MP_DEFINE_CONST_DICT(py_tf_model_output_locals_dict, py_tf_model_output_locals_table);
+STATIC MP_DEFINE_CONST_DICT(py_tf_model_output_locals_dict, py_tf_model_output_locals_dict_table);
 
 STATIC MP_DEFINE_CONST_OBJ_TYPE(
     py_tf_model_output_type,
     MP_QSTR_tf_model_output,
     MP_TYPE_FLAG_NONE,
-    attr, py_tf_model_output_attr,
     subscr, py_tf_model_output_subscr,
     locals_dict, &py_tf_model_output_locals_dict
     );
@@ -412,18 +218,17 @@ STATIC void py_tf_input_callback(void *callback_data,
     float fscale = 1.0f, fadd = 0.0f;
 
     switch (arg->scale) {
-        case PY_TF_SCALE_0_1:
+        case PY_TF_SCALE_0_1: // convert 0->255 to 0->1
            fscale = 1.0f / 255.0f;
            break;
-        case PY_TF_SCALE_S1_1:
+        case PY_TF_SCALE_S1_1: // convert 0->255 to -1->1
            fscale = 2.0f / 255.0f;
            fadd = -1.0f;
            break;
-        case PY_TF_SCALE_S128_127:
-            fscale = 255.0f / 127.0f;
+        case PY_TF_SCALE_S128_127: // convert 0->255 to -128->127
            fadd = -128.0f;
            break;
-        case PY_TF_SCALE_NONE:
+        case PY_TF_SCALE_NONE: // convert 0->255 to 0->255
        default:
            break;
     }
@@ -628,45 +433,6 @@ STATIC void py_tf_regression_input_callback(void *callback_data,
     }
 }
 
-STATIC void py_tf_segment_output_callback(void *callback_data,
-                                          void *model_output,
-                                          libtf_parameters_t *params) {
-    mp_obj_t *arg = (mp_obj_t *) callback_data;
-
-    int shift = (params->output_datatype == LIBTF_DATATYPE_INT8) ? PY_TF_GRAYSCALE_MID : 0;
-
-    *arg = mp_obj_new_list(params->output_channels, NULL);
-
-    for (int i = 0, ii = params->output_channels; i < ii; i++) {
-
-        image_t img = {
-            .w = params->output_width,
-            .h = params->output_height,
-            .pixfmt = PIXFORMAT_GRAYSCALE,
-            .pixels = xalloc(params->output_width * params->output_height * sizeof(uint8_t))
-        };
-
-        ((mp_obj_list_t *) *arg)->items[i] = py_image_from_struct(&img);
-
-        for (int y = 0, yy = params->output_height, xx = params->output_width; y < yy; y++) {
-            int row = y * xx * ii;
-            uint8_t *row_ptr = IMAGE_COMPUTE_GRAYSCALE_PIXEL_ROW_PTR(&img, y);
-
-            for (int x = 0; x < xx; x++) {
-                int col = x * ii;
-
-                if (params->output_datatype == LIBTF_DATATYPE_FLOAT) {
-                    IMAGE_PUT_GRAYSCALE_PIXEL_FAST(row_ptr, x,
-                                                   ((float *) model_output)[row + col + i] * PY_TF_GRAYSCALE_RANGE);
-                } else {
-                    IMAGE_PUT_GRAYSCALE_PIXEL_FAST(row_ptr, x,
-                                                   ((uint8_t *) model_output)[row + col + i] ^ shift);
-                }
-            }
-        }
-    }
-}
-
 typedef struct py_tf_predict_callback_data {
     mp_obj_t model;
     rectangle_t *roi;
@@ -678,18 +444,25 @@ STATIC void py_tf_predict_output_callback(void *callback_data,
                                           void *model_output,
                                           libtf_parameters_t *params) {
     py_tf_predict_callback_data_t *arg = (py_tf_predict_callback_data_t *) callback_data;
+    py_tf_model_obj_t *model = MP_OBJ_TO_PTR(arg->model);
+
+    mp_obj_t rect = mp_obj_new_tuple(4, (mp_obj_t []) {mp_obj_new_int(arg->roi->x),
+                                                       mp_obj_new_int(arg->roi->y),
+                                                       mp_obj_new_int(arg->roi->w),
+                                                       mp_obj_new_int(arg->roi->h)});
+
+    // This will support multiple output tensors once the API is updated.
+    mp_obj_list_t *list = MP_OBJ_TO_PTR(mp_obj_new_list(0, NULL));
+
     py_tf_model_output_obj_t *o = m_new_obj(py_tf_model_output_obj_t);
     o->base.type = &py_tf_model_output_type;
-    o->roi = arg->roi;
     o->model_output = model_output;
     o->params = params;
     o->output_size = params->output_height * params->output_width * params->output_channels;
-    o->rect = mp_obj_new_tuple(4, (mp_obj_t []) {mp_obj_new_int(arg->roi->x),
-                                                 mp_obj_new_int(arg->roi->y),
-                                                 mp_obj_new_int(arg->roi->w),
-                                                 mp_obj_new_int(arg->roi->h)});
-    list_init(&o->bounding_boxes, sizeof(py_tf_model_output_bounding_box_lnk_data_t));
-    *(arg->out) = mp_call_function_2(arg->callback, arg->model, o);
+    mp_obj_list_append(list, o);
+
+    model->output_list = MP_OBJ_FROM_PTR(list);
+    *(arg->out) = mp_call_function_2(arg->callback, model, rect);
+    model->output_list = mp_const_none;
 }
 
 // TF Model Object.
@@ -712,175 +485,6 @@ STATIC void py_tf_model_print(const mp_print_t *print, mp_obj_t self_in, mp_prin
               (double) self->params.output_scale, self->params.output_zero_point);
 }
 
-STATIC mp_obj_t py_tf_model_segment(uint n_args, const mp_obj_t *pos_args, mp_map_t *kw_args) {
-    enum { ARG_roi, ARG_scale, ARG_mean, ARG_stdev };
-    static const mp_arg_t allowed_args[] = {
-        { MP_QSTR_roi, MP_ARG_OBJ | MP_ARG_KW_ONLY, {.u_rom_obj = MP_ROM_NONE} },
-        { MP_QSTR_scale, MP_ARG_INT | MP_ARG_KW_ONLY, {.u_int = PY_TF_SCALE_0_1} },
-        { MP_QSTR_mean, MP_ARG_OBJ | MP_ARG_KW_ONLY, {.u_rom_obj = MP_ROM_NONE} },
-        { MP_QSTR_stdev, MP_ARG_OBJ | MP_ARG_KW_ONLY, {.u_rom_obj = MP_ROM_NONE} },
-    };
-
-    // Parse args.
-    mp_arg_val_t args[MP_ARRAY_SIZE(allowed_args)];
-    mp_arg_parse_all(n_args - 2, pos_args + 2, kw_args, MP_ARRAY_SIZE(allowed_args), allowed_args, args);
-
-    image_t *image = py_helper_arg_to_image(pos_args[1], ARG_IMAGE_ANY);
-    rectangle_t roi = py_helper_arg_to_roi(args[ARG_roi].u_obj, image);
-
-    fb_alloc_mark();
-    py_tf_alloc_log_buffer();
-
-    py_tf_model_obj_t *model = MP_OBJ_TO_PTR(pos_args[0]);
-    uint8_t *tensor_arena = fb_alloc(model->params.tensor_arena_size, FB_ALLOC_PREFER_SPEED | FB_ALLOC_CACHE_ALIGN);
-
-    py_tf_input_callback_data_t py_tf_input_callback_data = {
-        .img = image,
-        .roi = &roi,
-        .scale = args[ARG_scale].u_int,
-        .mean = {0.0f, 0.0f, 0.0f},
-        .stdev = {1.0f, 1.0f, 1.0f}
-    };
-    py_helper_arg_to_float_array(args[ARG_mean].u_obj, py_tf_input_callback_data.mean, 3);
-    py_helper_arg_to_float_array(args[ARG_stdev].u_obj, py_tf_input_callback_data.stdev, 3);
-
-    mp_obj_t py_tf_segment_output_callback_data;
-
-    if (libtf_invoke(model->data,
-                     tensor_arena,
-                     &model->params,
-                     py_tf_input_callback,
-                     &py_tf_input_callback_data,
-                     py_tf_segment_output_callback,
-                     &py_tf_segment_output_callback_data) != 0) {
-        // Note can't use MP_ERROR_TEXT here.
-        mp_raise_msg(&mp_type_OSError, (mp_rom_error_text_t) py_tf_log_buffer);
-    }
-
-    fb_alloc_free_till_mark();
-
-    return py_tf_segment_output_callback_data;
-}
-STATIC MP_DEFINE_CONST_FUN_OBJ_KW(py_tf_model_segment_obj, 2, py_tf_model_segment);
-
-STATIC mp_obj_t py_tf_model_detect(uint n_args, const mp_obj_t *pos_args, mp_map_t *kw_args) {
-    enum { ARG_roi, ARG_thresholds, ARG_invert, ARG_scale, ARG_mean, ARG_stdev };
-    static const mp_arg_t allowed_args[] = {
-        { MP_QSTR_roi, MP_ARG_OBJ | MP_ARG_KW_ONLY, {.u_rom_obj = MP_ROM_NONE} },
-        { MP_QSTR_thresholds, MP_ARG_OBJ | MP_ARG_KW_ONLY, {.u_rom_obj = MP_ROM_NONE} },
-        { MP_QSTR_invert, MP_ARG_INT | MP_ARG_KW_ONLY, {.u_bool = false } },
-        { MP_QSTR_scale, MP_ARG_INT | MP_ARG_KW_ONLY, {.u_int = PY_TF_SCALE_0_1} },
-        { MP_QSTR_mean, MP_ARG_OBJ | MP_ARG_KW_ONLY, {.u_rom_obj = MP_ROM_NONE} },
-        { MP_QSTR_stdev, MP_ARG_OBJ | MP_ARG_KW_ONLY, {.u_rom_obj = MP_ROM_NONE} },
-    };
-
-    // Parse args.
-    mp_arg_val_t args[MP_ARRAY_SIZE(allowed_args)];
-    mp_arg_parse_all(n_args - 2, pos_args + 2, kw_args, MP_ARRAY_SIZE(allowed_args), allowed_args, args);
-
-    image_t *image = py_helper_arg_to_image(pos_args[1], ARG_IMAGE_ANY);
-    rectangle_t roi = py_helper_arg_to_roi(args[ARG_roi].u_obj, image);
-    bool invert = args[ARG_invert].u_int;
-
-    fb_alloc_mark();
-    py_tf_alloc_log_buffer();
-
-    py_tf_model_obj_t *model = MP_OBJ_TO_PTR(pos_args[0]);
-    uint8_t *tensor_arena = fb_alloc(model->params.tensor_arena_size, FB_ALLOC_PREFER_SPEED | FB_ALLOC_CACHE_ALIGN);
-
-    py_tf_input_callback_data_t py_tf_input_callback_data = {
-        .img = image,
-        .roi = &roi,
-        .scale = args[ARG_scale].u_int,
-        .mean = {0.0f, 0.0f, 0.0f},
-        .stdev = {1.0f, 1.0f, 1.0f}
-    };
-    py_helper_arg_to_float_array(args[ARG_mean].u_obj, py_tf_input_callback_data.mean, 3);
-    py_helper_arg_to_float_array(args[ARG_stdev].u_obj, py_tf_input_callback_data.stdev, 3);
-
-    mp_obj_t py_tf_segment_output_callback_data;
-
-    if (libtf_invoke(model->data,
-                     tensor_arena,
-                     &model->params,
-                     py_tf_input_callback,
-                     &py_tf_input_callback_data,
-                     py_tf_segment_output_callback,
-                     &py_tf_segment_output_callback_data) != 0) {
-        // Note can't use MP_ERROR_TEXT here.
-        mp_raise_msg(&mp_type_OSError, (mp_rom_error_text_t) py_tf_log_buffer);
-    }
-
-    list_t thresholds;
-    list_init(&thresholds, sizeof(color_thresholds_list_lnk_data_t));
-    py_helper_arg_to_thresholds(args[ARG_thresholds].u_obj, &thresholds);
-
-    if (!list_size(&thresholds)) {
-        color_thresholds_list_lnk_data_t lnk_data;
-        lnk_data.LMin = PY_TF_GRAYSCALE_MID;
-        lnk_data.LMax = PY_TF_GRAYSCALE_RANGE;
-        lnk_data.AMin = COLOR_A_MIN;
-        lnk_data.AMax = COLOR_A_MAX;
-        lnk_data.BMin = COLOR_B_MIN;
-        lnk_data.BMax = COLOR_B_MAX;
-        list_push_back(&thresholds, &lnk_data);
-    }
-
-    mp_obj_list_t *img_list = (mp_obj_list_t *) py_tf_segment_output_callback_data;
-    mp_obj_list_t *out_list = mp_obj_new_list(img_list->len, NULL);
-
-    float fscale = 1.f / PY_TF_GRAYSCALE_RANGE;
-    for (int i = 0, ii = img_list->len; i < ii; i++) {
-        image_t *img = py_image_cobj(img_list->items[i]);
-        float x_scale = roi.w / ((float) img->w);
-        float y_scale = roi.h / ((float) img->h);
-        // MAX == KeepAspectRatioByExpanding - MIN == KeepAspectRatio
-        float scale = IM_MIN(x_scale, y_scale);
-        int x_offset = fast_floorf((roi.w - (img->w * scale)) / 2.0f) + roi.x;
-        int y_offset = fast_floorf((roi.h - (img->h * scale)) / 2.0f) + roi.y;
-
-        list_t out;
-        imlib_find_blobs(&out, img, &((rectangle_t) {0, 0, img->w, img->h}), 1, 1,
-                         &thresholds, invert, 1, 1, false, 0,
-                         NULL, NULL, NULL, NULL, 0, 0);
-
-        mp_obj_list_t *objects_list = mp_obj_new_list(list_size(&out), NULL);
-        for (int j = 0, jj = list_size(&out); j < jj; j++) {
-            find_blobs_list_lnk_data_t lnk_data;
-            list_pop_front(&out, &lnk_data);
-
-            histogram_t hist;
-            hist.LBinCount = PY_TF_GRAYSCALE_RANGE + 1;
-            hist.ABinCount = 0;
-            hist.BBinCount = 0;
-            hist.LBins = fb_alloc(hist.LBinCount * sizeof(float), FB_ALLOC_NO_HINT);
-            hist.ABins = NULL;
-            hist.BBins = NULL;
-            imlib_get_histogram(&hist, img, &lnk_data.rect, &thresholds, invert, NULL);
-
-            statistics_t stats;
-            imlib_get_statistics(&stats, img->pixfmt, &hist);
-            fb_free(); // fb_alloc(hist.LBinCount * sizeof(float), FB_ALLOC_NO_HINT);
-
-            py_tf_classification_obj_t *o = m_new_obj(py_tf_classification_obj_t);
-            o->base.type = &py_tf_classification_type;
-            o->x = mp_obj_new_int(fast_floorf(lnk_data.rect.x * scale) + x_offset);
-            o->y = mp_obj_new_int(fast_floorf(lnk_data.rect.y * scale) + y_offset);
-            o->w = mp_obj_new_int(fast_floorf(lnk_data.rect.w * scale));
-            o->h = mp_obj_new_int(fast_floorf(lnk_data.rect.h * scale));
-            o->output = mp_obj_new_float(stats.LMean * fscale);
-            objects_list->items[j] = o;
-        }
-
-        out_list->items[i] = objects_list;
-    }
-
-    fb_alloc_free_till_mark();
-
-    return out_list;
-}
-STATIC MP_DEFINE_CONST_FUN_OBJ_KW(py_tf_model_detect_obj, 2, py_tf_model_detect);
-
 STATIC mp_obj_t py_tf_model_predict(uint n_args, const mp_obj_t *pos_args, mp_map_t *kw_args) {
     enum { ARG_roi, ARG_callback, ARG_scale, ARG_mean, ARG_stdev };
     static const mp_arg_t allowed_args[] = {
@@ -998,6 +602,9 @@ STATIC void py_tf_model_attr(mp_obj_t self_in, qstr attr, mp_obj_t *dest) {
             case MP_QSTR_output_zero_point:
                 dest[0] = mp_obj_new_int(self->params.output_zero_point);
                 break;
+            case MP_QSTR_output:
+                dest[0] = self->output_list;
+                break;
             default:
                 // Continue lookup in locals_dict.
                 dest[1] = MP_OBJ_SENTINEL;
                 break;
@@ -1075,6 +682,8 @@ mp_obj_t py_tf_model_make_new(const mp_obj_type_t *type, size_t n_args, size_t n
                                            mp_obj_new_int(model->params.output_width),
                                            mp_obj_new_int(model->params.output_channels)});
 
+    model->output_list = mp_const_none;
+
     if (model->fb_alloc) {
         // The model data will Not be free'd on exceptions.
         fb_alloc_mark_permanent();
@@ -1100,10 +709,6 @@ STATIC MP_DEFINE_CONST_FUN_OBJ_1(py_tf_model_deinit_obj, py_tf_model_deinit);
 
 STATIC const mp_rom_map_elem_t py_tf_model_locals_dict_table[] = {
     { MP_ROM_QSTR(MP_QSTR___del__), MP_ROM_PTR(&py_tf_model_deinit_obj) },
-    { MP_ROM_QSTR(MP_QSTR_classify), MP_ROM_PTR(&py_tf_model_predict_obj) },
-    { MP_ROM_QSTR(MP_QSTR_segment), MP_ROM_PTR(&py_tf_model_segment_obj) },
-    { MP_ROM_QSTR(MP_QSTR_detect), MP_ROM_PTR(&py_tf_model_detect_obj) },
-    { MP_ROM_QSTR(MP_QSTR_regression), MP_ROM_PTR(&py_tf_model_predict_obj) },
     { MP_ROM_QSTR(MP_QSTR_predict), MP_ROM_PTR(&py_tf_model_predict_obj) },
 };
 
@@ -1119,6 +724,8 @@ STATIC MP_DEFINE_CONST_OBJ_TYPE(
     locals_dict, &py_tf_model_locals_dict
     );
 
+extern const mp_obj_type_t py_tf_nms_type;
+
 STATIC const mp_rom_map_elem_t py_tf_globals_dict_table[] = {
     { MP_ROM_QSTR(MP_QSTR___name__), MP_OBJ_NEW_QSTR(MP_QSTR_tf) },
     { MP_ROM_QSTR(MP_QSTR_SCALE_NONE), MP_ROM_INT(PY_TF_SCALE_NONE) },
@@ -1126,8 +733,7 @@ STATIC const mp_rom_map_elem_t py_tf_globals_dict_table[] = {
     { MP_ROM_QSTR(MP_QSTR_SCALE_S1_1), MP_ROM_INT(PY_TF_SCALE_S1_1) },
     { MP_ROM_QSTR(MP_QSTR_SCALE_S128_127), MP_ROM_INT(PY_TF_SCALE_S128_127) },
     { MP_ROM_QSTR(MP_QSTR_Model), MP_ROM_PTR(&py_tf_model_type) },
-    { MP_ROM_QSTR(MP_QSTR_load), MP_ROM_PTR(&py_tf_model_type) },
-    { MP_ROM_QSTR(MP_QSTR_load_builtin_model), MP_ROM_PTR(&py_tf_model_type) },
+    { MP_ROM_QSTR(MP_QSTR_NMS), MP_ROM_PTR(&py_tf_nms_type) },
 };
 
 STATIC MP_DEFINE_CONST_DICT(py_tf_globals_dict, py_tf_globals_dict_table);
diff --git a/src/omv/modules/py_tf.h b/src/omv/modules/py_tf.h
index baedae0ca..67e4fff0c 100644
--- a/src/omv/modules/py_tf.h
+++ b/src/omv/modules/py_tf.h
@@ -21,6 +21,7 @@ typedef struct py_tf_model_obj {
     bool fb_alloc;
     mp_obj_t input_shape;
     mp_obj_t output_shape;
+    mp_obj_t output_list;
     libtf_parameters_t params;
 } py_tf_model_obj_t;
 
diff --git a/src/omv/modules/py_tf_nms.c b/src/omv/modules/py_tf_nms.c
new file mode 100644
index 000000000..8a46efaa6
--- /dev/null
+++ b/src/omv/modules/py_tf_nms.c
@@ -0,0 +1,143 @@
+/*
+ * This file is part of the OpenMV project.
+ *
+ * Copyright (c) 2013-2024 Ibrahim Abdelkader
+ * Copyright (c) 2013-2024 Kwabena W. Agyeman
+ *
+ * This work is licensed under the MIT license, see the file LICENSE for details.
+ *
+ * Python NMS class.
+ */
+#include "imlib_config.h"
+
+#ifdef IMLIB_ENABLE_TF
+#include "py/runtime.h"
+#include "py_helper.h"
+
+// TF NMS Object.
+typedef struct py_tf_nms_obj {
+    mp_obj_base_t base;
+    int window_w;
+    int window_h;
+    rectangle_t roi;
+    list_t bounding_boxes;
+} py_tf_nms_obj_t;
+
+const mp_obj_type_t py_tf_nms_type;
+
+// The use of mp_arg_parse_all() is deliberately avoided here to ensure this method remains fast.
+STATIC mp_obj_t py_tf_nms_add_bounding_box(uint n_args, const mp_obj_t *pos_args) {
+    enum { ARG_self, ARG_xmin, ARG_ymin, ARG_xmax, ARG_ymax, ARG_score, ARG_label_index };
+    py_tf_nms_obj_t *self_in = MP_OBJ_TO_PTR(pos_args[ARG_self]);
+
+    bounding_box_lnk_data_t lnk_data;
+    lnk_data.score = mp_obj_get_float(pos_args[ARG_score]);
+
+    if ((lnk_data.score >= 0.0f) && (lnk_data.score <= 1.0f)) {
+        float xmin = IM_CLAMP(mp_obj_get_float(pos_args[ARG_xmin]), 0.0f, ((float) self_in->window_w));
+        float ymin = IM_CLAMP(mp_obj_get_float(pos_args[ARG_ymin]), 0.0f, ((float) self_in->window_h));
+        float xmax = IM_CLAMP(mp_obj_get_float(pos_args[ARG_xmax]), 0.0f, ((float) self_in->window_w));
+        float ymax = IM_CLAMP(mp_obj_get_float(pos_args[ARG_ymax]), 0.0f, ((float) self_in->window_h));
+
+        lnk_data.rect.w = fast_floorf(xmax - xmin);
+        lnk_data.rect.h = fast_floorf(ymax - ymin);
+
+        if ((lnk_data.rect.w > 0) && (lnk_data.rect.h > 0)) {
+            lnk_data.rect.x = fast_floorf(xmin);
+            lnk_data.rect.y = fast_floorf(ymin);
+            lnk_data.label_index = mp_obj_get_int(pos_args[ARG_label_index]);
+            rectangle_nms_add_bounding_box(&self_in->bounding_boxes, &lnk_data);
+        }
+    }
+
+    return mp_const_none;
+}
+STATIC MP_DEFINE_CONST_FUN_OBJ_VAR_BETWEEN(py_tf_nms_add_bounding_box_obj, 7, 7, py_tf_nms_add_bounding_box);
+
+STATIC mp_obj_t py_tf_nms_get_bounding_boxes(uint n_args, const mp_obj_t *pos_args, mp_map_t *kw_args) {
+    enum { ARG_threshold, ARG_sigma };
+    static const mp_arg_t allowed_args[] = {
+        { MP_QSTR_threshold, MP_ARG_OBJ | MP_ARG_KW_ONLY, {.u_rom_obj = MP_ROM_NONE } },
+        { MP_QSTR_sigma, MP_ARG_OBJ | MP_ARG_KW_ONLY, {.u_rom_obj = MP_ROM_NONE } },
+    };
+
+    mp_arg_val_t args[MP_ARRAY_SIZE(allowed_args)];
+    mp_arg_parse_all(n_args - 1, pos_args + 1, kw_args, MP_ARRAY_SIZE(allowed_args), allowed_args, args);
+
+    py_tf_nms_obj_t *self_in = MP_OBJ_TO_PTR(pos_args[0]);
+    float threshold = py_helper_arg_to_float(args[ARG_threshold].u_obj, 0.1f);
+    float sigma = py_helper_arg_to_float(args[ARG_sigma].u_obj, 0.1f);
+    int max_label = rectangle_nms_get_bounding_boxes(&self_in->bounding_boxes, threshold, sigma);
+    rectangle_map_bounding_boxes(&self_in->bounding_boxes, self_in->window_w, self_in->window_h, &self_in->roi);
+
+    // Create a list per class label.
+    mp_obj_list_t *list = MP_OBJ_TO_PTR(mp_obj_new_list(max_label + 1, NULL));
+    for (size_t i = 0; i <= max_label; i++) {
+        list->items[i] = mp_obj_new_list(0, NULL);
+    }
+
+    list_for_each(it, (&self_in->bounding_boxes)) {
+        bounding_box_lnk_data_t *lnk_data = (bounding_box_lnk_data_t *) it->data;
+        mp_obj_t rect = mp_obj_new_tuple(4, (mp_obj_t []) {mp_obj_new_int(lnk_data->rect.x),
+                                                           mp_obj_new_int(lnk_data->rect.y),
+                                                           mp_obj_new_int(lnk_data->rect.w),
+                                                           mp_obj_new_int(lnk_data->rect.h)});
+        mp_obj_t o = mp_obj_new_tuple(2, (mp_obj_t []) {rect, mp_obj_new_float(lnk_data->score)});
+        mp_obj_list_append(list->items[lnk_data->label_index], o);
+    }
+
+    return list;
+}
+STATIC MP_DEFINE_CONST_FUN_OBJ_KW(py_tf_nms_get_bounding_boxes_obj, 1, py_tf_nms_get_bounding_boxes);
+
+mp_obj_t py_tf_nms_make_new(const mp_obj_type_t *type, size_t n_args, size_t n_kw, const mp_obj_t *all_args) {
+    enum { ARG_window_w, ARG_window_h, ARG_roi };
+    static const mp_arg_t allowed_args[] = {
+        { MP_QSTR_window_w, MP_ARG_INT | MP_ARG_REQUIRED, {.u_int = 0 } },
+        { MP_QSTR_window_h, MP_ARG_INT | MP_ARG_REQUIRED, {.u_int = 0 } },
+        { MP_QSTR_roi, MP_ARG_OBJ | MP_ARG_REQUIRED, {.u_rom_obj = MP_ROM_NONE} },
+    };
+
+    mp_arg_val_t args[MP_ARRAY_SIZE(allowed_args)];
+    mp_arg_parse_all_kw_array(n_args, n_kw, all_args, MP_ARRAY_SIZE(allowed_args), allowed_args, args);
+
+    // Extract the ROI manually as we do not have an image to validate against.
+    mp_obj_t *roi_obj;
+    mp_obj_get_array_fixed_n(args[ARG_roi].u_obj, 4, &roi_obj);
+
+    rectangle_t roi = {
+        .x = mp_obj_get_int(roi_obj[0]),
+        .y = mp_obj_get_int(roi_obj[1]),
+        .w = mp_obj_get_int(roi_obj[2]),
+        .h = mp_obj_get_int(roi_obj[3])
+    };
+
+    if ((roi.w < 1) || (roi.h < 1)) {
+        mp_raise_msg(&mp_type_ValueError, MP_ERROR_TEXT("Invalid ROI dimensions!"));
+    }
+
+    py_tf_nms_obj_t *model = m_new_obj(py_tf_nms_obj_t);
+    model->base.type = &py_tf_nms_type;
+    model->window_w = args[ARG_window_w].u_int;
+    model->window_h = args[ARG_window_h].u_int;
+    model->roi = roi;
+    list_init(&model->bounding_boxes, sizeof(bounding_box_lnk_data_t));
+    return MP_OBJ_FROM_PTR(model);
+}
+
+STATIC const mp_rom_map_elem_t py_tf_nms_locals_table[] = {
+    { MP_ROM_QSTR(MP_QSTR_add_bounding_box), MP_ROM_PTR(&py_tf_nms_add_bounding_box_obj) },
+    { MP_ROM_QSTR(MP_QSTR_get_bounding_boxes), MP_ROM_PTR(&py_tf_nms_get_bounding_boxes_obj) },
+};
+
+STATIC MP_DEFINE_CONST_DICT(py_tf_nms_locals_dict, py_tf_nms_locals_table);
+
+MP_DEFINE_CONST_OBJ_TYPE(
+    py_tf_nms_type,
+    MP_QSTR_tf_nms,
+    MP_TYPE_FLAG_NONE,
+    make_new, py_tf_nms_make_new,
+    locals_dict, &py_tf_nms_locals_dict
+    );
+
+#endif // IMLIB_ENABLE_TF
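
The minimal sketch below (an editor's illustration, not part of the patch) ties the new API together end to end: tf.Model() loads a network, net.predict() runs it and hands the raw output to a callback through model.output, and tf.NMS collects (xmin, ymin, xmax, ymax, score, label_index) boxes and returns them per class after soft non-max suppression. The model name and the threshold/sigma values are the defaults used elsewhere in this patch; any other numbers are placeholders.

import math
import sensor
import time
import tf

sensor.reset()
sensor.set_pixformat(sensor.RGB565)
sensor.set_framesize(sensor.QVGA)
sensor.skip_frames(time=2000)

labels, net = tf.Model("fomo_face_detection")
threshold_list = [(math.ceil(0.4 * 255), 255)]  # keep pixels scoring >= ~40%


def post_process(model, rect):
    out = model.output[0]  # tf_model_output wrapper for the first output tensor
    oh, ow, oc = model.output_shape  # (height, width, channels)
    nms = tf.NMS(ow, oh, rect)  # maps window coordinates back onto the input ROI
    for i in range(oc):
        img = out.get_image(i)  # channel i rendered as a GRAYSCALE image
        for b in img.find_blobs(threshold_list, x_stride=1, area_threshold=1, pixels_threshold=1):
            x, y, w, h = b.rect()
            score = img.get_statistics(thresholds=threshold_list, roi=b.rect()).l_mean() / 255.0
            nms.add_bounding_box(x, y, x + w, y + h, score, i)
    # Soft-NMS: boxes overlapping a winner decay by expf(-iou^2 / sigma) before thresholding.
    return nms.get_bounding_boxes(threshold=0.1, sigma=0.1)


clock = time.clock()
while True:
    clock.tick()
    img = sensor.snapshot()
    for i, detection_list in enumerate(net.predict(img, callback=post_process)):
        if i == 0:
            continue  # class 0 is the background
        for (x, y, w, h), score in detection_list:
            img.draw_rectangle((x, y, w, h))
    print(clock.fps(), "fps")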