modules/py_tf: Remove classification object.

Authored by Kwabena W. Agyeman on 2024-06-19 21:00:45 -07:00; committed by iabdalkader
parent 54e31b163b
commit 3863c38228
3 changed files with 26 additions and 141 deletions


@@ -1,5 +1,5 @@
# This work is licensed under the MIT license.
# Copyright (c) 2013-2023 OpenMV LLC. All rights reserved.
# Copyright (c) 2013-2024 OpenMV LLC. All rights reserved.
# https://github.com/openmv/openmv/blob/master/LICENSE
#
# TensorFlow Lite Mobilenet V1 Example
@@ -12,15 +12,11 @@
# default model is not really usable for anything. You have to use transfer
# learning to apply the model to a target problem by re-training the model.
#
# NOTE: This example only works on the OpenMV Cam H7 Pro (that has SDRAM) and better!
# NOTE: This example only works on the OpenMV Cam H7 Plus (that has SDRAM) and better!
# To get the models please see the CNN Network library in OpenMV IDE under
# Tools -> Machine Vision. The labels are there too.
# You should insert a microSD card into your camera and copy-paste the mobilenet_labels.txt
# file and your chosen model into the root folder for this script to work.
#
# In this example we slide the detector window over the image and get a list
# of activations. Note that using a CNN with a sliding window is extremely compute-
# intensive, so for an exhaustive search do not expect the CNN to be real-time.
import sensor
import time
@@ -41,6 +37,8 @@ mobilenet = "mobilenet_v%s_%s_%s_quant.tflite" % (
    mobilenet_width,
    mobilenet_resolution,
)
net = tf.Model(mobilenet, load_to_fb=True)
labels = [line.rstrip("\n") for line in open("mobilenet_labels.txt")]
clock = time.clock()
@@ -49,31 +47,12 @@ while True:
    img = sensor.snapshot()
    # net.classify() will run the network on an ROI in the image (or on the whole image if the ROI is not
    # specified). A classification score output vector will be generated for each location. At each scale the
    # detection window is moved around in the ROI using x_overlap (0-1) and y_overlap (0-1) as a guide.
    # If you set the overlap to 0.5 then each detection window will overlap the previous one by 50%. Note
    # that the computational workload goes WAY up as the overlap increases. Finally, for multi-scale matching,
    # after sliding the network around in the x/y dimensions the detection window will shrink by scale_mul (0-1)
    # down to min_scale (0-1). For example, if scale_mul is 0.5 the detection window will shrink by 50%.
    # Note that at a lower scale there's even more area to search if x_overlap and y_overlap are small...
    # The default settings just do one detection... change them to search the image...
    # Setting x_overlap=-1 forces the window to stay centered in the ROI in the x direction always. If
    # y_overlap is not -1 the method will search in all vertical positions.
    # Setting y_overlap=-1 forces the window to stay centered in the ROI in the y direction always. If
    # x_overlap is not -1 the method will search in all horizontal positions.
    for obj in tf.classify(
        mobilenet, img, min_scale=1.0, scale_mul=0.5, x_overlap=0.0, y_overlap=0.0
    ):
        print("**********\nTop 5 Detections at [x=%d,y=%d,w=%d,h=%d]" % obj.rect())
        img.draw_rectangle(obj.rect())
        # This combines the labels and confidence values into a list of tuples
        # and then sorts that list by the confidence values.
        sorted_list = sorted(
            zip(labels, obj.output()), key=lambda x: x[1], reverse=True
        )
        for i in range(5):
            print("%s = %f" % (sorted_list[i][0], sorted_list[i][1]))
    print("**********\nTop 5 Detections")
    # This combines the labels and confidence values into a list of tuples
    # and then sorts that list by the confidence values.
    sorted_list = sorted(
        zip(labels, net.predict(img)), key=lambda x: x[1], reverse=True
    )
    for i in range(5):
        print("%s = %f" % (sorted_list[i][0], sorted_list[i][1]))
    print(clock.fps(), "fps")
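The comment block above is the only remaining description of the search behavior this commit removes, so here is a small standalone sketch of the window schedule it describes: the window shrinks by scale_mul down to min_scale, and at each scale it steps by (1 - overlap) of its size per axis. The helper below is purely illustrative and not part of any OpenMV API:

def search_windows(roi_w, roi_h, min_scale=0.5, scale_mul=0.7, x_overlap=0.5, y_overlap=0.5):
    # Start at full size and shrink the window by scale_mul until min_scale.
    scale = 1.0
    while scale >= min_scale:
        w, h = int(roi_w * scale), int(roi_h * scale)
        # An overlap of 0.5 means each step advances by half the window size,
        # so the number of positions (and the workload) grows quickly.
        x_step = max(1, int(w * (1.0 - x_overlap)))
        y_step = max(1, int(h * (1.0 - y_overlap)))
        for y in range(0, roi_h - h + 1, y_step):
            for x in range(0, roi_w - w + 1, x_step):
                yield (x, y, w, h)
        scale *= scale_mul

print(len(list(search_windows(128, 128))))  # the window count explodes as overlap rises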

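After this commit, the classification flow in the example reduces to a whole-image predict() on a persistent model object. A minimal end-to-end sketch, assuming (as the updated example implies) that predict() returns one confidence value per label; the model filename is illustrative:

import sensor
import tf

sensor.reset()
sensor.set_pixformat(sensor.RGB565)
sensor.set_framesize(sensor.QVGA)

net = tf.Model("mobilenet_v1_0.5_128_quant.tflite", load_to_fb=True)  # illustrative filename
labels = [line.rstrip("\n") for line in open("mobilenet_labels.txt")]

img = sensor.snapshot()
# Pair each label with its score and rank them, exactly as the example does.
ranked = sorted(zip(labels, net.predict(img)), key=lambda t: t[1], reverse=True)
for name, conf in ranked[:5]:
    print("%s = %f" % (name, conf))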

@@ -1,5 +1,5 @@
# This work is licensed under the MIT license.
# Copyright (c) 2013-2023 OpenMV LLC. All rights reserved.
# Copyright (c) 2013-2024 OpenMV LLC. All rights reserved.
# https://github.com/openmv/openmv/blob/master/LICENSE
#
# TensorFlow Lite Object Detection Example
@@ -20,10 +20,10 @@ sensor.skip_frames(time=2000) # Let the camera adjust.
min_confidence = 0.4
# Load built-in FOMO face detection model
labels, net = tf.load_builtin_model("fomo_face_detection")
labels, net = tf.Model("fomo_face_detection")
# Alternatively, models can be loaded from the filesystem storage.
# net = tf.load('<object_detection_network>', load_to_fb=True)
# net = tf.Model('<object_detection_network>', load_to_fb=True)
# labels = [line.rstrip('\n') for line in open("labels.txt")]
colors = [ # Add more colors if you are detecting more than 7 classes at once.
@@ -55,11 +55,10 @@ while True:
            continue # no detections for this class?
        print("********** %s **********" % labels[i])
        for d in detection_list:
            [x, y, w, h] = d.rect()
        for (x, y, w, h), score in detection_list:
            center_x = math.floor(x + (w / 2))
            center_y = math.floor(y + (h / 2))
            print(f"x {center_x}\ty {center_y}")
            print(f"x {center_x}\ty {center_y}\tscore {score}")
            img.draw_circle((center_x, center_y, 12), color=colors[i], thickness=2)
    print(clock.fps(), "fps", end="\n")

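The detect() side of the commit makes the same trade: each detection becomes a plain ((x, y, w, h), score) tuple, built directly in the C hunks below, instead of a classification object with .rect() and .output() accessors. A sketch of consuming the new shape; the bare net.detect(img) call is an assumption here, since the example's full call (with its thresholds argument) sits outside the hunk shown above:

# The outer list has one entry per label index; each detection is now a
# plain ((x, y, w, h), score) tuple instead of an object with .rect()/.output().
for i, detection_list in enumerate(net.detect(img)):
    for (x, y, w, h), score in detection_list:
        print(labels[i], "at", x, y, w, h, "score", score)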

@@ -27,7 +27,6 @@
#define PY_TF_LOG_BUFFER_SIZE (512)
#define PY_TF_GRAYSCALE_RANGE ((COLOR_GRAYSCALE_MAX) - (COLOR_GRAYSCALE_MIN))
#define PY_TF_GRAYSCALE_MID (((PY_TF_GRAYSCALE_RANGE) + 1) / 2)
#define PY_TF_CLASSIFICATION_OBJ_SIZE (5)
typedef enum {
    PY_TF_SCALE_NONE,
@@ -62,98 +61,6 @@ STATIC const char *py_tf_map_datatype(libtf_datatype_t datatype) {
    }
}
// TF Classification Object
typedef struct py_tf_classification_obj {
    mp_obj_base_t base;
    mp_obj_t x, y, w, h, output;
} py_tf_classification_obj_t;

STATIC void py_tf_classification_print(const mp_print_t *print, mp_obj_t self_in, mp_print_kind_t kind) {
    py_tf_classification_obj_t *self = self_in;
    mp_printf(print,
              "{\"x\":%d, \"y\":%d, \"w\":%d, \"h\":%d, \"output\":",
              mp_obj_get_int(self->x),
              mp_obj_get_int(self->y),
              mp_obj_get_int(self->w),
              mp_obj_get_int(self->h));
    mp_obj_print_helper(print, self->output, kind);
    mp_printf(print, "}");
}

STATIC mp_obj_t py_tf_classification_subscr(mp_obj_t self_in, mp_obj_t index, mp_obj_t value) {
    if (value == MP_OBJ_SENTINEL) {
        // load
        py_tf_classification_obj_t *self = self_in;
        if (MP_OBJ_IS_TYPE(index, &mp_type_slice)) {
            mp_bound_slice_t slice;
            if (!mp_seq_get_fast_slice_indexes(PY_TF_CLASSIFICATION_OBJ_SIZE, index, &slice)) {
                mp_raise_msg(&mp_type_OSError, MP_ERROR_TEXT("only slices with step=1 (aka None) are supported"));
            }
            mp_obj_tuple_t *result = mp_obj_new_tuple(slice.stop - slice.start, NULL);
            mp_seq_copy(result->items, &(self->x) + slice.start, result->len, mp_obj_t);
            return result;
        }
        switch (mp_get_index(self->base.type, PY_TF_CLASSIFICATION_OBJ_SIZE, index, false)) {
            case 0: return self->x;
            case 1: return self->y;
            case 2: return self->w;
            case 3: return self->h;
            case 4: return self->output;
        }
    }
    return MP_OBJ_NULL; // op not supported
}

mp_obj_t py_tf_classification_rect(mp_obj_t self_in) {
    return mp_obj_new_tuple(4, (mp_obj_t []) {((py_tf_classification_obj_t *) self_in)->x,
                                              ((py_tf_classification_obj_t *) self_in)->y,
                                              ((py_tf_classification_obj_t *) self_in)->w,
                                              ((py_tf_classification_obj_t *) self_in)->h});
}

mp_obj_t py_tf_classification_x(mp_obj_t self_in) {
    return ((py_tf_classification_obj_t *) self_in)->x;
}

mp_obj_t py_tf_classification_y(mp_obj_t self_in) {
    return ((py_tf_classification_obj_t *) self_in)->y;
}

mp_obj_t py_tf_classification_w(mp_obj_t self_in) {
    return ((py_tf_classification_obj_t *) self_in)->w;
}

mp_obj_t py_tf_classification_h(mp_obj_t self_in) {
    return ((py_tf_classification_obj_t *) self_in)->h;
}

mp_obj_t py_tf_classification_output(mp_obj_t self_in) {
    return ((py_tf_classification_obj_t *) self_in)->output;
}

STATIC MP_DEFINE_CONST_FUN_OBJ_1(py_tf_classification_rect_obj, py_tf_classification_rect);
STATIC MP_DEFINE_CONST_FUN_OBJ_1(py_tf_classification_x_obj, py_tf_classification_x);
STATIC MP_DEFINE_CONST_FUN_OBJ_1(py_tf_classification_y_obj, py_tf_classification_y);
STATIC MP_DEFINE_CONST_FUN_OBJ_1(py_tf_classification_w_obj, py_tf_classification_w);
STATIC MP_DEFINE_CONST_FUN_OBJ_1(py_tf_classification_h_obj, py_tf_classification_h);
STATIC MP_DEFINE_CONST_FUN_OBJ_1(py_tf_classification_output_obj, py_tf_classification_output);

STATIC const mp_rom_map_elem_t py_tf_classification_locals_dict_table[] = {
    { MP_ROM_QSTR(MP_QSTR_rect), MP_ROM_PTR(&py_tf_classification_rect_obj) },
    { MP_ROM_QSTR(MP_QSTR_x), MP_ROM_PTR(&py_tf_classification_x_obj) },
    { MP_ROM_QSTR(MP_QSTR_y), MP_ROM_PTR(&py_tf_classification_y_obj) },
    { MP_ROM_QSTR(MP_QSTR_w), MP_ROM_PTR(&py_tf_classification_w_obj) },
    { MP_ROM_QSTR(MP_QSTR_h), MP_ROM_PTR(&py_tf_classification_h_obj) },
    { MP_ROM_QSTR(MP_QSTR_output), MP_ROM_PTR(&py_tf_classification_output_obj) }
};
STATIC MP_DEFINE_CONST_DICT(py_tf_classification_locals_dict, py_tf_classification_locals_dict_table);

MP_DEFINE_CONST_OBJ_TYPE(
    py_tf_classification_type,
    MP_QSTR_tf_classification,
    MP_TYPE_FLAG_NONE,
    print, py_tf_classification_print,
    subscr, py_tf_classification_subscr,
    locals_dict, &py_tf_classification_locals_dict
);
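The removed subscr handler above made each classification object indexable like a 5-tuple, so obj[0:4] gave (x, y, w, h) and obj[4] gave the output. Plain tuples provide that indexing for free, which is presumably why the dedicated type could be dropped; an illustrative comparison:

# Old object (per the removed subscr handler and accessor methods):
#   obj.rect() -> (x, y, w, h); obj.output() -> score; obj[4] -> output
# New detect() result is an ordinary nested tuple and destructures directly:
(x, y, w, h), score = detection  # 'detection' is one entry of a detect() list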
// TF Model Output Object.
typedef struct py_tf_model_output_obj {
    mp_obj_base_t base;
@@ -695,14 +602,15 @@ STATIC mp_obj_t py_tf_model_detect(uint n_args, const mp_obj_t *pos_args, mp_map
            imlib_get_statistics(&stats, img->pixfmt, &hist);
            fb_free(); // fb_alloc(hist.LBinCount * sizeof(float), FB_ALLOC_NO_HINT);
            py_tf_classification_obj_t *o = m_new_obj(py_tf_classification_obj_t);
            o->base.type = &py_tf_classification_type;
            o->x = mp_obj_new_int(fast_floorf(lnk_data.rect.x * scale) + x_offset);
            o->y = mp_obj_new_int(fast_floorf(lnk_data.rect.y * scale) + y_offset);
            o->w = mp_obj_new_int(fast_floorf(lnk_data.rect.w * scale));
            o->h = mp_obj_new_int(fast_floorf(lnk_data.rect.h * scale));
            o->output = mp_obj_new_float(stats.LMean * fscale);
            objects_list->items[j] = o;
            mp_obj_t rect = mp_obj_new_tuple(4, (mp_obj_t []) {
                mp_obj_new_int(fast_floorf(lnk_data.rect.x * scale) + x_offset),
                mp_obj_new_int(fast_floorf(lnk_data.rect.y * scale) + y_offset),
                mp_obj_new_int(fast_floorf(lnk_data.rect.w * scale)),
                mp_obj_new_int(fast_floorf(lnk_data.rect.h * scale))
            });
            objects_list->items[j] = mp_obj_new_tuple(2, (mp_obj_t []) {
                rect, mp_obj_new_float(stats.LMean * fscale)
            });
        }
        out_list->items[i] = objects_list;
@@ -938,7 +846,6 @@ STATIC MP_DEFINE_CONST_FUN_OBJ_1(py_tf_model_deinit_obj, py_tf_model_deinit);
STATIC const mp_rom_map_elem_t py_tf_model_locals_dict_table[] = {
    { MP_ROM_QSTR(MP_QSTR___del__), MP_ROM_PTR(&py_tf_model_deinit_obj) },
    { MP_ROM_QSTR(MP_QSTR_classify), MP_ROM_PTR(&py_tf_model_predict_obj) },
    { MP_ROM_QSTR(MP_QSTR_segment), MP_ROM_PTR(&py_tf_model_segment_obj) },
    { MP_ROM_QSTR(MP_QSTR_detect), MP_ROM_PTR(&py_tf_model_detect_obj) },
    { MP_ROM_QSTR(MP_QSTR_regression), MP_ROM_PTR(&py_tf_model_predict_obj) },
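One detail visible in the locals table above: MP_QSTR_classify and MP_QSTR_regression both point at py_tf_model_predict_obj, making them Python-level aliases of the same predict entry point. Assuming both entries survive this commit, user code can treat them interchangeably, as in this illustrative sketch:

net = tf.Model("model.tflite")  # illustrative filename
# classify() and regression() dispatch to the same C function as predict(),
# so both return the same output vector for the same input image.
print(net.classify(img) == net.regression(img))  # True under the aliasing above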