diff --git a/scripts/examples/25-Machine-Learning/tf_mobilenet_search_whole_window.py b/scripts/examples/25-Machine-Learning/tf_mobilenet_search_whole_window.py
new file mode 100644
index 000000000..92ce31381
--- /dev/null
+++ b/scripts/examples/25-Machine-Learning/tf_mobilenet_search_whole_window.py
@@ -0,0 +1,58 @@
+# TensorFlow Lite Mobilenet V1 Example
+#
+# Google's Mobilenet V1 detects 1000 classes of objects
+#
+# WARNING: Mobilenet is trained on ImageNet and isn't meant to classify anything
+# in the real world. It's just designed to score well on the ImageNet dataset.
+# This example just shows off running mobilenet on the OpenMV Cam. However, the
+# default model is not really usable for anything. You have to use transfer
+# learning to apply the model to a target problem by re-training the model.
+#
+# NOTE: This example only works on the OpenMV Cam H7 Pro (which has SDRAM) and better!
+# To get the models please see the CNN Network library in OpenMV IDE under
+# Tools -> Machine Vision. The labels are there too.
+#
+# In this example we slide the detector window over the image and get a list
+# of activations. Note that using a CNN with a sliding window is extremely
+# computationally expensive, so do not expect an exhaustive search to run in real-time.
+
+import sensor, image, time, os, tf
+
+sensor.reset()                      # Reset and initialize the sensor.
+sensor.set_pixformat(sensor.RGB565) # Set pixel format to RGB565 (or GRAYSCALE)
+sensor.set_framesize(sensor.QVGA)   # Set frame size to QVGA (320x240)
+sensor.set_windowing((240, 240))    # Set 240x240 window.
+sensor.skip_frames(time=2000)       # Let the camera adjust.
+
+mobilenet_version = "1" # 1
+mobilenet_width = "0.5" # 1.0, 0.75, 0.50, 0.25
+mobilenet_resolution = "128" # 224, 192, 160, 128
+
+mobilenet = "mobilenet_v%s_%s_%s_quant.tflite" % (mobilenet_version, mobilenet_width, mobilenet_resolution)
+labels = [line.rstrip('\n') for line in open("mobilenet_labels.txt")]
+
+clock = time.clock()
+while(True):
+    clock.tick()
+
+    img = sensor.snapshot()
+
+    # tf.classify() will run the network on an roi in the image (or on the whole image if the roi is not
+    # specified). A classification score output vector will be generated for each location. At each scale the
+    # detection window is moved around in the ROI using x_overlap (0-1) and y_overlap (0-1) as a guide.
+    # If you set the overlap to 0.5 then each detection window will overlap the previous one by 50%. Note that
+    # the computational workload goes WAY up with more overlap. Finally, for multi-scale matching, after
+    # sliding the network around in the x/y dimensions the detection window will shrink by scale_mul (0-1)
+    # down to min_scale (0-1). For example, if scale_mul is 0.5 the detection window will shrink by 50%.
+    # Note that at a lower scale there's even more area to search if x_overlap and y_overlap are small...
+
+    # default settings just do one detection... change them to search the image...
+    for obj in tf.classify(mobilenet, img, min_scale=1.0, scale_mul=0.5, x_overlap=0.0, y_overlap=0.0):
+        print("**********\nTop 5 Detections at [x=%d,y=%d,w=%d,h=%d]" % obj.rect())
+        img.draw_rectangle(obj.rect())
+        # This combines the labels and confidence values into a list of tuples
+        # and then sorts that list by the confidence values.
+        sorted_list = sorted(zip(labels, obj.output()), key = lambda x: x[1], reverse = True)
+        for i in range(5):
+            print("%s = %f" % (sorted_list[i][0], sorted_list[i][1]))
+    print(clock.fps(), "fps")
diff --git a/scripts/examples/25-Machine-Learning/tf_mobilenet_serach_just_center.py b/scripts/examples/25-Machine-Learning/tf_mobilenet_serach_just_center.py
new file mode 100644
index 000000000..1371a877f
--- /dev/null
+++ b/scripts/examples/25-Machine-Learning/tf_mobilenet_serach_just_center.py
@@ -0,0 +1,64 @@
+# TensorFlow Lite Mobilenet V1 Example
+#
+# Google's Mobilenet V1 detects 1000 classes of objects
+#
+# WARNING: Mobilenet is trained on ImageNet and isn't meant to classify anything
+# in the real world. It's just designed to score well on the ImageNet dataset.
+# This example just shows off running mobilenet on the OpenMV Cam. However, the
+# default model is not really usable for anything. You have to use transfer
+# learning to apply the model to a target problem by re-training the model.
+#
+# NOTE: This example only works on the OpenMV Cam H7 Pro (which has SDRAM) and better!
+# To get the models please see the CNN Network library in OpenMV IDE under
+# Tools -> Machine Vision. The labels are there too.
+#
+# In this example we slide the detector window over the image and get a list
+# of activations. Note that using a CNN with a sliding window is extremely
+# computationally expensive, so do not expect an exhaustive search to run in real-time.
+
+import sensor, image, time, os, tf
+
+sensor.reset()                      # Reset and initialize the sensor.
+sensor.set_pixformat(sensor.RGB565) # Set pixel format to RGB565 (or GRAYSCALE)
+sensor.set_framesize(sensor.QVGA)   # Set frame size to QVGA (320x240)
+sensor.set_windowing((240, 240))    # Set 240x240 window.
+sensor.skip_frames(time=2000)       # Let the camera adjust.
+
+mobilenet_version = "1" # 1
+mobilenet_width = "0.5" # 1.0, 0.75, 0.50, 0.25
+mobilenet_resolution = "128" # 224, 192, 160, 128
+
+mobilenet = "mobilenet_v%s_%s_%s_quant.tflite" % (mobilenet_version, mobilenet_width, mobilenet_resolution)
+labels = [line.rstrip('\n') for line in open("mobilenet_labels.txt")]
+
+clock = time.clock()
+while(True):
+    clock.tick()
+
+    img = sensor.snapshot()
+
+    # tf.classify() will run the network on an roi in the image (or on the whole image if the roi is not
+    # specified). A classification score output vector will be generated for each location. At each scale the
+    # detection window is moved around in the ROI using x_overlap (0-1) and y_overlap (0-1) as a guide.
+    # If you set the overlap to 0.5 then each detection window will overlap the previous one by 50%. Note that
+    # the computational workload goes WAY up with more overlap. Finally, for multi-scale matching, after
+    # sliding the network around in the x/y dimensions the detection window will shrink by scale_mul (0-1)
+    # down to min_scale (0-1). For example, if scale_mul is 0.5 the detection window will shrink by 50%.
+    # Note that at a lower scale there's even more area to search if x_overlap and y_overlap are small...
+
+    # Setting x_overlap=-1 forces the window to stay centered in the ROI in the x direction always. If
+    # y_overlap is not -1 the method will search in all vertical positions.
+
+    # Setting y_overlap=-1 forces the window to stay centered in the ROI in the y direction always. If
+    # x_overlap is not -1 the method will search in all horizontal positions.
+
+    # default settings just do one detection... change them to search the image...
+    for obj in tf.classify(mobilenet, img, min_scale=1.0, scale_mul=0.5, x_overlap=-1, y_overlap=-1):
+        print("**********\nTop 5 Detections at [x=%d,y=%d,w=%d,h=%d]" % obj.rect())
+        img.draw_rectangle(obj.rect())
+        # This combines the labels and confidence values into a list of tuples
+        # and then sorts that list by the confidence values.
+        sorted_list = sorted(zip(labels, obj.output()), key = lambda x: x[1], reverse = True)
+        for i in range(5):
+            print("%s = %f" % (sorted_list[i][0], sorted_list[i][1]))
+    print(clock.fps(), "fps")
diff --git a/src/libtf/cortex-m4/libtf.a b/src/libtf/cortex-m4/libtf.a
index bd6d760b1..aac3ad254 100644
Binary files a/src/libtf/cortex-m4/libtf.a and b/src/libtf/cortex-m4/libtf.a differ
diff --git a/src/libtf/cortex-m7/libtf.a b/src/libtf/cortex-m7/libtf.a
index 1ceda66b4..9b028e249 100644
Binary files a/src/libtf/cortex-m7/libtf.a and b/src/libtf/cortex-m7/libtf.a differ
diff --git a/src/omv/py/py_tf.c b/src/omv/py/py_tf.c
index 56947dce1..548acbbe2 100644
--- a/src/omv/py/py_tf.c
+++ b/src/omv/py/py_tf.c
@@ -137,6 +137,7 @@ STATIC mp_obj_t int_py_tf_load(mp_obj_t path_obj, bool mode)
     }
 
     fb_alloc_mark();
+
     uint32_t tensor_arena_size;
     uint8_t *tensor_arena =
         fb_alloc_all(&tensor_arena_size, FB_ALLOC_PREFER_SIZE);
@@ -153,6 +154,21 @@ STATIC mp_obj_t int_py_tf_load(mp_obj_t path_obj, bool mode)
     return tf_model;
 }
 
+STATIC mp_obj_t py_tf_load_xalloc(mp_obj_t path_obj)
+{
+    return int_py_tf_load(path_obj, false);
+}
+STATIC MP_DEFINE_CONST_FUN_OBJ_1(py_tf_load_obj, py_tf_load_xalloc);
+
+STATIC py_tf_model_obj_t *py_tf_load_fb_alloc(mp_obj_t path_obj)
+{
+    if (MP_OBJ_IS_TYPE(path_obj, &py_tf_model_type)) {
+        return (py_tf_model_obj_t *) path_obj;
+    } else {
+        return (py_tf_model_obj_t *) int_py_tf_load(path_obj, true);
+    }
+}
+
 typedef struct py_tf_input_data_callback_data {
     image_t *img;
     rectangle_t *roi;
@@ -286,7 +302,9 @@ STATIC void py_tf_classify_output_data_callback(void *callback_data,
 
 STATIC mp_obj_t py_tf_classify(uint n_args, const mp_obj_t *args, mp_map_t *kw_args)
 {
-    py_tf_model_obj_t *arg_model;
+    fb_alloc_mark();
+
+    py_tf_model_obj_t *arg_model = py_tf_load_fb_alloc(args[0]);
     image_t *arg_img = py_helper_arg_to_image_mutable(args[1]);
 
     rectangle_t roi;
@@ -304,14 +322,6 @@ STATIC mp_obj_t py_tf_classify(uint n_args, const mp_obj_t *args, mp_map_t *kw_args)
     float arg_y_overlap = py_helper_keyword_float(n_args, args, 6, kw_args, MP_OBJ_NEW_QSTR(MP_QSTR_y_overlap), 0.0f);
     PY_ASSERT_TRUE_MSG(((0.0f <= arg_y_overlap) && (arg_y_overlap < 1.0f)) || (arg_y_overlap == -1.0f), "0 <= y_overlap < 1");
 
-    fb_alloc_mark();
-
-    if (MP_OBJ_IS_TYPE(args[0], &py_tf_model_type)) {
-        arg_model = (py_tf_model_obj_t *) args[0];
-    } else {
-        arg_model = int_py_tf_load(args[0], true);
-    }
-
     uint32_t tensor_arena_size;
     uint8_t *tensor_arena =
         fb_alloc_all(&tensor_arena_size, FB_ALLOC_PREFER_SIZE);
@@ -404,20 +414,14 @@ STATIC void py_tf_segment_output_data_callback(void *callback_data,
 
 STATIC mp_obj_t py_tf_segment(uint n_args, const mp_obj_t *args, mp_map_t *kw_args)
 {
-    py_tf_model_obj_t *arg_model;
+    fb_alloc_mark();
+
+    py_tf_model_obj_t *arg_model = py_tf_load_fb_alloc(args[0]);
     image_t *arg_img = py_helper_arg_to_image_mutable(args[1]);
 
     rectangle_t roi;
     py_helper_keyword_rectangle_roi(arg_img, n_args, args, 2, kw_args, &roi);
 
-    fb_alloc_mark();
-
-    if (MP_OBJ_IS_TYPE(args[0], &py_tf_model_type)) {
-        arg_model = (py_tf_model_obj_t *) args[0];
-    } else {
-        arg_model = int_py_tf_load(args[0], true);
-    }
-
     uint32_t tensor_arena_size;
     uint8_t *tensor_arena =
        fb_alloc_all(&tensor_arena_size, FB_ALLOC_PREFER_SIZE);
@@ -470,12 +474,6 @@ STATIC const mp_obj_type_t py_tf_model_type = {
     .locals_dict = (mp_obj_t) &locals_dict
 };
 
-STATIC mp_obj_t py_tf_load(mp_obj_t path_obj)
-{
-    return int_py_tf_load(path_obj, false);
-}
-STATIC MP_DEFINE_CONST_FUN_OBJ_1(py_tf_load_obj, py_tf_load);
-
 #endif // IMLIB_ENABLE_TF
 
 STATIC const mp_rom_map_elem_t globals_dict_table[] = {
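
The example comments above describe how tf.classify() slides its detection window: x_overlap and y_overlap (0-1) set how far the window steps at each position, while scale_mul and min_scale control how much it shrinks between passes. The sketch below is only illustrative; it reuses the mobilenet, labels, and sensor setup from the example scripts, and the parameter values are chosen just to show an exhaustive multi-scale search rather than being recommended settings.

img = sensor.snapshot()

# Exhaustive search: the window shrinks by 50% each pass (scale_mul=0.5) down
# to half the ROI (min_scale=0.5), and steps with 50% overlap in x and y.
# Expect this to run far slower than the single-detection defaults.
for obj in tf.classify(mobilenet, img,
                       min_scale=0.5, scale_mul=0.5,
                       x_overlap=0.5, y_overlap=0.5):
    # Report only the best class for each window position.
    label, score = max(zip(labels, obj.output()), key=lambda x: x[1])
    print("%s = %f at %s" % (label, score, obj.rect()))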
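
The second example's comments note that an overlap of -1 pins the window to the center of the ROI along that axis. As a variation on that idea (again a sketch with illustrative values, reusing mobilenet and img from the example setup), the window can stay centered horizontally while still stepping vertically:

img = sensor.snapshot()

# x_overlap=-1 keeps the window horizontally centered; y_overlap=0.5 steps it
# through the vertical positions with 50% overlap. min_scale=0.5 shrinks the
# window to half the ROI so there is actually room for it to move.
for obj in tf.classify(mobilenet, img, min_scale=0.5, scale_mul=0.5,
                       x_overlap=-1, y_overlap=0.5):
    print("Searched window:", obj.rect())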
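
The py_tf.c change adds py_tf_load_fb_alloc(), so tf.classify() and tf.segment() now accept either a loaded model object or a path string, which is what lets the new examples pass the mobilenet path directly. Reading the C code, a path is loaded into temporary frame-buffer memory for the duration of the call, while tf.load() (the renamed py_tf_load_xalloc) allocates the model persistently; the performance note in the comments below is an inference from that fb_alloc/xalloc split, not something stated in the patch. A rough sketch of the two call styles, using the model filename the examples build:

img = sensor.snapshot()

# Style 1: pass the .tflite path directly. The new py_tf_load_fb_alloc() loads
# the model into fb_alloc memory just for this call and frees it afterwards.
for obj in tf.classify("mobilenet_v1_0.5_128_quant.tflite", img):
    print(obj.output())

# Style 2: load once with tf.load() and reuse the model object across frames
# (assumption: this avoids re-reading the file on every call).
net = tf.load("mobilenet_v1_0.5_128_quant.tflite")
for obj in tf.classify(net, img):
    print(obj.output())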