diff --git a/src/omv/fb_alloc.c b/src/omv/fb_alloc.c
index 258616030..11926c7e5 100644
--- a/src/omv/fb_alloc.c
+++ b/src/omv/fb_alloc.c
@@ -13,7 +13,6 @@
 
 extern char _fballoc;
 static char *pointer = &_fballoc;
-static int marks = 0;
 
 #if defined(FB_ALLOC_STATS)
 static uint32_t alloc_bytes;
@@ -26,6 +25,10 @@ extern char _fballoc_overlay;
 static char *pointer_overlay = &_fballoc_overlay;
 #endif
 
+// fb_alloc_free_till_mark() will not free past this.
+// Use fb_alloc_free_till_mark_past_mark_permanent() instead.
+#define FB_PERMANENT_FLAG 0x2
+
 static char *fb_alloc_min_address()
 {
     return (char *) (framebuffer_get_buffer() + framebuffer_get_frame_size());
@@ -46,7 +49,6 @@ __weak NORETURN void fb_alloc_fail()
 void fb_alloc_init0()
 {
     pointer = &_fballoc;
-    marks = 0;
     #if defined(OMV_FB_OVERLAY_MEMORY)
     pointer_overlay = &_fballoc_overlay;
     #endif
@@ -74,18 +76,25 @@ void fb_alloc_mark()
     // we will use a size value of 4 as a marker in the alloc stack.
     *((uint32_t *) new_pointer) = sizeof(uint32_t); // Save size.
     pointer = new_pointer;
-    marks += 1;
     #if defined(FB_ALLOC_STATS)
     alloc_bytes = 0;
     alloc_bytes_peak = 0;
     #endif
 }
 
-void fb_alloc_free_till_mark()
+static void int_fb_alloc_free_till_mark(bool free_permanent)
 {
-    if (!marks) return;
+    // Previously, a marks counting method was used to provide a semaphore lock for this code:
+    //
+    // https://github.com/openmv/openmv/commit/c982617523766018fda70c15818f643ee8b1fd33
+    //
+    // This did not really help with complex memory allocation operations where you want to be
+    // able to unwind things only back to a certain point. It also did not prevent
+    // fb_alloc_free_till_mark() from running in recursive call situations (see find_blobs()).
     while (pointer < &_fballoc) {
         uint32_t size = *((uint32_t *) pointer);
+        if ((!free_permanent) && (size & FB_PERMANENT_FLAG)) return;
+        size &= ~FB_PERMANENT_FLAG;
         #if defined(OMV_FB_OVERLAY_MEMORY)
         if (size & FB_OVERLAY_MEMORY_FLAG) { // Check for fast flag.
             size &= ~FB_OVERLAY_MEMORY_FLAG; // Remove it.
@@ -95,12 +104,26 @@ void fb_alloc_free_till_mark()
         pointer += size; // Get size and pop.
         if (size == sizeof(uint32_t)) break; // Break on first marker.
     }
-    marks -= 1;
     #if defined(FB_ALLOC_STATS)
     printf("fb_alloc peak memory: %lu\n", alloc_bytes_peak);
     #endif
 }
 
+void fb_alloc_free_till_mark()
+{
+    int_fb_alloc_free_till_mark(false);
+}
+
+void fb_alloc_mark_permanent()
+{
+    if (pointer < &_fballoc) *((uint32_t *) pointer) |= FB_PERMANENT_FLAG;
+}
+
+void fb_alloc_free_till_mark_past_mark_permanent()
+{
+    int_fb_alloc_free_till_mark(true);
+}
+
 // returns null pointer without error if size==0
 void *fb_alloc(uint32_t size, int hints)
 {
@@ -199,6 +222,7 @@ void fb_free()
 {
     if (pointer < &_fballoc) {
         uint32_t size = *((uint32_t *) pointer);
+        size &= ~FB_PERMANENT_FLAG;
         #if defined(OMV_FB_OVERLAY_MEMORY)
         if (size & FB_OVERLAY_MEMORY_FLAG) { // Check for fast flag.
             size &= ~FB_OVERLAY_MEMORY_FLAG; // Remove it.
@@ -216,6 +240,7 @@ void fb_free_all()
 {
     while (pointer < &_fballoc) {
         uint32_t size = *((uint32_t *) pointer);
+        size &= ~FB_PERMANENT_FLAG;
         #if defined(OMV_FB_OVERLAY_MEMORY)
         if (size & FB_OVERLAY_MEMORY_FLAG) { // Check for fast flag.
             size &= ~FB_OVERLAY_MEMORY_FLAG; // Remove it.
@@ -227,5 +252,4 @@ void fb_free_all()
         #endif
         pointer += size; // Get size and pop.
     }
-    marks = 0;
 }
diff --git a/src/omv/fb_alloc.h b/src/omv/fb_alloc.h
index 68277813d..7d799eef5 100644
--- a/src/omv/fb_alloc.h
+++ b/src/omv/fb_alloc.h
@@ -5,6 +5,36 @@
  *
  * Interface for using extra frame buffer RAM as a stack.
  *
+ * Theory of operation:
+ *
+ * The frame buffer stack may be used to allocate large areas of RAM very quickly. You can allocate
+ * memory using fb_alloc(), which returns a pointer to an allocated region of memory equal in size
+ * to the amount requested. If the memory is not available fb_alloc() will generate an exception.
+ *
+ * After RAM is allocated with fb_alloc() you can free it with fb_free() in reverse order (LIFO).
+ *
+ * Now, to prevent leaking allocated regions on the frame buffer stack, all fb_alloc()s should be
+ * preceded by fb_alloc_mark(), which starts an fb_alloc() region (a region may contain many
+ * fb_alloc()s). This ensures that if an exception occurs, all fb_alloc()s in the region are freed.
+ *
+ * This is because all exceptions call fb_alloc_free_till_mark() to free the previously allocated
+ * region. Your code should also call fb_alloc_free_till_mark() to free previously allocated memory
+ * once you are done with it. This cleans up all allocs in the region along with the mark itself.
+ *
+ * This makes fb_alloc_free_till_mark() a convenient way to free all previous allocs in one go
+ * without having to call fb_free() for each one.
+ *
+ * Now, it can be tricky to make an allocation permanent (one that you do not want freed), because
+ * exceptions pop the frame buffer stack using fb_alloc_free_till_mark(). Additionally, you may
+ * actually want exceptions to do this until you know that a multi-step allocation operation has
+ * succeeded. To handle these situations, call fb_alloc_mark_permanent() after a complex operation
+ * to prevent fb_alloc_free_till_mark() from freeing past the last allocation.
+ *
+ * When you want to deallocate this permanent region, call
+ * fb_alloc_free_till_mark_past_mark_permanent(), which will ignore the permanent mark and free
+ * backwards until it hits the previously allocated mark.
+ *
+ * Note that fb_free() and fb_free_all() do not respect marks or permanent regions.
  */
 #ifndef __FB_ALLOC_H__
 #define __FB_ALLOC_H__
@@ -18,6 +48,8 @@ void fb_alloc_init0();
 uint32_t fb_avail();
 void fb_alloc_mark();
 void fb_alloc_free_till_mark();
+void fb_alloc_mark_permanent(); // tag memory that should not be popped on exception
+void fb_alloc_free_till_mark_past_mark_permanent(); // frees past marked permanent allocations
 void *fb_alloc(uint32_t size, int hints);
 void *fb_alloc0(uint32_t size, int hints);
 void *fb_alloc_all(uint32_t *size, int hints); // returns pointer and sets size
diff --git a/src/omv/img/blob.c b/src/omv/img/blob.c
index 79b9901ba..1d4d25ecf 100644
--- a/src/omv/img/blob.c
+++ b/src/omv/img/blob.c
@@ -406,7 +406,17 @@ void imlib_find_blobs(list_t *out, image_t *ptr, rectangle_t *roi, unsigned int
                     bin_up(y_hist_bins, ptr->h, y_hist_bins_max, &lnk_blob.y_hist_bins, &lnk_blob.y_hist_bins_count);
                 }
 
-                if (((threshold_cb_arg == NULL) || threshold_cb(threshold_cb_arg, &lnk_blob))) {
+                bool add_to_list = threshold_cb_arg == NULL;
+                if (!add_to_list) {
+                    // Prevent caught exceptions in the callback code from
+                    // freeing our fb_alloc() stack.
+                    fb_alloc_mark();
+                    fb_alloc_mark_permanent();
+                    add_to_list = threshold_cb(threshold_cb_arg, &lnk_blob);
+                    fb_alloc_free_till_mark_past_mark_permanent();
+                }
+
+                if (add_to_list) {
                     list_push_back(out, &lnk_blob);
                 } else {
                     if (lnk_blob.x_hist_bins) xfree(lnk_blob.x_hist_bins);
@@ -666,7 +676,17 @@ void imlib_find_blobs(list_t *out, image_t *ptr, rectangle_t *roi, unsigned int
                     bin_up(y_hist_bins, ptr->h, y_hist_bins_max, &lnk_blob.y_hist_bins, &lnk_blob.y_hist_bins_count);
                 }
 
-                if (((threshold_cb_arg == NULL) || threshold_cb(threshold_cb_arg, &lnk_blob))) {
+                bool add_to_list = threshold_cb_arg == NULL;
+                if (!add_to_list) {
+                    // Prevent caught exceptions in the callback code from
+                    // freeing our fb_alloc() stack.
+                    fb_alloc_mark();
+                    fb_alloc_mark_permanent();
+                    add_to_list = threshold_cb(threshold_cb_arg, &lnk_blob);
+                    fb_alloc_free_till_mark_past_mark_permanent();
+                }
+
+                if (add_to_list) {
                     list_push_back(out, &lnk_blob);
                 } else {
                     if (lnk_blob.x_hist_bins) xfree(lnk_blob.x_hist_bins);
@@ -926,7 +946,17 @@ void imlib_find_blobs(list_t *out, image_t *ptr, rectangle_t *roi, unsigned int
                     bin_up(y_hist_bins, ptr->h, y_hist_bins_max, &lnk_blob.y_hist_bins, &lnk_blob.y_hist_bins_count);
                 }
 
-                if (((threshold_cb_arg == NULL) || threshold_cb(threshold_cb_arg, &lnk_blob))) {
+                bool add_to_list = threshold_cb_arg == NULL;
+                if (!add_to_list) {
+                    // Prevent caught exceptions in the callback code from
+                    // freeing our fb_alloc() stack.
+                    fb_alloc_mark();
+                    fb_alloc_mark_permanent();
+                    add_to_list = threshold_cb(threshold_cb_arg, &lnk_blob);
+                    fb_alloc_free_till_mark_past_mark_permanent();
+                }
+
+                if (add_to_list) {
                     list_push_back(out, &lnk_blob);
                 } else {
                     if (lnk_blob.x_hist_bins) xfree(lnk_blob.x_hist_bins);
diff --git a/src/omv/py/py_sensor.c b/src/omv/py/py_sensor.c
index e5d222437..52b53f374 100644
--- a/src/omv/py/py_sensor.c
+++ b/src/omv/py/py_sensor.c
@@ -206,12 +206,13 @@ static mp_obj_t py_sensor_alloc_extra_fb(mp_obj_t w_obj, mp_obj_t h_obj, mp_obj_
 
     fb_alloc_mark();
     ((image_t *) py_image_cobj(r))->pixels = fb_alloc0(image_size(&img), FB_ALLOC_NO_HINT);
+    fb_alloc_mark_permanent(); // pixels will not be popped on exception
 
     return r;
 }
 
 static mp_obj_t py_sensor_dealloc_extra_fb()
 {
-    fb_alloc_free_till_mark();
+    fb_alloc_free_till_mark_past_mark_permanent();
     return mp_const_none;
 }
diff --git a/src/omv/py/py_tf.c b/src/omv/py/py_tf.c
index c6c059abf..3b3e6b8fc 100644
--- a/src/omv/py/py_tf.c
+++ b/src/omv/py/py_tf.c
@@ -184,6 +184,8 @@ STATIC mp_obj_t int_py_tf_load(mp_obj_t path_obj, bool alloc_mode, bool helper_m
 
     if ((!helper_mode) && (!alloc_mode)) {
         fb_alloc_free_till_mark();
+    } else if ((!helper_mode) && alloc_mode) {
+        fb_alloc_mark_permanent(); // tf_model->model_data will not be popped on exception.
     }
 
     return tf_model;
@@ -197,7 +199,7 @@ STATIC MP_DEFINE_CONST_FUN_OBJ_KW(py_tf_load_obj, 1, py_tf_load);
 
 STATIC mp_obj_t py_tf_free_from_fb()
 {
-    fb_alloc_free_till_mark();
+    fb_alloc_free_till_mark_past_mark_permanent();
     return mp_const_none;
 }
 STATIC MP_DEFINE_CONST_FUN_OBJ_0(py_tf_free_from_fb_obj, py_tf_free_from_fb);
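
Note on the FB_PERMANENT_FLAG encoding in the fb_alloc.c hunks above: the flag can live inside the stored size word because fb_alloc() keeps the pushed sizes 4-byte aligned (the existing FB_OVERLAY_MEMORY_FLAG handling in the same functions already relies on this), so the low bits are free to carry flags and must be masked off before the size is used to pop. A minimal standalone sketch of the encoding, not part of the patch:

    #include <stdint.h>
    #include <stdio.h>

    #define PERMANENT_FLAG 0x2  // mirrors FB_PERMANENT_FLAG; bit 0 is left for the overlay flag

    int main(void)
    {
        uint32_t stored = 64;                                  // a 4-byte-aligned size word on the stack
        stored |= PERMANENT_FLAG;                              // fb_alloc_mark_permanent() ORs the flag in place
        printf("permanent: %d\n", (stored & PERMANENT_FLAG) != 0);
        uint32_t size = stored & ~(uint32_t) PERMANENT_FLAG;   // fb_free()/fb_free_all() mask it off before popping
        printf("size to pop: %lu\n", (unsigned long) size);
        return 0;
    }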
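
The theory-of-operation comment added to fb_alloc.h describes the calling pattern that the py_sensor.c hunk follows. A hedged usage sketch of that pattern from a hypothetical caller (the example_*() names are made up for illustration; only the fb_alloc_*() calls and FB_ALLOC_NO_HINT come from the patched API):

    #include "fb_alloc.h"

    // Temporary scratch memory: an exception anywhere in here unwinds back to the mark.
    void example_temp_work(void)
    {
        fb_alloc_mark();                                   // open a region
        void *scratch = fb_alloc(32 * 1024, FB_ALLOC_NO_HINT);
        // ... use scratch ...
        (void) scratch;
        fb_alloc_free_till_mark();                         // frees scratch and the mark
    }

    // Long-lived buffer: exceptions still unwind the region until we commit it.
    void *example_alloc_persistent(void)
    {
        fb_alloc_mark();
        void *buf = fb_alloc0(64 * 1024, FB_ALLOC_NO_HINT);
        // ... multi-step setup that may raise ...
        fb_alloc_mark_permanent();                         // commit: fb_alloc_free_till_mark() now stops here
        return buf;
    }

    void example_free_persistent(void)
    {
        fb_alloc_free_till_mark_past_mark_permanent();     // explicit teardown ignores the permanent mark
    }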
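
The blob.c hunks guard the user-supplied threshold_cb() because a callback can raise an exception that is caught, and exception handling calls fb_alloc_free_till_mark(), which would otherwise pop find_blobs()'s own allocations. Pushing a mark and immediately flagging it permanent stops that unwind at the guard, and the explicit fb_alloc_free_till_mark_past_mark_permanent() afterwards removes the guard along with anything the callback left behind. A generic sketch of the same guard, with run_user_callback() as a made-up stand-in for threshold_cb():

    #include <stdbool.h>
    #include "fb_alloc.h"

    extern bool run_user_callback(void *arg);   // hypothetical; may raise an exception that is caught upstream

    bool call_callback_guarded(void *arg)
    {
        fb_alloc_mark();                        // guard marker on top of our caller's allocations
        fb_alloc_mark_permanent();              // any fb_alloc_free_till_mark() triggered below stops here
        bool result = run_user_callback(arg);
        fb_alloc_free_till_mark_past_mark_permanent(); // drop the guard and anything the callback leaked
        return result;
    }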