New Blob Code

The new API is backwards compatible with the previous one except for
advanced features. The new blob code uses a flood fill algorithm that is
3x faster at filling out blobs than the previous code. On the M7 the
30 FPS performance cap is usually reached.

Additionally, blobs are now objects with named attributes, so you no longer
have to access them by index. However, index access is still supported.
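
For example, a minimal usage sketch of the new calling convention (the camera
setup and threshold values here are illustrative only, not part of this commit):

import sensor

sensor.reset()
sensor.set_pixformat(sensor.GRAYSCALE)
sensor.set_framesize(sensor.QVGA)

img = sensor.snapshot()
for b in img.find_blobs([(200, 255)], area_threshold=30, pixels_threshold=30, merge=True):
    img.draw_rectangle(b.rect())            # new: named accessors
    img.draw_cross(b.cx(), b.cy())
    print(b.rotation(), b.code(), b.count())
    print(b[5], b[6], b[7])                 # old: index access still works (cx, cy, rotation)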
Kwabena W. Agyeman 2016-12-27 19:10:24 -05:00
parent 6a8ce01e79
commit af15ec6eb3
7 changed files with 806 additions and 503 deletions


@@ -1,347 +1,599 @@
/*
* This file is part of the OpenMV project.
* Copyright (c) 2013/2014 Ibrahim Abdelkader <i.abdalkader@gmail.com>
/* This file is part of the OpenMV project.
* Copyright (c) 2013-2017 Ibrahim Abdelkader <iabdalkader@openmv.io> & Kwabena W. Agyeman <kwagyeman@openmv.io>
* This work is licensed under the MIT license, see the file LICENSE for details.
*
* Blob and color code/marker detection code...
*
*/
#include <string.h>
#include "fb_alloc.h"
#include "xalloc.h"
#include "imlib.h"
#include "common.h"
ALWAYS_INLINE static uint8_t *init_mask(rectangle_t *roi)
typedef struct xylf
{
return fb_alloc0(((roi->w+7)/8)*roi->h);
int16_t x, y, l, r;
}
xylf_t;
ALWAYS_INLINE static void deinit_mask()
void imlib_find_blobs(list_t *out, new_image_t *ptr, rectangle_t *roi,
list_t *thresholds, bool invert, unsigned int area_threshold, unsigned int pixels_threshold,
bool merge, int margin)
{
fb_free();
}
bitmap_t bitmap; // Same size as the image so we don't have to translate.
bitmap_alloc(&bitmap, ptr->w * ptr->h);
ALWAYS_INLINE static void set_mask_pixel(rectangle_t *roi, uint8_t *mask, int x, int y)
{
mask[(((roi->w+7)/8)*y)+(x/8)] |= (1 << (x%8));
}
size_t lifo_len = (roi->w * 2) + (roi->h * 2); // Use the perimeter as the flood fill max depth.
lifo_t lifo;
lifo_alloc(&lifo, lifo_len, sizeof(xylf_t));
ALWAYS_INLINE static bool get_not_mask_pixel(rectangle_t *roi, uint8_t *mask, int x, int y)
{
return !((mask[(((roi->w+7)/8)*y)+(x/8)] >> (x%8)) & 1);
}
list_init(out, sizeof(find_blobs_list_lnk_data_t));
typedef struct stack_queue {
int head_p, tail_p, size;
point_t *data_p;
} stack_queue_t;
size_t code = 0;
for (list_lnk_t *it = iterator_start_from_head(thresholds); it; it = iterator_next(it)) {
color_thresholds_list_lnk_data_t lnk_data;
iterator_get(thresholds, it, &lnk_data);
ALWAYS_INLINE static stack_queue_t *init_stack_queue(rectangle_t *roi)
{
stack_queue_t *sq = fb_alloc(sizeof(stack_queue_t));
sq->head_p = 0;
sq->tail_p = 0;
// The size here is the perimeter in pixels around the roi. It's the perimeter
// around the roi vs the roi perimeter so that we can't run out of space while
// executing the wildfire algorithm for new points. Additionally, this also
// takes care of the pointer comparison issue since it will never get full.
sq->size = (((roi->w+2)*2)-2)+(((roi->h+2)*2)-2);
sq->data_p = fb_alloc(sq->size*sizeof(point_t));
return sq;
}
switch(ptr->type) {
case IMAGE_TYPE_BINARY: {
for (int y = roi->y, yy = roi->y + roi->h; y < yy; y++) {
uint32_t *row_ptr = IMAGE_COMPUTE_BINARY_PIXEL_ROW_PTR(ptr, y);
size_t row_index = BITMAP_COMPUTE_ROW_INDEX(ptr, y);
for (int x = roi->x, xx = roi->x + roi->w; x < xx; x++) {
if ((!bitmap_bit_get(&bitmap, BITMAP_COMPUTE_INDEX(row_index, x)))
&& COLOR_THRESHOLD_BINARY(IMAGE_GET_BINARY_PIXEL_FAST(row_ptr, x), &lnk_data, invert)) {
int old_x = x;
int old_y = y;
ALWAYS_INLINE static void deinit_stack_queue()
{
fb_free();
fb_free();
}
ALWAYS_INLINE static void stack_queue_push(stack_queue_t *sq, int x, int y)
{
sq->data_p[sq->head_p] = (point_t) {.x = x, .y = y};
sq->head_p = (sq->head_p + 1) % sq->size;
}
ALWAYS_INLINE static point_t stack_queue_pop(stack_queue_t *sq)
{
point_t p = sq->data_p[sq->tail_p];
sq->tail_p = (sq->tail_p + 1) % sq->size;
return p;
}
ALWAYS_INLINE static bool stack_queue_not_empty(stack_queue_t *sq)
{
return sq->head_p != sq->tail_p;
}
ALWAYS_INLINE static bool threshold_gs(image_t *img, int x, int y, simple_color_t l_thresholds, simple_color_t h_thresholds, bool invert)
{
int pixel = IM_GET_GS_PIXEL(img, x, y);
return invert ^
((l_thresholds.G <= pixel) &&
(pixel <= h_thresholds.G));
}
ALWAYS_INLINE static bool threshold_rgb565(image_t *img, int x, int y, simple_color_t l_thresholds, simple_color_t h_thresholds, bool invert)
{
int pixel = IM_GET_RGB565_PIXEL(img, x, y);
const int lab_l = IM_RGB5652L(pixel);
const int lab_a = IM_RGB5652A(pixel);
const int lab_b = IM_RGB5652B(pixel);
return invert ^
((l_thresholds.L <= lab_l) &&
(lab_l <= h_thresholds.L) &&
(l_thresholds.A <= lab_a) &&
(lab_a <= h_thresholds.A) &&
(l_thresholds.B <= lab_b) &&
(lab_b <= h_thresholds.B));
}
ALWAYS_INLINE static bool threshold(image_t *img, int x, int y, simple_color_t l_thresholds, simple_color_t h_thresholds, bool invert)
{
if (IM_IS_GS(img)) {
return threshold_gs(img, x, y, l_thresholds, h_thresholds, invert);
} else {
return threshold_rgb565(img, x, y, l_thresholds, h_thresholds, invert);
}
}
array_t *imlib_find_blobs(image_t *img,
int num_thresholds, simple_color_t *l_thresholds, simple_color_t *h_thresholds,
bool invert, rectangle_t *r,
bool (*f_fun)(void*,void*,color_blob_t*), void *f_fun_arg_0, void *f_fun_arg_1)
{
// We're using a modified wildfire algorithm below where instead of using
// the stack we use a queue along with a burn mask to filter out already
// visited pixels. For each color blob in the image, where a color blob is
// an area of connected pixels that all are within a threshold, the algorithm
// computes the bounding box around all those pixels, number of pixels in the
// blob, centroid, and blob orientation. The algorithm then returns a list
// of all the blobs in the image. Note that blobs can be mapped back to colors
// by their blob code number.
rectangle_t rect;
if (!rectangle_subimg(img, r, &rect)) {
return NULL;
}
uint8_t *mask = init_mask(&rect);
stack_queue_t *sq = init_stack_queue(&rect);
array_t *blobs_list;
array_alloc(&blobs_list, xfree);
for (int n = 0; n < num_thresholds; n++) {
for (int i = 0; i < rect.h; i++) {
for (int j = 0; j < rect.w; j++) {
int x = (rect.x + j); // in img
int y = (rect.y + i); // in img
if (get_not_mask_pixel(&rect, mask, j, i) // in roi
&& threshold(img, x, y, l_thresholds[n], h_thresholds[n], invert)) { // in img
int blob_x1 = x;
int blob_y1 = y;
int blob_x2 = x;
int blob_y2 = y;
int blob_pixels = 1;
int blob_cx = x;
int blob_cy = y;
int blob_a = x*x; // equal to (x-mx)^2
int blob_b = x*y; // equal to (x-mx)*(y-my)
int blob_c = y*y; // equal to (y-my)^2
set_mask_pixel(&rect, mask, j, i); // in roi
stack_queue_push(sq, x, y); // in img
do {
point_t p = stack_queue_pop(sq);
for (int a = -1; a <= 1; a++) {
for (int b = -1; b <= 1; b++) {
int c = (p.x + b); // in img
int d = (p.y + a); // in img
int e = (c - rect.x); // in roi
int f = (d - rect.y); // in roi
if (IM_X_INSIDE(&rect, e) // in roi
&& IM_Y_INSIDE(&rect, f) // in roi
&& get_not_mask_pixel(&rect, mask, e, f) // in roi
&& threshold(img, c, d, l_thresholds[n], h_thresholds[n], invert)) { // in img
blob_x1 = IM_MIN(blob_x1, c);
blob_y1 = IM_MIN(blob_y1, d);
blob_x2 = IM_MAX(blob_x2, c);
blob_y2 = IM_MAX(blob_y2, d);
int blob_pixels = 0;
int blob_cx = 0;
int blob_cy = 0;
long long blob_a = 0;
long long blob_b = 0;
long long blob_c = 0;
// Scanline Flood Fill Algorithm //
for(;;) {
int left = x, right = x;
uint32_t *row = IMAGE_COMPUTE_BINARY_PIXEL_ROW_PTR(ptr, y);
size_t index = BITMAP_COMPUTE_ROW_INDEX(ptr, y);
while ((left > roi->x)
&& (!bitmap_bit_get(&bitmap, BITMAP_COMPUTE_INDEX(index, left - 1)))
&& COLOR_THRESHOLD_BINARY(IMAGE_GET_BINARY_PIXEL_FAST(row, left - 1), &lnk_data, invert)) {
left--;
}
while ((right < (roi->x + roi->w - 1))
&& (!bitmap_bit_get(&bitmap, BITMAP_COMPUTE_INDEX(index, right + 1)))
&& COLOR_THRESHOLD_BINARY(IMAGE_GET_BINARY_PIXEL_FAST(row, right + 1), &lnk_data, invert)) {
right++;
}
blob_x1 = IM_MIN(blob_x1, left);
blob_y1 = IM_MIN(blob_y1, y);
blob_x2 = IM_MAX(blob_x2, right);
blob_y2 = IM_MAX(blob_y2, y);
for (int i = left; i <= right; i++) {
bitmap_bit_set(&bitmap, BITMAP_COMPUTE_INDEX(index, i));
blob_pixels += 1;
blob_cx += c;
blob_cy += d;
blob_a += c*c;
blob_b += c*d;
blob_c += d*d;
set_mask_pixel(&rect, mask, e, f); // in roi
stack_queue_push(sq, c, d); // in img
blob_cx += i;
blob_cy += y;
blob_a += i*i;
blob_b += i*y;
blob_c += y*y;
}
bool break_out = false;
for(;;) {
if (lifo_size(&lifo) < lifo_len) {
if (y > roi->y) {
row = IMAGE_COMPUTE_BINARY_PIXEL_ROW_PTR(ptr, y - 1);
index = BITMAP_COMPUTE_ROW_INDEX(ptr, y - 1);
bool recurse = false;
for (int i = left; i <= right; i++) {
if ((!bitmap_bit_get(&bitmap, BITMAP_COMPUTE_INDEX(index, i)))
&& COLOR_THRESHOLD_BINARY(IMAGE_GET_BINARY_PIXEL_FAST(row, i), &lnk_data, invert)) {
xylf_t context;
context.x = x;
context.y = y;
context.l = left;
context.r = right;
lifo_enqueue(&lifo, &context);
x = i;
y = y - 1;
recurse = true;
break;
}
}
if (recurse) {
break;
}
}
if (y < (roi->y + roi->h - 1)) {
row = IMAGE_COMPUTE_BINARY_PIXEL_ROW_PTR(ptr, y + 1);
index = BITMAP_COMPUTE_ROW_INDEX(ptr, y + 1);
bool recurse = false;
for (int i = left; i <= right; i++) {
if ((!bitmap_bit_get(&bitmap, BITMAP_COMPUTE_INDEX(index, i)))
&& COLOR_THRESHOLD_BINARY(IMAGE_GET_BINARY_PIXEL_FAST(row, i), &lnk_data, invert)) {
xylf_t context;
context.x = x;
context.y = y;
context.l = left;
context.r = right;
lifo_enqueue(&lifo, &context);
x = i;
y = y + 1;
recurse = true;
break;
}
}
if (recurse) {
break;
}
}
}
} while(stack_queue_not_empty(sq));
int mx = (blob_cx/blob_pixels); // x centroid
int my = (blob_cy/blob_pixels); // y centroid
// The below equations were derived by translating the orientation
// calculation from a double pass algorithm to single pass.
blob_a -= (mx*blob_cx)+(mx*blob_cx);
blob_a += blob_pixels*mx*mx;
blob_b -= (mx*blob_cy)+(my*blob_cx);
blob_b += blob_pixels*mx*my;
blob_c -= (my*blob_cy)+(my*blob_cy);
blob_c += blob_pixels*my*my;
// Compute the final blob orientation from a, b, and c sums.
float o = ((blob_a!=blob_c)?fast_atan2f(blob_b,blob_a-blob_c):0.0)/2.0;
color_blob_t cb;
cb.x = blob_x1;
cb.y = blob_y1;
cb.w = blob_x2-blob_x1+1;
cb.h = blob_y2-blob_y1+1;
cb.pixels = blob_pixels;
cb.cx = mx;
cb.cy = my;
cb.rotation = o;
cb.code = 1<<n;
cb.count = 1;
// We allocate in the below code to speed things up.
if ((f_fun != NULL) && (f_fun_arg_0 != NULL) && (f_fun_arg_1 != NULL)) {
if (f_fun(f_fun_arg_0, f_fun_arg_1, &cb)) {
color_blob_t *cb2 = xalloc(sizeof(color_blob_t));
memcpy(cb2, &cb, sizeof(color_blob_t));
array_push_back(blobs_list, cb2);
if (!lifo_size(&lifo)) {
break_out = true;
break;
}
xylf_t context;
lifo_dequeue(&lifo, &context);
x = context.x;
y = context.y;
left = context.l;
right = context.r;
}
if (break_out) {
break;
}
}
// http://www.cse.usf.edu/~r1k/MachineVisionBook/MachineVision.files/MachineVision_Chapter2.pdf
// https://www.strchr.com/standard_deviation_in_one_pass
//
// a = sigma(x*x) - (mx*sigma(x)) - (mx*sigma(x)) + (sigma()*mx*mx)
// b = sigma(x*y) - (mx*sigma(y)) - (my*sigma(x)) + (sigma()*mx*my)
// c = sigma(y*y) - (my*sigma(y)) - (my*sigma(y)) + (sigma()*my*my)
//
// blob_a = sigma(x*x)
// blob_b = sigma(x*y)
// blob_c = sigma(y*y)
// blob_cx = sigma(x)
// blob_cy = sigma(y)
// blob_pixels = sigma()
int mx = blob_cx / blob_pixels; // x centroid
int my = blob_cy / blob_pixels; // y centroid
int small_blob_a = blob_a - ((mx * blob_cx) + (mx * blob_cx)) + (blob_pixels * mx * mx);
int small_blob_b = blob_b - ((mx * blob_cy) + (my * blob_cx)) + (blob_pixels * mx * my);
int small_blob_c = blob_c - ((my * blob_cy) + (my * blob_cy)) + (blob_pixels * my * my);
find_blobs_list_lnk_data_t lnk_blob;
lnk_blob.rect.x = blob_x1;
lnk_blob.rect.y = blob_y1;
lnk_blob.rect.w = blob_x2 - blob_x1;
lnk_blob.rect.h = blob_y2 - blob_y1;
lnk_blob.pixels = blob_pixels;
lnk_blob.centroid.x = mx;
lnk_blob.centroid.y = my;
lnk_blob.rotation = (small_blob_a != small_blob_c) ? (fast_atan2f(2 * small_blob_b, small_blob_a - small_blob_c) / 2.0f) : 0.0f;
lnk_blob.code = 1 << code;
lnk_blob.count = 1;
if (((lnk_blob.rect.w * lnk_blob.rect.h) >= area_threshold) && (lnk_blob.pixels >= pixels_threshold)) {
list_push_back(out, &lnk_blob);
}
x = old_x;
y = old_y;
}
}
}
break;
}
case IMAGE_TYPE_GRAYSCALE: {
for (int y = roi->y, yy = roi->y + roi->h; y < yy; y++) {
uint8_t *row_ptr = IMAGE_COMPUTE_GRAYSCALE_PIXEL_ROW_PTR(ptr, y);
size_t row_index = BITMAP_COMPUTE_ROW_INDEX(ptr, y);
for (int x = roi->x, xx = roi->x + roi->w; x < xx; x++) {
if ((!bitmap_bit_get(&bitmap, BITMAP_COMPUTE_INDEX(row_index, x)))
&& COLOR_THRESHOLD_GRAYSCALE(IMAGE_GET_GRAYSCALE_PIXEL_FAST(row_ptr, x), &lnk_data, invert)) {
int old_x = x;
int old_y = y;
int blob_x1 = x;
int blob_y1 = y;
int blob_x2 = x;
int blob_y2 = y;
int blob_pixels = 0;
int blob_cx = 0;
int blob_cy = 0;
long long blob_a = 0;
long long blob_b = 0;
long long blob_c = 0;
// Scanline Flood Fill Algorithm //
for(;;) {
int left = x, right = x;
uint8_t *row = IMAGE_COMPUTE_GRAYSCALE_PIXEL_ROW_PTR(ptr, y);
size_t index = BITMAP_COMPUTE_ROW_INDEX(ptr, y);
while ((left > roi->x)
&& (!bitmap_bit_get(&bitmap, BITMAP_COMPUTE_INDEX(index, left - 1)))
&& COLOR_THRESHOLD_GRAYSCALE(IMAGE_GET_GRAYSCALE_PIXEL_FAST(row, left - 1), &lnk_data, invert)) {
left--;
}
while ((right < (roi->x + roi->w - 1))
&& (!bitmap_bit_get(&bitmap, BITMAP_COMPUTE_INDEX(index, right + 1)))
&& COLOR_THRESHOLD_GRAYSCALE(IMAGE_GET_GRAYSCALE_PIXEL_FAST(row, right + 1), &lnk_data, invert)) {
right++;
}
blob_x1 = IM_MIN(blob_x1, left);
blob_y1 = IM_MIN(blob_y1, y);
blob_x2 = IM_MAX(blob_x2, right);
blob_y2 = IM_MAX(blob_y2, y);
for (int i = left; i <= right; i++) {
bitmap_bit_set(&bitmap, BITMAP_COMPUTE_INDEX(index, i));
blob_pixels += 1;
blob_cx += i;
blob_cy += y;
blob_a += i*i;
blob_b += i*y;
blob_c += y*y;
}
bool break_out = false;
for(;;) {
if (lifo_size(&lifo) < lifo_len) {
if (y > roi->y) {
row = IMAGE_COMPUTE_GRAYSCALE_PIXEL_ROW_PTR(ptr, y - 1);
index = BITMAP_COMPUTE_ROW_INDEX(ptr, y - 1);
bool recurse = false;
for (int i = left; i <= right; i++) {
if ((!bitmap_bit_get(&bitmap, BITMAP_COMPUTE_INDEX(index, i)))
&& COLOR_THRESHOLD_GRAYSCALE(IMAGE_GET_GRAYSCALE_PIXEL_FAST(row, i), &lnk_data, invert)) {
xylf_t context;
context.x = x;
context.y = y;
context.l = left;
context.r = right;
lifo_enqueue(&lifo, &context);
x = i;
y = y - 1;
recurse = true;
break;
}
}
if (recurse) {
break;
}
}
if (y < (roi->y + roi->h - 1)) {
row = IMAGE_COMPUTE_GRAYSCALE_PIXEL_ROW_PTR(ptr, y + 1);
index = BITMAP_COMPUTE_ROW_INDEX(ptr, y + 1);
bool recurse = false;
for (int i = left; i <= right; i++) {
if ((!bitmap_bit_get(&bitmap, BITMAP_COMPUTE_INDEX(index, i)))
&& COLOR_THRESHOLD_GRAYSCALE(IMAGE_GET_GRAYSCALE_PIXEL_FAST(row, i), &lnk_data, invert)) {
xylf_t context;
context.x = x;
context.y = y;
context.l = left;
context.r = right;
lifo_enqueue(&lifo, &context);
x = i;
y = y + 1;
recurse = true;
break;
}
}
if (recurse) {
break;
}
}
}
if (!lifo_size(&lifo)) {
break_out = true;
break;
}
xylf_t context;
lifo_dequeue(&lifo, &context);
x = context.x;
y = context.y;
left = context.l;
right = context.r;
}
if (break_out) {
break;
}
}
// http://www.cse.usf.edu/~r1k/MachineVisionBook/MachineVision.files/MachineVision_Chapter2.pdf
// https://www.strchr.com/standard_deviation_in_one_pass
//
// a = sigma(x*x) - (mx*sigma(x)) - (mx*sigma(x)) + (sigma()*mx*mx)
// b = sigma(x*y) - (mx*sigma(y)) - (my*sigma(x)) + (sigma()*mx*my)
// c = sigma(y*y) - (my*sigma(y)) - (my*sigma(y)) + (sigma()*my*my)
//
// blob_a = sigma(x*x)
// blob_b = sigma(x*y)
// blob_c = sigma(y*y)
// blob_cx = sigma(x)
// blob_cy = sigma(y)
// blob_pixels = sigma()
int mx = blob_cx / blob_pixels; // x centroid
int my = blob_cy / blob_pixels; // y centroid
int small_blob_a = blob_a - ((mx * blob_cx) + (mx * blob_cx)) + (blob_pixels * mx * mx);
int small_blob_b = blob_b - ((mx * blob_cy) + (my * blob_cx)) + (blob_pixels * mx * my);
int small_blob_c = blob_c - ((my * blob_cy) + (my * blob_cy)) + (blob_pixels * my * my);
find_blobs_list_lnk_data_t lnk_blob;
lnk_blob.rect.x = blob_x1;
lnk_blob.rect.y = blob_y1;
lnk_blob.rect.w = blob_x2 - blob_x1;
lnk_blob.rect.h = blob_y2 - blob_y1;
lnk_blob.pixels = blob_pixels;
lnk_blob.centroid.x = mx;
lnk_blob.centroid.y = my;
lnk_blob.rotation = (small_blob_a != small_blob_c) ? (fast_atan2f(2 * small_blob_b, small_blob_a - small_blob_c) / 2.0f) : 0.0f;
lnk_blob.code = 1 << code;
lnk_blob.count = 1;
if (((lnk_blob.rect.w * lnk_blob.rect.h) >= area_threshold) && (lnk_blob.pixels >= pixels_threshold)) {
list_push_back(out, &lnk_blob);
}
x = old_x;
y = old_y;
}
}
}
break;
}
case IMAGE_TYPE_RGB565: {
for (int y = roi->y, yy = roi->y + roi->h; y < yy; y++) {
uint16_t *row_ptr = IMAGE_COMPUTE_RGB565_PIXEL_ROW_PTR(ptr, y);
size_t row_index = BITMAP_COMPUTE_ROW_INDEX(ptr, y);
for (int x = roi->x, xx = roi->x + roi->w; x < xx; x++) {
if ((!bitmap_bit_get(&bitmap, BITMAP_COMPUTE_INDEX(row_index, x)))
&& COLOR_THRESHOLD_RGB565(IMAGE_GET_RGB565_PIXEL_FAST(row_ptr, x), &lnk_data, invert)) {
int old_x = x;
int old_y = y;
int blob_x1 = x;
int blob_y1 = y;
int blob_x2 = x;
int blob_y2 = y;
int blob_pixels = 0;
int blob_cx = 0;
int blob_cy = 0;
long long blob_a = 0;
long long blob_b = 0;
long long blob_c = 0;
// Scanline Flood Fill Algorithm //
for(;;) {
int left = x, right = x;
uint16_t *row = IMAGE_COMPUTE_RGB565_PIXEL_ROW_PTR(ptr, y);
size_t index = BITMAP_COMPUTE_ROW_INDEX(ptr, y);
while ((left > roi->x)
&& (!bitmap_bit_get(&bitmap, BITMAP_COMPUTE_INDEX(index, left - 1)))
&& COLOR_THRESHOLD_RGB565(IMAGE_GET_RGB565_PIXEL_FAST(row, left - 1), &lnk_data, invert)) {
left--;
}
while ((right < (roi->x + roi->w - 1))
&& (!bitmap_bit_get(&bitmap, BITMAP_COMPUTE_INDEX(index, right + 1)))
&& COLOR_THRESHOLD_RGB565(IMAGE_GET_RGB565_PIXEL_FAST(row, right + 1), &lnk_data, invert)) {
right++;
}
blob_x1 = IM_MIN(blob_x1, left);
blob_y1 = IM_MIN(blob_y1, y);
blob_x2 = IM_MAX(blob_x2, right);
blob_y2 = IM_MAX(blob_y2, y);
for (int i = left; i <= right; i++) {
bitmap_bit_set(&bitmap, BITMAP_COMPUTE_INDEX(index, i));
blob_pixels += 1;
blob_cx += i;
blob_cy += y;
blob_a += i*i;
blob_b += i*y;
blob_c += y*y;
}
bool break_out = false;
for(;;) {
if (lifo_size(&lifo) < lifo_len) {
if (y > roi->y) {
row = IMAGE_COMPUTE_RGB565_PIXEL_ROW_PTR(ptr, y - 1);
index = BITMAP_COMPUTE_ROW_INDEX(ptr, y - 1);
bool recurse = false;
for (int i = left; i <= right; i++) {
if ((!bitmap_bit_get(&bitmap, BITMAP_COMPUTE_INDEX(index, i)))
&& COLOR_THRESHOLD_RGB565(IMAGE_GET_RGB565_PIXEL_FAST(row, i), &lnk_data, invert)) {
xylf_t context;
context.x = x;
context.y = y;
context.l = left;
context.r = right;
lifo_enqueue(&lifo, &context);
x = i;
y = y - 1;
recurse = true;
break;
}
}
if (recurse) {
break;
}
}
if (y < (roi->y + roi->h - 1)) {
row = IMAGE_COMPUTE_RGB565_PIXEL_ROW_PTR(ptr, y + 1);
index = BITMAP_COMPUTE_ROW_INDEX(ptr, y + 1);
bool recurse = false;
for (int i = left; i <= right; i++) {
if ((!bitmap_bit_get(&bitmap, BITMAP_COMPUTE_INDEX(index, i)))
&& COLOR_THRESHOLD_RGB565(IMAGE_GET_RGB565_PIXEL_FAST(row, i), &lnk_data, invert)) {
xylf_t context;
context.x = x;
context.y = y;
context.l = left;
context.r = right;
lifo_enqueue(&lifo, &context);
x = i;
y = y + 1;
recurse = true;
break;
}
}
if (recurse) {
break;
}
}
}
if (!lifo_size(&lifo)) {
break_out = true;
break;
}
xylf_t context;
lifo_dequeue(&lifo, &context);
x = context.x;
y = context.y;
left = context.l;
right = context.r;
}
if (break_out) {
break;
}
}
// http://www.cse.usf.edu/~r1k/MachineVisionBook/MachineVision.files/MachineVision_Chapter2.pdf
// https://www.strchr.com/standard_deviation_in_one_pass
//
// a = sigma(x*x) - (mx*sigma(x)) - (mx*sigma(x)) + (sigma()*mx*mx)
// b = sigma(x*y) - (mx*sigma(y)) - (my*sigma(x)) + (sigma()*mx*my)
// c = sigma(y*y) - (my*sigma(y)) - (my*sigma(y)) + (sigma()*my*my)
//
// blob_a = sigma(x*x)
// blob_b = sigma(x*y)
// blob_c = sigma(y*y)
// blob_cx = sigma(x)
// blob_cy = sigma(y)
// blob_pixels = sigma()
int mx = blob_cx / blob_pixels; // x centroid
int my = blob_cy / blob_pixels; // y centroid
int small_blob_a = blob_a - ((mx * blob_cx) + (mx * blob_cx)) + (blob_pixels * mx * mx);
int small_blob_b = blob_b - ((mx * blob_cy) + (my * blob_cx)) + (blob_pixels * mx * my);
int small_blob_c = blob_c - ((my * blob_cy) + (my * blob_cy)) + (blob_pixels * my * my);
find_blobs_list_lnk_data_t lnk_blob;
lnk_blob.rect.x = blob_x1;
lnk_blob.rect.y = blob_y1;
lnk_blob.rect.w = blob_x2 - blob_x1;
lnk_blob.rect.h = blob_y2 - blob_y1;
lnk_blob.pixels = blob_pixels;
lnk_blob.centroid.x = mx;
lnk_blob.centroid.y = my;
lnk_blob.rotation = (small_blob_a != small_blob_c) ? (fast_atan2f(2 * small_blob_b, small_blob_a - small_blob_c) / 2.0f) : 0.0f;
lnk_blob.code = 1 << code;
lnk_blob.count = 1;
if (((lnk_blob.rect.w * lnk_blob.rect.h) >= area_threshold) && (lnk_blob.pixels >= pixels_threshold)) {
list_push_back(out, &lnk_blob);
}
x = old_x;
y = old_y;
}
}
}
break;
}
default: {
break;
}
}
code += 1;
}
lifo_free(&lifo);
bitmap_free(&bitmap);
if (merge) {
for(;;) {
bool merge_occured = false;
list_t out_temp;
list_init(&out_temp, sizeof(find_blobs_list_lnk_data_t));
while(list_size(out)) {
find_blobs_list_lnk_data_t lnk_blob;
list_pop_front(out, &lnk_blob);
for (size_t k = 0, l = list_size(out); k < l; k++) {
find_blobs_list_lnk_data_t tmp_blob;
list_pop_front(out, &tmp_blob);
rectangle_t temp;
temp.x = IM_MAX(IM_MIN(tmp_blob.rect.x - margin, INT16_MAX), INT16_MIN);
temp.y = IM_MAX(IM_MIN(tmp_blob.rect.y - margin, INT16_MAX), INT16_MIN);
temp.w = IM_MAX(IM_MIN(tmp_blob.rect.w + (margin * 2), INT16_MAX), 0);
temp.h = IM_MAX(IM_MIN(tmp_blob.rect.h + (margin * 2), INT16_MAX), 0);
if (rectangle_overlap(&(lnk_blob.rect), &temp)) {
rectangle_united(&(lnk_blob.rect), &(tmp_blob.rect));
lnk_blob.centroid.x = ((lnk_blob.centroid.x * lnk_blob.pixels) + (tmp_blob.centroid.x * tmp_blob.pixels)) / (lnk_blob.pixels + tmp_blob.pixels);
lnk_blob.centroid.y = ((lnk_blob.centroid.y * lnk_blob.pixels) + (tmp_blob.centroid.y * tmp_blob.pixels)) / (lnk_blob.pixels + tmp_blob.pixels);
lnk_blob.rotation = ((lnk_blob.rotation * lnk_blob.pixels) + (tmp_blob.rotation * tmp_blob.pixels)) / (lnk_blob.pixels + tmp_blob.pixels);
lnk_blob.pixels += tmp_blob.pixels; // won't overflow
lnk_blob.code |= tmp_blob.code;
lnk_blob.count = IM_MAX(IM_MIN(lnk_blob.count + tmp_blob.count, UINT16_MAX), 0);
merge_occured = true;
} else {
if (blob_pixels >= ((img->w*img->h)/1000)) {
color_blob_t *cb2 = xalloc(sizeof(color_blob_t));
memcpy(cb2, &cb, sizeof(color_blob_t));
array_push_back(blobs_list, cb2);
}
}
}
}
list_push_back(out, &tmp_blob);
}
}
deinit_stack_queue();
deinit_mask();
list_push_back(&out_temp, &lnk_blob);
}
return blobs_list;
}
array_t *imlib_find_markers(array_t *blobs_list, int margin,
bool (*f_fun)(void*,void*,color_blob_t*), void *f_fun_arg_0, void *f_fun_arg_1)
{
// After you have a list of blobs this function will merge blobs that
// intersect into one blob. The new merged big blob will have a bounding box
// that surrounds all the merged blobs, pixels will include all the blobs,
// and centroids/orientations are averaged. Additionally, the new blob will
// have an extra code value with a bit set for each color that was merged
// into the blob along with the number of blobs merged. The color code
// provides a nice and easy user controllable way to get an idea of what
// colors are in a merged blob.
if (!array_length(blobs_list)) return NULL;
rectangle_t rect; // reusing mask from above - so we need a fake rect obj.
rect.x = 0;
rect.y = 0;
rect.w = array_length(blobs_list);
rect.h = 1;
uint8_t *mask = init_mask(&rect);
array_t *blobs_list_ret;
array_alloc(&blobs_list_ret, xfree);
for (int i = 0, ii = array_length(blobs_list); i < ii; i++) {
if (get_not_mask_pixel(&rect, mask, i, 0)) {
set_mask_pixel(&rect, mask, i, 0);
color_blob_t *cb0 = array_at(blobs_list, i);
int blob_x = cb0->x; // rect x
int blob_y = cb0->y; // rect y
int blob_w = cb0->w; // rect w
int blob_h = cb0->h; // rect h
int blob_pixels = cb0->pixels; // pixels
int blob_cx = cb0->cx; // centroid x
int blob_cy = cb0->cy; // centroid y
float blob_rotation = cb0->rotation; // rotation
int blob_code = cb0->code; // code bit
int blob_count = cb0->count; // blob count
for (int j = 0, jj = array_length(blobs_list); j < jj;) {
if (get_not_mask_pixel(&rect, mask, j, 0)) {
color_blob_t *cb1 = array_at(blobs_list, j);
rectangle_t t0, t1;
t0.x = blob_x - margin;
t0.y = blob_y - margin;
t0.w = blob_w + (2*margin);
t0.h = blob_h + (2*margin);
t1.x = cb1->x - margin;
t1.y = cb1->y - margin;
t1.w = cb1->w + (2*margin);
t1.h = cb1->h + (2*margin);
if (rectangle_intersects(&t0, &t1)) {
set_mask_pixel(&rect, mask, j, 0);
// Compute bounding rect...
int x2_0 = blob_x+blob_w-1;
int x2_1 = cb1->x+cb1->w-1;
int x2 = IM_MAX(x2_0, x2_1);
blob_x = IM_MIN(blob_x, cb1->x);
blob_w = x2-blob_x+1;
int y2_0 = blob_y+blob_h-1;
int y2_1 = cb1->y+cb1->h-1;
int y2 = IM_MAX(y2_0, y2_1);
blob_y = IM_MIN(blob_y, cb1->y);
blob_h = y2-blob_y+1;
// Update tracking info...
blob_pixels += cb1->pixels;
blob_cx += cb1->cx;
blob_cy += cb1->cy;
blob_rotation += cb1->rotation;
blob_code |= cb1->code;
blob_count += cb1->count;
// Start over if we merged so we don't miss something.
// Since our rect has grown we have to recheck blobs
// that didn't intersect previously.
j = 0;
continue;
}
}
j += 1;
}
blob_cx /= blob_count;
blob_cy /= blob_count;
blob_rotation /= blob_count;
// Build output object.
color_blob_t cb;
cb.x = blob_x;
cb.y = blob_y;
cb.w = blob_w;
cb.h = blob_h;
cb.pixels = blob_pixels;
cb.cx = blob_cx;
cb.cy = blob_cy;
cb.rotation = blob_rotation;
cb.code = blob_code;
cb.count = blob_count;
// We allocate in the below code to speed things up.
if ((f_fun != NULL) && (f_fun_arg_0 != NULL) && (f_fun_arg_1 != NULL)) {
if (f_fun(f_fun_arg_0, f_fun_arg_1, &cb)) {
color_blob_t *cb2 = xalloc(sizeof(color_blob_t));
memcpy(cb2, &cb, sizeof(color_blob_t));
array_push_back(blobs_list_ret, cb2);
}
} else {
color_blob_t *cb2 = xalloc(sizeof(color_blob_t));
memcpy(cb2, &cb, sizeof(color_blob_t));
array_push_back(blobs_list_ret, cb2);
}
}
}
deinit_mask();
return blobs_list_ret;
list_copy(out, &out_temp);
if (!merge_occured) {
break;
}
}
}
}
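
The merge pass above grows each candidate rectangle by margin, unions overlapping
rectangles, and combines the per-blob statistics. A rough standalone Python
illustration of that combination step (plain dicts standing in for
find_blobs_list_lnk_data_t; not firmware code):

def merge_two(a, b):
    # Union of the two bounding boxes (x, y, w, h).
    x1 = min(a["x"], b["x"])
    y1 = min(a["y"], b["y"])
    x2 = max(a["x"] + a["w"], b["x"] + b["w"])
    y2 = max(a["y"] + a["h"], b["y"] + b["h"])
    total = a["pixels"] + b["pixels"]
    return {
        "x": x1, "y": y1, "w": x2 - x1, "h": y2 - y1,
        # Centroid and rotation are pixel-weighted averages.
        "cx": (a["cx"] * a["pixels"] + b["cx"] * b["pixels"]) // total,
        "cy": (a["cy"] * a["pixels"] + b["cy"] * b["pixels"]) // total,
        "rotation": (a["rotation"] * a["pixels"] + b["rotation"] * b["pixels"]) / total,
        "pixels": total,
        "code": a["code"] | b["code"],    # one bit per color threshold that matched
        "count": a["count"] + b["count"],
    }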


@@ -884,6 +884,16 @@ typedef enum jpeg_subsample {
JPEG_SUBSAMPLE_2x2 = 0x22, // 2x2 chroma subsampling
} jpeg_subsample_t;
typedef struct find_blobs_list_lnk_data
{
rectangle_t rect;
uint32_t pixels;
point_t centroid;
float rotation;
uint16_t code, count;
}
find_blobs_list_lnk_data_t;
typedef struct find_qrcodes_list_lnk_data
{
rectangle_t rect;
@@ -989,14 +999,6 @@ void imlib_median_filter(image_t *img, const int ksize, const int percentile);
void imlib_histeq(image_t *img);
void imlib_mask_ellipse(image_t *img);
/* Color Tracking */
array_t *imlib_find_blobs(image_t *img,
int num_thresholds, simple_color_t *l_thresholds, simple_color_t *h_thresholds,
bool invert, rectangle_t *r,
bool (*f_fun)(void*,void*,color_blob_t*), void *f_fun_arg_0, void *f_fun_arg_1);
array_t *imlib_find_markers(array_t *blobs_list, int margin,
bool (*f_fun)(void*,void*,color_blob_t*), void *f_fun_arg_0, void *f_fun_arg_1);
/* Template Matching */
void imlib_midpoint_pool(image_t *img_i, image_t *img_o, int x_div, int y_div, const int bias);
void imlib_mean_pool(image_t *img_i, image_t *img_o, int x_div, int y_div);
@@ -1065,6 +1067,10 @@ void imlib_find_hog(image_t *src, rectangle_t *roi, int cell_size);
// Lens correction
void imlib_lens_corr(image_t *src, float strength);
// Color Tracking
void imlib_find_blobs(list_t *out, new_image_t *ptr, rectangle_t *roi,
list_t *thresholds, bool invert, unsigned int area_threshold, unsigned int pixels_threshold,
bool merge, int margin);
// Codes
void imlib_find_qrcodes(list_t *out, new_image_t *ptr, rectangle_t *roi);
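
The rotation carried in find_blobs_list_lnk_data_t comes from the single-pass
sums sigma(x*x), sigma(x*y), sigma(y*y) accumulated during the fill and then
re-centered on the centroid, as in the C code above. A small standalone check
of that identity against a direct two-pass computation (not firmware code; the
sample points are arbitrary):

import math

pts = [(3, 4), (4, 4), (5, 5), (6, 5), (7, 6)]  # arbitrary blob pixels

# One pass: raw sums, as the C code accumulates them.
n = len(pts)
sx = sum(x for x, _ in pts)
sy = sum(y for _, y in pts)
sxx = sum(x * x for x, _ in pts)
sxy = sum(x * y for x, y in pts)
syy = sum(y * y for _, y in pts)
mx, my = sx // n, sy // n  # integer centroid, as in the firmware

# Re-center the raw sums (the small_blob_a/b/c step).
a = sxx - (mx * sx + mx * sx) + n * mx * mx
b = sxy - (mx * sy + my * sx) + n * mx * my
c = syy - (my * sy + my * sy) + n * my * my

# Two-pass reference: centered sums computed directly.
assert a == sum((x - mx) ** 2 for x, _ in pts)
assert b == sum((x - mx) * (y - my) for x, y in pts)
assert c == sum((y - my) ** 2 for _, y in pts)

rotation = (math.atan2(2 * b, a - c) / 2) if a != c else 0.0
print(rotation)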


@@ -911,173 +911,207 @@ static mp_obj_t py_image_mask_ellipse(mp_obj_t img_obj)
return img_obj;
}
static bool py_image_find_blobs_f_fun(void *fun_obj, void *img_obj, color_blob_t *cb)
// Blob Object //
#define py_blob_obj_size 10
typedef struct py_blob_obj {
mp_obj_base_t base;
mp_obj_t x, y, w, h, pixels, cx, cy, rotation, code, count;
} py_blob_obj_t;
static void py_blob_print(const mp_print_t *print, mp_obj_t self_in, mp_print_kind_t kind)
{
mp_obj_t blob_obj[10] = {
mp_obj_new_int(cb->x),
mp_obj_new_int(cb->y),
mp_obj_new_int(cb->w),
mp_obj_new_int(cb->h),
mp_obj_new_int(cb->pixels),
mp_obj_new_int(cb->cx),
mp_obj_new_int(cb->cy),
mp_obj_new_float(cb->rotation),
mp_obj_new_int(cb->code),
mp_obj_new_int(cb->count)
};
return mp_obj_is_true(mp_call_function_2(fun_obj, img_obj, mp_obj_new_tuple(10, blob_obj)));
py_blob_obj_t *self = self_in;
mp_printf(print,
"{x:%d, y:%d, w:%d, h:%d, pixels:%d, cx:%d, cy:%d, rotation:%f, code:%d, count:%d}",
mp_obj_get_int(self->x),
mp_obj_get_int(self->y),
mp_obj_get_int(self->w),
mp_obj_get_int(self->h),
mp_obj_get_int(self->pixels),
mp_obj_get_int(self->cx),
mp_obj_get_int(self->cy),
(double) mp_obj_get_float(self->rotation),
mp_obj_get_int(self->code),
mp_obj_get_int(self->count));
}
static mp_obj_t py_blob_subscr(mp_obj_t self_in, mp_obj_t index, mp_obj_t value)
{
if (value == MP_OBJ_SENTINEL) { // load
py_blob_obj_t *self = self_in;
if (MP_OBJ_IS_TYPE(index, &mp_type_slice)) {
mp_bound_slice_t slice;
if (!mp_seq_get_fast_slice_indexes(py_blob_obj_size, index, &slice)) {
mp_not_implemented("only slices with step=1 (aka None) are supported");
}
mp_obj_tuple_t *result = mp_obj_new_tuple(slice.stop - slice.start, NULL);
mp_seq_copy(result->items, &(self->x) + slice.start, result->len, mp_obj_t);
return result;
}
switch (mp_get_index(self->base.type, py_blob_obj_size, index, false)) {
case 0: return self->x;
case 1: return self->y;
case 2: return self->w;
case 3: return self->h;
case 4: return self->pixels;
case 5: return self->cx;
case 6: return self->cy;
case 7: return self->rotation;
case 8: return self->code;
case 9: return self->count;
}
}
return MP_OBJ_NULL; // op not supported
}
mp_obj_t py_blob_rect(mp_obj_t self_in)
{
return mp_obj_new_tuple(4, (mp_obj_t []) {((py_blob_obj_t *) self_in)->x,
((py_blob_obj_t *) self_in)->y,
((py_blob_obj_t *) self_in)->w,
((py_blob_obj_t *) self_in)->h});
}
mp_obj_t py_blob_x(mp_obj_t self_in) { return ((py_blob_obj_t *) self_in)->x; }
mp_obj_t py_blob_y(mp_obj_t self_in) { return ((py_blob_obj_t *) self_in)->y; }
mp_obj_t py_blob_w(mp_obj_t self_in) { return ((py_blob_obj_t *) self_in)->w; }
mp_obj_t py_blob_h(mp_obj_t self_in) { return ((py_blob_obj_t *) self_in)->h; }
mp_obj_t py_blob_pixels(mp_obj_t self_in) { return ((py_blob_obj_t *) self_in)->pixels; }
mp_obj_t py_blob_cx(mp_obj_t self_in) { return ((py_blob_obj_t *) self_in)->cx; }
mp_obj_t py_blob_cy(mp_obj_t self_in) { return ((py_blob_obj_t *) self_in)->cy; }
mp_obj_t py_blob_rotation(mp_obj_t self_in) { return ((py_blob_obj_t *) self_in)->rotation; }
mp_obj_t py_blob_code(mp_obj_t self_in) { return ((py_blob_obj_t *) self_in)->code; }
mp_obj_t py_blob_count(mp_obj_t self_in) { return ((py_blob_obj_t *) self_in)->count; }
mp_obj_t py_blob_area(mp_obj_t self_in) {
return mp_obj_new_int(mp_obj_get_int(((py_blob_obj_t *) self_in)->w) * mp_obj_get_int(((py_blob_obj_t *) self_in)->h));
}
mp_obj_t py_blob_density(mp_obj_t self_in) {
int area = mp_obj_get_int(((py_blob_obj_t *) self_in)->w) * mp_obj_get_int(((py_blob_obj_t *) self_in)->h);
if (area) return mp_obj_new_float(mp_obj_get_int(((py_blob_obj_t *) self_in)->pixels) / area);
return mp_obj_new_float(0.0f);
}
STATIC MP_DEFINE_CONST_FUN_OBJ_1(py_blob_rect_obj, py_blob_rect);
STATIC MP_DEFINE_CONST_FUN_OBJ_1(py_blob_x_obj, py_blob_x);
STATIC MP_DEFINE_CONST_FUN_OBJ_1(py_blob_y_obj, py_blob_y);
STATIC MP_DEFINE_CONST_FUN_OBJ_1(py_blob_w_obj, py_blob_w);
STATIC MP_DEFINE_CONST_FUN_OBJ_1(py_blob_h_obj, py_blob_h);
STATIC MP_DEFINE_CONST_FUN_OBJ_1(py_blob_pixels_obj, py_blob_pixels);
STATIC MP_DEFINE_CONST_FUN_OBJ_1(py_blob_cx_obj, py_blob_cx);
STATIC MP_DEFINE_CONST_FUN_OBJ_1(py_blob_cy_obj, py_blob_cy);
STATIC MP_DEFINE_CONST_FUN_OBJ_1(py_blob_rotation_obj, py_blob_rotation);
STATIC MP_DEFINE_CONST_FUN_OBJ_1(py_blob_code_obj, py_blob_code);
STATIC MP_DEFINE_CONST_FUN_OBJ_1(py_blob_count_obj, py_blob_count);
STATIC MP_DEFINE_CONST_FUN_OBJ_1(py_blob_area_obj, py_blob_area);
STATIC MP_DEFINE_CONST_FUN_OBJ_1(py_blob_density_obj, py_blob_density);
STATIC const mp_rom_map_elem_t py_blob_locals_dict_table[] = {
{ MP_ROM_QSTR(MP_QSTR_rect), MP_ROM_PTR(&py_blob_rect_obj) },
{ MP_ROM_QSTR(MP_QSTR_x), MP_ROM_PTR(&py_blob_x_obj) },
{ MP_ROM_QSTR(MP_QSTR_y), MP_ROM_PTR(&py_blob_y_obj) },
{ MP_ROM_QSTR(MP_QSTR_w), MP_ROM_PTR(&py_blob_w_obj) },
{ MP_ROM_QSTR(MP_QSTR_h), MP_ROM_PTR(&py_blob_h_obj) },
{ MP_ROM_QSTR(MP_QSTR_pixels), MP_ROM_PTR(&py_blob_pixels_obj) },
{ MP_ROM_QSTR(MP_QSTR_cx), MP_ROM_PTR(&py_blob_cx_obj) },
{ MP_ROM_QSTR(MP_QSTR_cy), MP_ROM_PTR(&py_blob_cy_obj) },
{ MP_ROM_QSTR(MP_QSTR_rotation), MP_ROM_PTR(&py_blob_rotation_obj) },
{ MP_ROM_QSTR(MP_QSTR_code), MP_ROM_PTR(&py_blob_code_obj) },
{ MP_ROM_QSTR(MP_QSTR_count), MP_ROM_PTR(&py_blob_count_obj) },
{ MP_ROM_QSTR(MP_QSTR_area), MP_ROM_PTR(&py_blob_area_obj) } ,
{ MP_ROM_QSTR(MP_QSTR_density), MP_ROM_PTR(&py_blob_density_obj) }
};
STATIC MP_DEFINE_CONST_DICT(py_blob_locals_dict, py_blob_locals_dict_table);
static const mp_obj_type_t py_blob_type = {
{ &mp_type_type },
.name = MP_QSTR_blob,
.print = py_blob_print,
.subscr = py_blob_subscr,
.locals_dict = (mp_obj_t) &py_blob_locals_dict,
};
static mp_obj_t py_image_find_blobs(uint n_args, const mp_obj_t *args, mp_map_t *kw_args)
{
image_t *arg_img = py_image_cobj(args[0]);
PY_ASSERT_FALSE_MSG(IM_IS_JPEG(arg_img),
"Operation not supported on JPEG");
mp_uint_t arg_t_len;
mp_obj_t *arg_t;
mp_obj_get_array(args[1], &arg_t_len, &arg_t);
if (!arg_t_len) return mp_obj_new_list(0, NULL); // return an empty list to be iterable
// Transfer to new image type.
new_image_t image;
image_init(&image, (arg_img->bpp == 2) ? IMAGE_TYPE_RGB565 : IMAGE_TYPE_GRAYSCALE, arg_img->w, arg_img->h);
image.size = arg_img->bpp * arg_img->w * arg_img->h;
image.data = arg_img->pixels;
simple_color_t l_t[arg_t_len], u_t[arg_t_len];
if (IM_IS_GS(arg_img)) {
for (int i=0; i<arg_t_len; i++) {
mp_obj_t *temp;
mp_obj_get_array_fixed_n(arg_t[i], 2, &temp);
int lo = mp_obj_get_int(temp[0]);
int hi = mp_obj_get_int(temp[1]);
// Swap ranges if they are wrong.
l_t[i].G = IM_MIN(lo, hi);
u_t[i].G = IM_MAX(lo, hi);
rectangle_t roi;
py_helper_lookup_rectangle(kw_args, arg_img, &roi);
mp_uint_t arg_thresholds_len;
mp_obj_t *arg_thresholds;
mp_obj_get_array(args[1], &arg_thresholds_len, &arg_thresholds);
if (!arg_thresholds_len) {
return mp_obj_new_list(0, NULL);
}
} else {
for (int i=0; i<arg_t_len; i++) {
mp_obj_t *temp;
mp_obj_get_array_fixed_n(arg_t[i], 6, &temp);
int l_lo = mp_obj_get_int(temp[0]);
int l_hi = mp_obj_get_int(temp[1]);
int a_lo = mp_obj_get_int(temp[2]);
int a_hi = mp_obj_get_int(temp[3]);
int b_lo = mp_obj_get_int(temp[4]);
int b_hi = mp_obj_get_int(temp[5]);
// Swap ranges if they are wrong.
l_t[i].L = IM_MIN(l_lo, l_hi);
u_t[i].L = IM_MAX(l_lo, l_hi);
l_t[i].A = IM_MIN(a_lo, a_hi);
u_t[i].A = IM_MAX(a_lo, a_hi);
l_t[i].B = IM_MIN(b_lo, b_hi);
u_t[i].B = IM_MAX(b_lo, b_hi);
list_t thresholds;
list_init(&thresholds, sizeof(color_thresholds_list_lnk_data_t));
for(mp_uint_t i = 0; i < arg_thresholds_len; i++) {
mp_uint_t arg_threshold_len;
mp_obj_t *arg_threshold;
mp_obj_get_array(arg_thresholds[i], &arg_threshold_len, &arg_threshold);
if (arg_threshold_len) {
color_thresholds_list_lnk_data_t lnk_data;
lnk_data.LMin = (arg_threshold_len > 0) ? IM_MAX(IM_MIN(mp_obj_get_int(arg_threshold[0]), IM_MAX(COLOR_L_MAX, COLOR_GRAYSCALE_MAX)), IM_MIN(COLOR_L_MIN, COLOR_GRAYSCALE_MIN)) : 0;
lnk_data.LMax = (arg_threshold_len > 1) ? IM_MAX(IM_MIN(mp_obj_get_int(arg_threshold[1]), IM_MAX(COLOR_L_MAX, COLOR_GRAYSCALE_MAX)), IM_MIN(COLOR_L_MIN, COLOR_GRAYSCALE_MIN)) : 0;
lnk_data.AMin = (arg_threshold_len > 2) ? IM_MAX(IM_MIN(mp_obj_get_int(arg_threshold[2]), COLOR_A_MAX), COLOR_A_MIN) : 0;
lnk_data.AMax = (arg_threshold_len > 3) ? IM_MAX(IM_MIN(mp_obj_get_int(arg_threshold[3]), COLOR_A_MAX), COLOR_A_MIN) : 0;
lnk_data.BMin = (arg_threshold_len > 4) ? IM_MAX(IM_MIN(mp_obj_get_int(arg_threshold[4]), COLOR_B_MAX), COLOR_B_MIN) : 0;
lnk_data.BMax = (arg_threshold_len > 5) ? IM_MAX(IM_MIN(mp_obj_get_int(arg_threshold[5]), COLOR_B_MAX), COLOR_B_MIN) : 0;
color_thresholds_list_lnk_data_t lnk_data_tmp;
memcpy(&lnk_data_tmp, &lnk_data, sizeof(color_thresholds_list_lnk_data_t));
lnk_data.LMin = IM_MIN(lnk_data_tmp.LMin, lnk_data_tmp.LMax);
lnk_data.LMax = IM_MAX(lnk_data_tmp.LMin, lnk_data_tmp.LMax);
lnk_data.AMin = IM_MIN(lnk_data_tmp.AMin, lnk_data_tmp.AMax);
lnk_data.AMax = IM_MAX(lnk_data_tmp.AMin, lnk_data_tmp.AMax);
lnk_data.BMin = IM_MIN(lnk_data_tmp.BMin, lnk_data_tmp.BMax);
lnk_data.BMax = IM_MAX(lnk_data_tmp.BMin, lnk_data_tmp.BMax);
list_push_back(&thresholds, &lnk_data);
}
}
rectangle_t arg_r;
py_helper_lookup_rectangle(kw_args, arg_img, &arg_r);
bool invert = py_helper_lookup_int(kw_args, MP_OBJ_NEW_QSTR(MP_QSTR_invert), false);
unsigned int area_threshold = py_helper_lookup_int(kw_args, MP_OBJ_NEW_QSTR(MP_QSTR_area_threshold), 10);
unsigned int pixels_threshold = py_helper_lookup_int(kw_args, MP_OBJ_NEW_QSTR(MP_QSTR_pixels_threshold), 10);
bool merge = py_helper_lookup_int(kw_args, MP_OBJ_NEW_QSTR(MP_QSTR_merge), false);
int margin = py_helper_lookup_int(kw_args, MP_OBJ_NEW_QSTR(MP_QSTR_margin), 0);
mp_map_elem_t *kw_arg = mp_map_lookup(kw_args, MP_OBJ_NEW_QSTR(MP_QSTR_feature_filter), MP_MAP_LOOKUP);
mp_obj_t kw_val = (kw_arg != NULL) ? kw_arg->value : NULL;
// TODO: Need to set fb_alloc trap here to recover from any exception...
list_t out;
imlib_find_blobs(&out, &image, &roi, &thresholds, invert, area_threshold, pixels_threshold, merge, margin);
list_free(&thresholds);
mp_obj_list_t *objects_list = mp_obj_new_list(list_size(&out), NULL);
int arg_invert = py_helper_lookup_int(kw_args, MP_OBJ_NEW_QSTR(MP_QSTR_invert), 0);
array_t *blobs_list = imlib_find_blobs(arg_img, arg_t_len, l_t, u_t, arg_invert ? 1 : 0, &arg_r,
py_image_find_blobs_f_fun, kw_val, args[0]);
if (blobs_list == NULL) {
return mp_obj_new_list(0, NULL); // return an empty list to be iterable
for (size_t i = 0; list_size(&out); i++) {
find_blobs_list_lnk_data_t lnk_data;
list_pop_front(&out, &lnk_data);
py_blob_obj_t *o = m_new_obj(py_blob_obj_t);
o->base.type = &py_blob_type;
o->x = mp_obj_new_int(lnk_data.rect.x);
o->y = mp_obj_new_int(lnk_data.rect.y);
o->w = mp_obj_new_int(lnk_data.rect.w);
o->h = mp_obj_new_int(lnk_data.rect.h);
o->pixels = mp_obj_new_int(lnk_data.pixels);
o->cx = mp_obj_new_int(lnk_data.centroid.x);
o->cy = mp_obj_new_int(lnk_data.centroid.y);
o->rotation = mp_obj_new_float(lnk_data.rotation);
o->code = mp_obj_new_int(lnk_data.code);
o->count = mp_obj_new_int(lnk_data.count);
objects_list->items[i] = o;
}
mp_obj_t objects_list = mp_obj_new_list(0, NULL);
for (int i=0, j=array_length(blobs_list); i<j; i++) {
color_blob_t *cb = array_at(blobs_list, i);
mp_obj_t blob_obj[10] = {
mp_obj_new_int(cb->x),
mp_obj_new_int(cb->y),
mp_obj_new_int(cb->w),
mp_obj_new_int(cb->h),
mp_obj_new_int(cb->pixels),
mp_obj_new_int(cb->cx),
mp_obj_new_int(cb->cy),
mp_obj_new_float(cb->rotation),
mp_obj_new_int(cb->code),
mp_obj_new_int(cb->count)
};
mp_obj_list_append(objects_list, mp_obj_new_tuple(10, blob_obj));
}
array_free(blobs_list);
return objects_list;
}
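
Since py_image_find_blobs above pulls its tuning values out of kw_args (with
defaults of area_threshold=10, pixels_threshold=10, margin=0, and invert and
merge off), a call from Python might look like the following sketch (threshold
values and camera setup are illustrative only):

import sensor

sensor.reset()
sensor.set_pixformat(sensor.RGB565)
sensor.set_framesize(sensor.QQVGA)

img = sensor.snapshot()
# One LAB threshold tuple (L_min, L_max, A_min, A_max, B_min, B_max); values illustrative.
blobs = img.find_blobs([(30, 80, -20, 20, -20, 20)],
                       roi=(0, 0, img.width(), img.height()),
                       invert=False,
                       area_threshold=100,
                       pixels_threshold=100,
                       merge=True,
                       margin=5)
print(len(blobs))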
static bool py_image_find_markers_f_fun(void *fun_obj, void *img_obj, color_blob_t *cb)
{
mp_obj_t blob_obj[10] = {
mp_obj_new_int(cb->x),
mp_obj_new_int(cb->y),
mp_obj_new_int(cb->w),
mp_obj_new_int(cb->h),
mp_obj_new_int(cb->pixels),
mp_obj_new_int(cb->cx),
mp_obj_new_int(cb->cy),
mp_obj_new_float(cb->rotation),
mp_obj_new_int(cb->code),
mp_obj_new_int(cb->count)
};
return mp_obj_is_true(mp_call_function_2(fun_obj, img_obj, mp_obj_new_tuple(10, blob_obj)));
}
static mp_obj_t py_image_find_markers(uint n_args, const mp_obj_t *args, mp_map_t *kw_args)
{
image_t *arg_img = py_image_cobj(args[0]);
PY_ASSERT_FALSE_MSG(IM_IS_JPEG(arg_img),
"Operation not supported on JPEG");
int margin = py_helper_lookup_int(kw_args, MP_OBJ_NEW_QSTR(MP_QSTR_margin), 2);
mp_map_elem_t *kw_arg = mp_map_lookup(kw_args, MP_OBJ_NEW_QSTR(MP_QSTR_feature_filter), MP_MAP_LOOKUP);
mp_obj_t kw_val = (kw_arg != NULL) ? kw_arg->value : NULL;
mp_uint_t arg_t_len;
mp_obj_t *arg_t;
mp_obj_get_array(args[1], &arg_t_len, &arg_t);
if (!arg_t_len) return mp_obj_new_list(0, NULL); // return an empty list to be iterable
array_t *blobs_list;
array_alloc_init(&blobs_list, xfree, arg_t_len);
for (int i=0; i<arg_t_len; i++) {
mp_obj_t *temp;
mp_obj_get_array_fixed_n(arg_t[i], 10, &temp);
color_blob_t *cb = xalloc(sizeof(color_blob_t));
cb->x = mp_obj_get_int(temp[0]);
cb->y = mp_obj_get_int(temp[1]);
cb->w = mp_obj_get_int(temp[2]);
cb->h = mp_obj_get_int(temp[3]);
cb->pixels = mp_obj_get_int(temp[4]);
cb->cx = mp_obj_get_int(temp[5]);
cb->cy = mp_obj_get_int(temp[6]);
cb->rotation = mp_obj_get_float(temp[7]);
cb->code = mp_obj_get_int(temp[8]);
cb->count = mp_obj_get_int(temp[9]);
array_push_back(blobs_list, cb);
}
array_t *blobs_list_ret = imlib_find_markers(blobs_list, margin,
py_image_find_markers_f_fun, kw_val, args[0]);
if (blobs_list_ret == NULL) {
return mp_obj_new_list(0, NULL); // return an empty list to be iterable
}
array_free(blobs_list);
mp_obj_t objects_list = mp_obj_new_list(0, NULL);
for (int i=0, j=array_length(blobs_list_ret); i<j; i++) {
color_blob_t *cb = array_at(blobs_list_ret, i);
mp_obj_t blob_obj[10] = {
mp_obj_new_int(cb->x),
mp_obj_new_int(cb->y),
mp_obj_new_int(cb->w),
mp_obj_new_int(cb->h),
mp_obj_new_int(cb->pixels),
mp_obj_new_int(cb->cx),
mp_obj_new_int(cb->cy),
mp_obj_new_float(cb->rotation),
mp_obj_new_int(cb->code),
mp_obj_new_int(cb->count)
};
mp_obj_list_append(objects_list, mp_obj_new_tuple(10, blob_obj));
}
array_free(blobs_list_ret);
return objects_list;
}
@@ -1669,7 +1703,6 @@ STATIC MP_DEFINE_CONST_FUN_OBJ_2(py_image_lens_corr_obj, py_image_lens_corr);
STATIC MP_DEFINE_CONST_FUN_OBJ_1(py_image_mask_ellipse_obj, py_image_mask_ellipse);
/* Color Tracking */
STATIC MP_DEFINE_CONST_FUN_OBJ_KW(py_image_find_blobs_obj, 2, py_image_find_blobs);
STATIC MP_DEFINE_CONST_FUN_OBJ_KW(py_image_find_markers_obj, 2, py_image_find_markers);
/* Code Detection */
STATIC MP_DEFINE_CONST_FUN_OBJ_KW(py_image_find_qrcodes_obj, 1, py_image_find_qrcodes);
/* Template Matching */
@@ -1740,7 +1773,6 @@ static const mp_map_elem_t locals_dict_table[] = {
{MP_OBJ_NEW_QSTR(MP_QSTR_mask_ellipse), (mp_obj_t)&py_image_mask_ellipse_obj},
/* Color Tracking */
{MP_OBJ_NEW_QSTR(MP_QSTR_find_blobs), (mp_obj_t)&py_image_find_blobs_obj},
{MP_OBJ_NEW_QSTR(MP_QSTR_find_markers), (mp_obj_t)&py_image_find_markers_obj},
/* Code Detection */
{MP_OBJ_NEW_QSTR(MP_QSTR_find_qrcodes), (mp_obj_t)&py_image_find_qrcodes_obj},
/* Template Matching */


@@ -65,8 +65,6 @@ Q(mean)
Q(mode)
Q(median)
Q(gaussian)
Q(find_blobs)
Q(find_markers)
Q(midpoint_pool)
Q(midpoint_pooled)
Q(mean_pool)
@@ -95,8 +93,6 @@ Q(mul)
Q(add)
Q(bias)
Q(percentile)
Q(feature_filter)
Q(margin)
Q(normalized)
Q(lens_corr)
@@ -309,16 +305,39 @@ Q(CPUFREQ_216MHZ)
Q(get_frequency)
Q(set_frequency)
// Find QRCcode
Q(find_qrcodes)
// Find Blobs
Q(find_blobs)
Q(area_threshold)
Q(pixels_threshold)
Q(merge)
Q(margin)
// duplicate Q(roi)
// QRCode Object
Q(qrcode)
// Blob Object
Q(blob)
Q(rect)
Q(x)
Q(y)
Q(w)
Q(h)
Q(pixels)
Q(cx)
Q(cy)
Q(rotation)
Q(code)
Q(count)
Q(area)
Q(density)
// Find QRCodes
Q(find_qrcodes)
// duplicate Q(roi)
// QRCode Object
Q(qrcode)
// duplicate Q(rect)
// duplicate Q(x)
// duplicate Q(y)
// duplicate Q(w)
// duplicate Q(h)
Q(payload)
Q(version)
Q(ecc_level)


@@ -22,12 +22,10 @@ while(True):
clock.tick() # Track elapsed milliseconds between snapshots().
img = sensor.snapshot() # Take a picture and return the image.
blobs = img.find_blobs([green_threshold])
if blobs:
for b in blobs:
for b in img.find_blobs([green_threshold], area_threshold=30, pixels_threshold=30, merge=True):
# Draw a rect around the blob.
img.draw_rectangle(b[0:4]) # rect
img.draw_cross(b[5], b[6]) # cx, cy
img.draw_rectangle(b.rect()) # rect
img.draw_cross(b.cx(), b.cy()) # cx, cy
print(clock.fps()) # Note: Your OpenMV Cam runs about half as fast while
# connected to your computer. The FPS should increase once disconnected.


@@ -43,9 +43,7 @@ while(True):
centroid_sum = 0
for r in ROIS:
blobs = img.find_blobs(GRAYSCALE_THRESHOLD, roi=r[0:4]) # r[0:4] is roi tuple.
merged_blobs = img.find_markers(blobs) # merge overlapping blobs
merged_blobs = img.find_blobs(GRAYSCALE_THRESHOLD, roi=r[0:4], merge=True) # r[0:4] is roi tuple.
if merged_blobs:
# Find the index of the blob with the most pixels.
most_pixels = 0


@@ -33,10 +33,8 @@ while(True):
clock.tick() # Track elapsed milliseconds between snapshots().
img = sensor.snapshot() # Take a picture and return the image.
blobs = img.find_blobs([red_threshold, blue_threshold])
merged_blobs = img.find_markers(blobs)
if merged_blobs:
for b in merged_blobs:
# margin=2 means blobs can be 2 pixels away from each other to merge
for b in img.find_blobs([red_threshold, blue_threshold], area_threshold=30, pixels_threshold=30, merge=True, margin=2):
# Draw a rect around the blob.
img.draw_rectangle(b[0:4]) # rect
img.draw_cross(b[5], b[6]) # cx, cy