diff --git a/src/omv/img/imlib.h b/src/omv/img/imlib.h
index b98146821..55df5272b 100644
--- a/src/omv/img/imlib.h
+++ b/src/omv/img/imlib.h
@@ -1134,6 +1134,8 @@ void imlib_lens_corr(image_t *img, float strength, float zoom);
 void imlib_get_histogram(histogram_t *out, image_t *ptr, rectangle_t *roi);
 void imlib_get_percentile(percentile_t *out, image_bpp_t bpp, histogram_t *ptr, float percentile);
 void imlib_get_statistics(statistics_t *out, image_bpp_t bpp, histogram_t *ptr);
+bool imlib_get_regression(find_lines_list_lnk_data_t *out, image_t *ptr, rectangle_t *roi, unsigned int x_stride, unsigned int y_stride,
+                          list_t *thresholds, bool invert, bool robust);
 // Color Tracking
 void imlib_find_blobs(list_t *out, image_t *ptr, rectangle_t *roi, unsigned int x_stride, unsigned int y_stride,
                       list_t *thresholds, bool invert, unsigned int area_threshold, unsigned int pixels_threshold,
diff --git a/src/omv/img/stats.c b/src/omv/img/stats.c
index 9a60171b9..c140d94d5 100644
--- a/src/omv/img/stats.c
+++ b/src/omv/img/stats.c
@@ -425,3 +425,307 @@ void imlib_get_statistics(statistics_t *out, image_bpp_t bpp, histogram_t *ptr)
         }
     }
 }
+
+static int get_median(int *array, int array_sum, int array_len)
+{
+    const int median_threshold = (array_sum + 1) / 2;
+    int median_count = 0;
+
+    for (int i = 0; i < array_len; i++) {
+        if ((median_count < median_threshold) && (median_threshold <= (median_count + array[i]))) return i;
+        median_count += array[i];
+    }
+
+    return array_len - 1;
+}
+
+static int get_median_l(long long *array, long long array_sum, int array_len)
+{
+    const long long median_threshold = (array_sum + 1) / 2;
+    long long median_count = 0;
+
+    for (int i = 0; i < array_len; i++) {
+        if ((median_count < median_threshold) && (median_threshold <= (median_count + array[i]))) return i;
+        median_count += array[i];
+    }
+
+    return array_len - 1;
+}
+
+bool imlib_get_regression(find_lines_list_lnk_data_t *out, image_t *ptr, rectangle_t *roi, unsigned int x_stride, unsigned int y_stride,
+                          list_t *thresholds, bool invert, bool robust)
+{
+    bool result = false;
+    memset(out, 0, sizeof(find_lines_list_lnk_data_t));
+
+    if (!robust) { // Least Squares
+        int blob_pixels = 0;
+        int blob_cx = 0;
+        int blob_cy = 0;
+        long long blob_a = 0;
+        long long blob_b = 0;
+        long long blob_c = 0;
+
+        for (list_lnk_t *it = iterator_start_from_head(thresholds); it; it = iterator_next(it)) {
+            color_thresholds_list_lnk_data_t lnk_data;
+            iterator_get(thresholds, it, &lnk_data);
+
+            switch (ptr->bpp) {
+                case IMAGE_BPP_BINARY: {
+                    for (int y = roi->y, yy = roi->y + roi->h; y < yy; y += y_stride) {
+                        uint32_t *row_ptr = IMAGE_COMPUTE_BINARY_PIXEL_ROW_PTR(ptr, y);
+                        for (int x = roi->x + (y % x_stride), xx = roi->x + roi->w; x < xx; x += x_stride) {
+                            if (COLOR_THRESHOLD_BINARY(IMAGE_GET_BINARY_PIXEL_FAST(row_ptr, x), &lnk_data, invert)) {
+                                blob_pixels += 1;
+                                blob_cx += x;
+                                blob_cy += y;
+                                blob_a += x*x;
+                                blob_b += x*y;
+                                blob_c += y*y;
+                            }
+                        }
+                    }
+                    break;
+                }
+                case IMAGE_BPP_GRAYSCALE: {
+                    for (int y = roi->y, yy = roi->y + roi->h; y < yy; y += y_stride) {
+                        uint8_t *row_ptr = IMAGE_COMPUTE_GRAYSCALE_PIXEL_ROW_PTR(ptr, y);
+                        for (int x = roi->x + (y % x_stride), xx = roi->x + roi->w; x < xx; x += x_stride) {
+                            if (COLOR_THRESHOLD_GRAYSCALE(IMAGE_GET_GRAYSCALE_PIXEL_FAST(row_ptr, x), &lnk_data, invert)) {
+                                blob_pixels += 1;
+                                blob_cx += x;
+                                blob_cy += y;
+                                blob_a += x*x;
+                                blob_b += x*y;
+                                blob_c += y*y;
+                            }
+                        }
+                    }
+                    break;
+                }
+                case IMAGE_BPP_RGB565: {
+                    for (int y = roi->y, yy = roi->y + roi->h; y < yy; y += y_stride) {
+                        uint16_t *row_ptr = IMAGE_COMPUTE_RGB565_PIXEL_ROW_PTR(ptr, y);
+                        for (int x = roi->x + (y % x_stride), xx = roi->x + roi->w; x < xx; x += x_stride) {
+                            if (COLOR_THRESHOLD_RGB565(IMAGE_GET_RGB565_PIXEL_FAST(row_ptr, x), &lnk_data, invert)) {
+                                blob_pixels += 1;
+                                blob_cx += x;
+                                blob_cy += y;
+                                blob_a += x*x;
+                                blob_b += x*y;
+                                blob_c += y*y;
+                            }
+                        }
+                    }
+                    break;
+                }
+                default: {
+                    break;
+                }
+            }
+        }
+
+        if (blob_pixels) {
+            // http://www.cse.usf.edu/~r1k/MachineVisionBook/MachineVision.files/MachineVision_Chapter2.pdf
+            // https://www.strchr.com/standard_deviation_in_one_pass
+            //
+            // a = sigma(x*x) - (mx*sigma(x)) - (mx*sigma(x)) + (sigma()*mx*mx)
+            // b = sigma(x*y) - (mx*sigma(y)) - (my*sigma(x)) + (sigma()*mx*my)
+            // c = sigma(y*y) - (my*sigma(y)) - (my*sigma(y)) + (sigma()*my*my)
+            //
+            // blob_a = sigma(x*x)
+            // blob_b = sigma(x*y)
+            // blob_c = sigma(y*y)
+            // blob_cx = sigma(x)
+            // blob_cy = sigma(y)
+            // blob_pixels = sigma()
+
+            int mx = blob_cx / blob_pixels; // x centroid
+            int my = blob_cy / blob_pixels; // y centroid
+            int small_blob_a = blob_a - ((mx * blob_cx) + (mx * blob_cx)) + (blob_pixels * mx * mx);
+            int small_blob_b = blob_b - ((mx * blob_cy) + (my * blob_cx)) + (blob_pixels * mx * my);
+            int small_blob_c = blob_c - ((my * blob_cy) + (my * blob_cy)) + (blob_pixels * my * my);
+
+            float rotation = ((small_blob_a != small_blob_c) ? (fast_atan2f(2 * small_blob_b, small_blob_a - small_blob_c) / 2.0f) : 1.570796f) + 1.570796f; // PI/2
+
+            out->theta = fast_roundf(rotation * 57.295780) % 180; // * (180 / PI)
+            if (out->theta < 0) out->theta += 180;
+            out->rho = fast_roundf(((mx - roi->x) * cos_table[out->theta]) + ((my - roi->y) * sin_table[out->theta]));
+
+            float part0 = (small_blob_a + small_blob_c) / 2.0f;
+            float f_b = (float) small_blob_b;
+            float f_a_c = (float) (small_blob_a - small_blob_c);
+            float part1 = fast_sqrtf((4 * f_b * f_b) + (f_a_c * f_a_c)) / 2.0f;
+            float p_add = fast_sqrtf(part0 + part1);
+            float p_sub = fast_sqrtf(part0 - part1);
+            float e_min = IM_MIN(p_add, p_sub);
+            float e_max = IM_MAX(p_add, p_sub);
+            out->magnitude = fast_roundf(e_max / e_min) - 1; // Circle -> [0, INF) -> Line
+
+            if ((45 <= out->theta) && (out->theta < 135)) {
+                // y = (r - x cos(t)) / sin(t)
+                out->line.x1 = 0;
+                out->line.y1 = fast_roundf((out->rho - (out->line.x1 * cos_table[out->theta])) / sin_table[out->theta]);
+                out->line.x2 = roi->w - 1;
+                out->line.y2 = fast_roundf((out->rho - (out->line.x2 * cos_table[out->theta])) / sin_table[out->theta]);
+            } else {
+                // x = (r - y sin(t)) / cos(t);
+                out->line.y1 = 0;
+                out->line.x1 = fast_roundf((out->rho - (out->line.y1 * sin_table[out->theta])) / cos_table[out->theta]);
+                out->line.y2 = roi->h - 1;
+                out->line.x2 = fast_roundf((out->rho - (out->line.y2 * sin_table[out->theta])) / cos_table[out->theta]);
+            }
+
+            if(lb_clip_line(&out->line, 0, 0, roi->w, roi->h)) {
+                out->line.x1 += roi->x;
+                out->line.y1 += roi->y;
+                out->line.x2 += roi->x;
+                out->line.y2 += roi->y;
+                result = true;
+            } else {
+                memset(out, 0, sizeof(find_lines_list_lnk_data_t));
+            }
+        }
+    } else { // Theil-Sen Estimator
+        int blob_pixels = 0;
+
+        fifo_t fifo;
+        fifo_alloc(&fifo, roi->w * roi->h, sizeof(point_t));
+        int *x_histogram = fb_alloc0(ptr->w * sizeof(int)); // Not roi so we don't have to adjust, we can burn the RAM.
+        int *y_histogram = fb_alloc0(ptr->h * sizeof(int)); // Not roi so we don't have to adjust, we can burn the RAM.
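+        // Overview of the Theil-Sen style fit below: the medians of the x/y histograms
+        // give a robust "centroid" (mx, my), and the medians of the pairwise delta
+        // histograms (filled in further down) give a robust slope for fast_atan2f(mdy, mdx).
+        // Outlier pixels shift a median far less than a mean, at the cost of an O(N^2)
+        // pairwise pass over the thresholded pixels.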
+
+        for (list_lnk_t *it = iterator_start_from_head(thresholds); it; it = iterator_next(it)) {
+            color_thresholds_list_lnk_data_t lnk_data;
+            iterator_get(thresholds, it, &lnk_data);
+
+            switch (ptr->bpp) {
+                case IMAGE_BPP_BINARY: {
+                    for (int y = roi->y, yy = roi->y + roi->h; y < yy; y += y_stride) {
+                        uint32_t *row_ptr = IMAGE_COMPUTE_BINARY_PIXEL_ROW_PTR(ptr, y);
+                        for (int x = roi->x + (y % x_stride), xx = roi->x + roi->w; x < xx; x += x_stride) {
+                            if (COLOR_THRESHOLD_BINARY(IMAGE_GET_BINARY_PIXEL_FAST(row_ptr, x), &lnk_data, invert)) {
+                                blob_pixels += 1;
+                                x_histogram[x]++;
+                                y_histogram[y]++;
+
+                                point_t p;
+                                point_init(&p, x, y);
+                                fifo_enqueue(&fifo, &p);
+                            }
+                        }
+                    }
+                    break;
+                }
+                case IMAGE_BPP_GRAYSCALE: {
+                    for (int y = roi->y, yy = roi->y + roi->h; y < yy; y += y_stride) {
+                        uint8_t *row_ptr = IMAGE_COMPUTE_GRAYSCALE_PIXEL_ROW_PTR(ptr, y);
+                        for (int x = roi->x + (y % x_stride), xx = roi->x + roi->w; x < xx; x += x_stride) {
+                            if (COLOR_THRESHOLD_GRAYSCALE(IMAGE_GET_GRAYSCALE_PIXEL_FAST(row_ptr, x), &lnk_data, invert)) {
+                                blob_pixels += 1;
+                                x_histogram[x]++;
+                                y_histogram[y]++;
+
+                                point_t p;
+                                point_init(&p, x, y);
+                                fifo_enqueue(&fifo, &p);
+                            }
+                        }
+                    }
+                    break;
+                }
+                case IMAGE_BPP_RGB565: {
+                    for (int y = roi->y, yy = roi->y + roi->h; y < yy; y += y_stride) {
+                        uint16_t *row_ptr = IMAGE_COMPUTE_RGB565_PIXEL_ROW_PTR(ptr, y);
+                        for (int x = roi->x + (y % x_stride), xx = roi->x + roi->w; x < xx; x += x_stride) {
+                            if (COLOR_THRESHOLD_RGB565(IMAGE_GET_RGB565_PIXEL_FAST(row_ptr, x), &lnk_data, invert)) {
+                                blob_pixels += 1;
+                                x_histogram[x]++;
+                                y_histogram[y]++;
+
+                                point_t p;
+                                point_init(&p, x, y);
+                                fifo_enqueue(&fifo, &p);
+                            }
+                        }
+                    }
+                    break;
+                }
+                default: {
+                    break;
+                }
+            }
+        }
+
+        if (blob_pixels) {
+            long long delta_sum = (fifo_size(&fifo) * (fifo_size(&fifo) - 1)) / 2;
+
+            if (delta_sum) {
+                // The code below estimates the line slope from the median x/y deltas between all pairs of points.
+                // This is an N^2 operation that can easily blow up if the image is not thresholded carefully...
+                long long *x_delta_histogram = fb_alloc0((2 * ptr->w) * sizeof(long long)); // Not roi so we don't have to adjust, we can burn the RAM.
+                long long *y_delta_histogram = fb_alloc0((2 * ptr->h) * sizeof(long long)); // Not roi so we don't have to adjust, we can burn the RAM.
+
+                while (fifo_is_not_empty(&fifo)) {
+                    point_t p0;
+                    fifo_dequeue(&fifo, &p0);
+
+                    for (size_t i = 0, j = fifo_size(&fifo); i < j; i++) {
+                        point_t p1;
+                        fifo_dequeue(&fifo, &p1);
+
+                        x_delta_histogram[p0.x - p1.x + ptr->w]++; // Note we allocated 1 extra above so we can do ptr->w instead of (ptr->w-1).
+                        y_delta_histogram[p0.y - p1.y + ptr->h]++; // Note we allocated 1 extra above so we can do ptr->h instead of (ptr->h-1).
+
+                        fifo_enqueue(&fifo, &p1);
+                    }
+                }
+
+                int mx = get_median(x_histogram, blob_pixels, ptr->w); // Output doesn't need adjustment.
+                int my = get_median(y_histogram, blob_pixels, ptr->h); // Output doesn't need adjustment.
+                int mdx = get_median_l(x_delta_histogram, delta_sum, 2 * ptr->w) - ptr->w; // Fix offset.
+                int mdy = get_median_l(y_delta_histogram, delta_sum, 2 * ptr->h) - ptr->h; // Fix offset.
+
+                float rotation = (mdx ? fast_atan2f(mdy, mdx) : 1.570796f) + 1.570796f; // PI/2
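+                // fast_atan2f(mdy, mdx) is the direction of the fitted line; the added PI/2
+                // converts it to the normal angle used by the x*cos(theta) + y*sin(theta) = rho
+                // parameterization that theta and rho are derived from below.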
+
+                out->theta = fast_roundf(rotation * 57.295780) % 180; // * (180 / PI)
+                if (out->theta < 0) out->theta += 180;
+                out->rho = fast_roundf(((mx - roi->x) * cos_table[out->theta]) + ((my - roi->y) * sin_table[out->theta]));
+
+                out->magnitude = fast_roundf(fast_sqrtf((mdx * mdx) + (mdy * mdy)));
+
+                if ((45 <= out->theta) && (out->theta < 135)) {
+                    // y = (r - x cos(t)) / sin(t)
+                    out->line.x1 = 0;
+                    out->line.y1 = fast_roundf((out->rho - (out->line.x1 * cos_table[out->theta])) / sin_table[out->theta]);
+                    out->line.x2 = roi->w - 1;
+                    out->line.y2 = fast_roundf((out->rho - (out->line.x2 * cos_table[out->theta])) / sin_table[out->theta]);
+                } else {
+                    // x = (r - y sin(t)) / cos(t);
+                    out->line.y1 = 0;
+                    out->line.x1 = fast_roundf((out->rho - (out->line.y1 * sin_table[out->theta])) / cos_table[out->theta]);
+                    out->line.y2 = roi->h - 1;
+                    out->line.x2 = fast_roundf((out->rho - (out->line.y2 * sin_table[out->theta])) / cos_table[out->theta]);
+                }
+
+                if(lb_clip_line(&out->line, 0, 0, roi->w, roi->h)) {
+                    out->line.x1 += roi->x;
+                    out->line.y1 += roi->y;
+                    out->line.x2 += roi->x;
+                    out->line.y2 += roi->y;
+                    result = true;
+                } else {
+                    memset(out, 0, sizeof(find_lines_list_lnk_data_t));
+                }
+
+                fb_free(); // y_delta_histogram
+                fb_free(); // x_delta_histogram
+            }
+        }
+
+        fb_free(); // y_histogram
+        fb_free(); // x_histogram
+        fifo_free(&fifo);
+    }
+
+    return result;
+}
diff --git a/src/omv/py/py_image.c b/src/omv/py/py_image.c
index e64fcea73..5f87427a4 100644
--- a/src/omv/py/py_image.c
+++ b/src/omv/py/py_image.c
@@ -1964,6 +1964,175 @@ static mp_obj_t py_image_get_statistics(uint n_args, const mp_obj_t *args, mp_ma
     return o;
 }
+
+// Line Object //
+#define py_line_obj_size 8
+typedef struct py_line_obj {
+    mp_obj_base_t base;
+    mp_obj_t x1, y1, x2, y2, length, magnitude, theta, rho;
+} py_line_obj_t;
+
+static void py_line_print(const mp_print_t *print, mp_obj_t self_in, mp_print_kind_t kind)
+{
+    py_line_obj_t *self = self_in;
+    mp_printf(print,
+              "{x1:%d, y1:%d, x2:%d, y2:%d, length:%d, magnitude:%d, theta:%d, rho:%d}",
+              mp_obj_get_int(self->x1),
+              mp_obj_get_int(self->y1),
+              mp_obj_get_int(self->x2),
+              mp_obj_get_int(self->y2),
+              mp_obj_get_int(self->length),
+              mp_obj_get_int(self->magnitude),
+              mp_obj_get_int(self->theta),
+              mp_obj_get_int(self->rho));
+}
+
+static mp_obj_t py_line_subscr(mp_obj_t self_in, mp_obj_t index, mp_obj_t value)
+{
+    if (value == MP_OBJ_SENTINEL) { // load
+        py_line_obj_t *self = self_in;
+        if (MP_OBJ_IS_TYPE(index, &mp_type_slice)) {
+            mp_bound_slice_t slice;
+            if (!mp_seq_get_fast_slice_indexes(py_line_obj_size, index, &slice)) {
+                mp_not_implemented("only slices with step=1 (aka None) are supported");
+            }
+            mp_obj_tuple_t *result = mp_obj_new_tuple(slice.stop - slice.start, NULL);
+            mp_seq_copy(result->items, &(self->x1) + slice.start, result->len, mp_obj_t);
+            return result;
+        }
+        switch (mp_get_index(self->base.type, py_line_obj_size, index, false)) {
+            case 0: return self->x1;
+            case 1: return self->y1;
+            case 2: return self->x2;
+            case 3: return self->y2;
+            case 4: return self->length;
+            case 5: return self->magnitude;
+            case 6: return self->theta;
+            case 7: return self->rho;
+        }
+    }
+    return MP_OBJ_NULL; // op not supported
+}
+
+mp_obj_t py_line_line(mp_obj_t self_in)
+{
+    return mp_obj_new_tuple(4, (mp_obj_t []) {((py_line_obj_t *) self_in)->x1,
+                                              ((py_line_obj_t *) self_in)->y1,
+                                              ((py_line_obj_t *) self_in)->x2,
+                                              ((py_line_obj_t *) self_in)->y2});
+}
+
+mp_obj_t py_line_x1(mp_obj_t self_in) { return ((py_line_obj_t *) self_in)->x1; }
+mp_obj_t py_line_y1(mp_obj_t self_in) { return ((py_line_obj_t *) self_in)->y1; }
+mp_obj_t py_line_x2(mp_obj_t self_in) { return ((py_line_obj_t *) self_in)->x2; }
+mp_obj_t py_line_y2(mp_obj_t self_in) { return ((py_line_obj_t *) self_in)->y2; }
+mp_obj_t py_line_length(mp_obj_t self_in) { return ((py_line_obj_t *) self_in)->length; }
+mp_obj_t py_line_magnitude(mp_obj_t self_in) { return ((py_line_obj_t *) self_in)->magnitude; }
+mp_obj_t py_line_theta(mp_obj_t self_in) { return ((py_line_obj_t *) self_in)->theta; }
+mp_obj_t py_line_rho(mp_obj_t self_in) { return ((py_line_obj_t *) self_in)->rho; }
+
+STATIC MP_DEFINE_CONST_FUN_OBJ_1(py_line_line_obj, py_line_line);
+STATIC MP_DEFINE_CONST_FUN_OBJ_1(py_line_x1_obj, py_line_x1);
+STATIC MP_DEFINE_CONST_FUN_OBJ_1(py_line_y1_obj, py_line_y1);
+STATIC MP_DEFINE_CONST_FUN_OBJ_1(py_line_x2_obj, py_line_x2);
+STATIC MP_DEFINE_CONST_FUN_OBJ_1(py_line_y2_obj, py_line_y2);
+STATIC MP_DEFINE_CONST_FUN_OBJ_1(py_line_length_obj, py_line_length);
+STATIC MP_DEFINE_CONST_FUN_OBJ_1(py_line_magnitude_obj, py_line_magnitude);
+STATIC MP_DEFINE_CONST_FUN_OBJ_1(py_line_theta_obj, py_line_theta);
+STATIC MP_DEFINE_CONST_FUN_OBJ_1(py_line_rho_obj, py_line_rho);
+
+STATIC const mp_rom_map_elem_t py_line_locals_dict_table[] = {
+    { MP_ROM_QSTR(MP_QSTR_line), MP_ROM_PTR(&py_line_line_obj) },
+    { MP_ROM_QSTR(MP_QSTR_x1), MP_ROM_PTR(&py_line_x1_obj) },
+    { MP_ROM_QSTR(MP_QSTR_y1), MP_ROM_PTR(&py_line_y1_obj) },
+    { MP_ROM_QSTR(MP_QSTR_x2), MP_ROM_PTR(&py_line_x2_obj) },
+    { MP_ROM_QSTR(MP_QSTR_y2), MP_ROM_PTR(&py_line_y2_obj) },
+    { MP_ROM_QSTR(MP_QSTR_length), MP_ROM_PTR(&py_line_length_obj) },
+    { MP_ROM_QSTR(MP_QSTR_magnitude), MP_ROM_PTR(&py_line_magnitude_obj) },
+    { MP_ROM_QSTR(MP_QSTR_theta), MP_ROM_PTR(&py_line_theta_obj) },
+    { MP_ROM_QSTR(MP_QSTR_rho), MP_ROM_PTR(&py_line_rho_obj) },
+};
+
+STATIC MP_DEFINE_CONST_DICT(py_line_locals_dict, py_line_locals_dict_table);
+
+static const mp_obj_type_t py_line_type = {
+    { &mp_type_type },
+    .name = MP_QSTR_line,
+    .print = py_line_print,
+    .subscr = py_line_subscr,
+    .locals_dict = (mp_obj_t) &py_line_locals_dict,
+};
+
+static mp_obj_t py_image_get_regression(uint n_args, const mp_obj_t *args, mp_map_t *kw_args)
+{
+    image_t *arg_img = py_image_cobj(args[0]);
+    PY_ASSERT_FALSE_MSG(IM_IS_JPEG(arg_img), "Operation not supported on JPEG or RAW frames.");
+
+    rectangle_t roi;
+    py_helper_lookup_rectangle(kw_args, arg_img, &roi);
+
+    mp_uint_t arg_thresholds_len;
+    mp_obj_t *arg_thresholds;
+    mp_obj_get_array(args[1], &arg_thresholds_len, &arg_thresholds);
+    if (!arg_thresholds_len) return mp_const_none;
+
+    list_t thresholds;
+    list_init(&thresholds, sizeof(color_thresholds_list_lnk_data_t));
+
+    for(mp_uint_t i = 0; i < arg_thresholds_len; i++) {
+        mp_uint_t arg_threshold_len;
+        mp_obj_t *arg_threshold;
+        mp_obj_get_array(arg_thresholds[i], &arg_threshold_len, &arg_threshold);
+        if (arg_threshold_len) {
+            color_thresholds_list_lnk_data_t lnk_data;
+            lnk_data.LMin = (arg_threshold_len > 0) ? IM_MAX(IM_MIN(mp_obj_get_int(arg_threshold[0]),
+                            IM_MAX(COLOR_L_MAX, COLOR_GRAYSCALE_MAX)), IM_MIN(COLOR_L_MIN, COLOR_GRAYSCALE_MIN)) : IM_MIN(COLOR_L_MIN, COLOR_GRAYSCALE_MIN);
+            lnk_data.LMax = (arg_threshold_len > 1) ? IM_MAX(IM_MIN(mp_obj_get_int(arg_threshold[1]),
+                            IM_MAX(COLOR_L_MAX, COLOR_GRAYSCALE_MAX)), IM_MIN(COLOR_L_MIN, COLOR_GRAYSCALE_MIN)) : IM_MAX(COLOR_L_MAX, COLOR_GRAYSCALE_MAX);
+            lnk_data.AMin = (arg_threshold_len > 2) ? IM_MAX(IM_MIN(mp_obj_get_int(arg_threshold[2]), COLOR_A_MAX), COLOR_A_MIN) : COLOR_A_MIN;
+            lnk_data.AMax = (arg_threshold_len > 3) ? IM_MAX(IM_MIN(mp_obj_get_int(arg_threshold[3]), COLOR_A_MAX), COLOR_A_MIN) : COLOR_A_MAX;
+            lnk_data.BMin = (arg_threshold_len > 4) ? IM_MAX(IM_MIN(mp_obj_get_int(arg_threshold[4]), COLOR_B_MAX), COLOR_B_MIN) : COLOR_B_MIN;
+            lnk_data.BMax = (arg_threshold_len > 5) ? IM_MAX(IM_MIN(mp_obj_get_int(arg_threshold[5]), COLOR_B_MAX), COLOR_B_MIN) : COLOR_B_MAX;
+            color_thresholds_list_lnk_data_t lnk_data_tmp;
+            memcpy(&lnk_data_tmp, &lnk_data, sizeof(color_thresholds_list_lnk_data_t));
+            lnk_data.LMin = IM_MIN(lnk_data_tmp.LMin, lnk_data_tmp.LMax);
+            lnk_data.LMax = IM_MAX(lnk_data_tmp.LMin, lnk_data_tmp.LMax);
+            lnk_data.AMin = IM_MIN(lnk_data_tmp.AMin, lnk_data_tmp.AMax);
+            lnk_data.AMax = IM_MAX(lnk_data_tmp.AMin, lnk_data_tmp.AMax);
+            lnk_data.BMin = IM_MIN(lnk_data_tmp.BMin, lnk_data_tmp.BMax);
+            lnk_data.BMax = IM_MAX(lnk_data_tmp.BMin, lnk_data_tmp.BMax);
+            list_push_back(&thresholds, &lnk_data);
+        }
+    }
+
+    unsigned int x_stride = py_helper_lookup_int(kw_args, MP_OBJ_NEW_QSTR(MP_QSTR_x_stride), 2);
+    PY_ASSERT_TRUE_MSG(x_stride > 0, "x_stride must not be zero.");
+    unsigned int y_stride = py_helper_lookup_int(kw_args, MP_OBJ_NEW_QSTR(MP_QSTR_y_stride), 1);
+    PY_ASSERT_TRUE_MSG(y_stride > 0, "y_stride must not be zero.");
+    bool invert = py_helper_lookup_int(kw_args, MP_OBJ_NEW_QSTR(MP_QSTR_invert), false);
+    bool robust = py_helper_lookup_int(kw_args, MP_OBJ_NEW_QSTR(MP_QSTR_robust), false);
+
+    find_lines_list_lnk_data_t out;
+    fb_alloc_mark();
+    bool result = imlib_get_regression(&out, arg_img, &roi, x_stride, y_stride, &thresholds, invert, robust);
+    fb_alloc_free_till_mark();
+    list_free(&thresholds);
+    if (!result) return mp_const_none;
+
+    py_line_obj_t *o = m_new_obj(py_line_obj_t);
+    o->base.type = &py_line_type;
+    o->x1 = mp_obj_new_int(out.line.x1);
+    o->y1 = mp_obj_new_int(out.line.y1);
+    o->x2 = mp_obj_new_int(out.line.x2);
+    o->y2 = mp_obj_new_int(out.line.y2);
+    int x_diff = out.line.x2 - out.line.x1;
+    int y_diff = out.line.y2 - out.line.y1;
+    o->length = mp_obj_new_int(fast_roundf(fast_sqrtf((x_diff * x_diff) + (y_diff * y_diff))));
+    o->magnitude = mp_obj_new_int(out.magnitude);
+    o->theta = mp_obj_new_int(out.theta);
+    o->rho = mp_obj_new_int(out.rho);
+
+    return o;
+}
+
 // Blob Object //
 #define py_blob_obj_size 10
 typedef struct py_blob_obj {
@@ -2223,104 +2392,6 @@ static mp_obj_t py_image_find_blobs(uint n_args, const mp_obj_t *args, mp_map_t
     return objects_list;
 }
 
-// Line Object //
-#define py_line_obj_size 8
-typedef struct py_line_obj {
-    mp_obj_base_t base;
-    mp_obj_t x1, y1, x2, y2, length, magnitude, theta, rho;
-} py_line_obj_t;
-
-static void py_line_print(const mp_print_t *print, mp_obj_t self_in, mp_print_kind_t kind)
-{
-    py_line_obj_t *self = self_in;
-    mp_printf(print,
-              "{x1:%d, y1:%d, x2:%d, y2:%d, length:%d, magnitude:%d, theta:%d, rho:%d}",
-              mp_obj_get_int(self->x1),
-              mp_obj_get_int(self->y1),
-              mp_obj_get_int(self->x2),
-              mp_obj_get_int(self->y2),
-              mp_obj_get_int(self->length),
-              mp_obj_get_int(self->magnitude),
-              mp_obj_get_int(self->theta),
-              mp_obj_get_int(self->rho));
-}
-
-static mp_obj_t py_line_subscr(mp_obj_t self_in, mp_obj_t index, mp_obj_t value)
-{
-    if (value == MP_OBJ_SENTINEL) { // load
-        py_line_obj_t *self = self_in;
-        if (MP_OBJ_IS_TYPE(index, &mp_type_slice)) {
-            mp_bound_slice_t slice;
-            if (!mp_seq_get_fast_slice_indexes(py_line_obj_size, index, &slice)) {
-                mp_not_implemented("only slices with step=1 (aka None) are supported");
supported"); - } - mp_obj_tuple_t *result = mp_obj_new_tuple(slice.stop - slice.start, NULL); - mp_seq_copy(result->items, &(self->x1) + slice.start, result->len, mp_obj_t); - return result; - } - switch (mp_get_index(self->base.type, py_line_obj_size, index, false)) { - case 0: return self->x1; - case 1: return self->y1; - case 2: return self->x2; - case 3: return self->y2; - case 4: return self->length; - case 5: return self->magnitude; - case 6: return self->theta; - case 7: return self->rho; - } - } - return MP_OBJ_NULL; // op not supported -} - -mp_obj_t py_line_line(mp_obj_t self_in) -{ - return mp_obj_new_tuple(4, (mp_obj_t []) {((py_line_obj_t *) self_in)->x1, - ((py_line_obj_t *) self_in)->y1, - ((py_line_obj_t *) self_in)->x2, - ((py_line_obj_t *) self_in)->y2}); -} - -mp_obj_t py_line_x1(mp_obj_t self_in) { return ((py_line_obj_t *) self_in)->x1; } -mp_obj_t py_line_y1(mp_obj_t self_in) { return ((py_line_obj_t *) self_in)->y1; } -mp_obj_t py_line_x2(mp_obj_t self_in) { return ((py_line_obj_t *) self_in)->x2; } -mp_obj_t py_line_y2(mp_obj_t self_in) { return ((py_line_obj_t *) self_in)->y2; } -mp_obj_t py_line_length(mp_obj_t self_in) { return ((py_line_obj_t *) self_in)->length; } -mp_obj_t py_line_magnitude(mp_obj_t self_in) { return ((py_line_obj_t *) self_in)->magnitude; } -mp_obj_t py_line_theta(mp_obj_t self_in) { return ((py_line_obj_t *) self_in)->theta; } -mp_obj_t py_line_rho(mp_obj_t self_in) { return ((py_line_obj_t *) self_in)->rho; } - -STATIC MP_DEFINE_CONST_FUN_OBJ_1(py_line_line_obj, py_line_line); -STATIC MP_DEFINE_CONST_FUN_OBJ_1(py_line_x1_obj, py_line_x1); -STATIC MP_DEFINE_CONST_FUN_OBJ_1(py_line_y1_obj, py_line_y1); -STATIC MP_DEFINE_CONST_FUN_OBJ_1(py_line_x2_obj, py_line_x2); -STATIC MP_DEFINE_CONST_FUN_OBJ_1(py_line_y2_obj, py_line_y2); -STATIC MP_DEFINE_CONST_FUN_OBJ_1(py_line_length_obj, py_line_length); -STATIC MP_DEFINE_CONST_FUN_OBJ_1(py_line_magnitude_obj, py_line_magnitude); -STATIC MP_DEFINE_CONST_FUN_OBJ_1(py_line_theta_obj, py_line_theta); -STATIC MP_DEFINE_CONST_FUN_OBJ_1(py_line_rho_obj, py_line_rho); - -STATIC const mp_rom_map_elem_t py_line_locals_dict_table[] = { - { MP_ROM_QSTR(MP_QSTR_line), MP_ROM_PTR(&py_line_line_obj) }, - { MP_ROM_QSTR(MP_QSTR_x1), MP_ROM_PTR(&py_line_x1_obj) }, - { MP_ROM_QSTR(MP_QSTR_y1), MP_ROM_PTR(&py_line_y1_obj) }, - { MP_ROM_QSTR(MP_QSTR_x2), MP_ROM_PTR(&py_line_x2_obj) }, - { MP_ROM_QSTR(MP_QSTR_y2), MP_ROM_PTR(&py_line_y2_obj) }, - { MP_ROM_QSTR(MP_QSTR_length), MP_ROM_PTR(&py_line_length_obj) }, - { MP_ROM_QSTR(MP_QSTR_magnitude), MP_ROM_PTR(&py_line_magnitude_obj) }, - { MP_ROM_QSTR(MP_QSTR_theta), MP_ROM_PTR(&py_line_theta_obj) }, - { MP_ROM_QSTR(MP_QSTR_rho), MP_ROM_PTR(&py_line_rho_obj) }, -}; - -STATIC MP_DEFINE_CONST_DICT(py_line_locals_dict, py_line_locals_dict_table); - -static const mp_obj_type_t py_line_type = { - { &mp_type_type }, - .name = MP_QSTR_line, - .print = py_line_print, - .subscr = py_line_subscr, - .locals_dict = (mp_obj_t) &py_line_locals_dict, -}; - static mp_obj_t py_image_find_lines(uint n_args, const mp_obj_t *args, mp_map_t *kw_args) { image_t *arg_img = py_image_cobj(args[0]); @@ -3485,6 +3556,7 @@ STATIC MP_DEFINE_CONST_FUN_OBJ_1(py_image_mask_ellipse_obj, py_image_mask_ellips /* Image Statistics */ STATIC MP_DEFINE_CONST_FUN_OBJ_KW(py_image_get_histogram_obj, 1, py_image_get_histogram); STATIC MP_DEFINE_CONST_FUN_OBJ_KW(py_image_get_statistics_obj, 1, py_image_get_statistics); +STATIC MP_DEFINE_CONST_FUN_OBJ_KW(py_image_get_regression_obj, 2, py_image_get_regression); /* 
 STATIC MP_DEFINE_CONST_FUN_OBJ_KW(py_image_find_blobs_obj, 2, py_image_find_blobs);
 /* Shape Detection */
@@ -3579,6 +3651,7 @@ static const mp_map_elem_t locals_dict_table[] = {
     {MP_OBJ_NEW_QSTR(MP_QSTR_get_stats),        (mp_obj_t)&py_image_get_statistics_obj},
     {MP_OBJ_NEW_QSTR(MP_QSTR_get_statistics),   (mp_obj_t)&py_image_get_statistics_obj},
     {MP_OBJ_NEW_QSTR(MP_QSTR_statistics),       (mp_obj_t)&py_image_get_statistics_obj},
+    {MP_OBJ_NEW_QSTR(MP_QSTR_get_regression),   (mp_obj_t)&py_image_get_regression_obj},
     /* Color Tracking */
     {MP_OBJ_NEW_QSTR(MP_QSTR_find_blobs),       (mp_obj_t)&py_image_find_blobs_obj},
     /* Shape Detection */
diff --git a/src/omv/py/qstrdefsomv.h b/src/omv/py/qstrdefsomv.h
index f4fd038ec..6cdfd579e 100644
--- a/src/omv/py/qstrdefsomv.h
+++ b/src/omv/py/qstrdefsomv.h
@@ -407,10 +407,29 @@
 Q(b_max)
 Q(b_lq)
 Q(b_uq)
-// Find Blobs
-Q(find_blobs)
+// Get Regression
+Q(get_regression)
+// duplicate Q(roi)
 Q(x_stride)
 Q(y_stride)
+// duplicate Q(invert)
+Q(robust)
+// Line Object
+Q(line)
+// duplicate Q(line)
+Q(x1)
+Q(y1)
+Q(x2)
+Q(y2)
+Q(length)
+Q(magnitude)
+Q(theta)
+Q(rho)
+
+// Find Blobs
+Q(find_blobs)
+// duplicate Q(x_stride)
+// duplicate Q(y_stride)
 Q(area_threshold)
 Q(pixels_threshold)
 Q(merge)
@@ -442,17 +461,6 @@
 Q(find_lines)
 // duplicate Q(threshold)
 Q(theta_margin)
 Q(rho_margin)
-// Line Object
-Q(line)
-// duplicate Q(line)
-Q(x1)
-Q(y1)
-Q(x2)
-Q(y2)
-Q(length)
-Q(magnitude)
-Q(theta)
-Q(rho)
 // Find Line Segments
 Q(find_line_segments)
diff --git a/usr/examples/09-Feature-Detection/linear_regression_fast.py b/usr/examples/09-Feature-Detection/linear_regression_fast.py
new file mode 100644
index 000000000..f200e4ace
--- /dev/null
+++ b/usr/examples/09-Feature-Detection/linear_regression_fast.py
@@ -0,0 +1,43 @@
+# Fast Linear Regression Example
+#
+# This example shows off how to use the get_regression() method on your OpenMV Cam
+# to get the linear regression of a ROI. Using this method you can easily build
+# a robot which can track lines which all point in the same general direction
+# but are not actually connected. Use find_blobs() on lines that are nicely
+# connected for better filtering options and control.
+#
+# This is called the fast linear regression because we use the least-squares
+# method to fit the line. However, this method is NOT GOOD for images that
+# have a lot of (or really any) outlier points, which corrupt the line fit...
+
+THRESHOLD = (0, 100) # Grayscale threshold for dark things...
+BINARY_VISIBLE = True # Does binary first so you can see what the linear regression
+                      # is being run on... might lower FPS though.
+
+import sensor, image, time
+
+sensor.reset()
+sensor.set_pixformat(sensor.GRAYSCALE)
+sensor.set_framesize(sensor.QQVGA)
+sensor.skip_frames(time = 2000)
+clock = time.clock()
+
+while(True):
+    clock.tick()
+    img = sensor.snapshot().binary([THRESHOLD]) if BINARY_VISIBLE else sensor.snapshot()
+
+    # Returns a line object similar to line objects returned by find_lines() and
+    # find_line_segments(). You have x1(), y1(), x2(), y2(), length(),
+    # theta() (rotation in degrees), rho(), and magnitude().
+    #
+    # magnitude() represents how well the linear regression worked. It goes from
+    # [0, INF) where 0 is returned for a circle. The more linear the
+    # scene is the higher the magnitude.
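+    #
+    # For example, you could use magnitude() as a quality gate before acting on the
+    # line (the threshold of 4 below is a made-up value - tune it for your scene):
+    #
+    #     if line and line.magnitude() > 4:
+    #         pass # steer using line.theta() / line.rho() here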
+    line = img.get_regression([(255,255) if BINARY_VISIBLE else THRESHOLD])
+
+    if (line): img.draw_line(line.line(), color = 127)
+    print("FPS %f, mag = %s" % (clock.fps(), str(line.magnitude()) if (line) else "N/A"))
+
+# About negative rho values:
+#
+# A [theta+0:-rho] tuple is the same as [theta+180:+rho].
diff --git a/usr/examples/09-Feature-Detection/linear_regression_robust.py b/usr/examples/09-Feature-Detection/linear_regression_robust.py
new file mode 100644
index 000000000..9f24c618d
--- /dev/null
+++ b/usr/examples/09-Feature-Detection/linear_regression_robust.py
@@ -0,0 +1,45 @@
+# Robust Linear Regression Example
+#
+# This example shows off how to use the get_regression() method on your OpenMV Cam
+# to get the linear regression of a ROI. Using this method you can easily build
+# a robot which can track lines which all point in the same general direction
+# but are not actually connected. Use find_blobs() on lines that are nicely
+# connected for better filtering options and control.
+#
+# We're using the robust=True argument for get_regression() in this script which
+# computes the linear regression using a much more robust algorithm... but potentially
+# much slower. The robust algorithm runs in O(N^2) time on the image. So, YOU NEED
+# TO LIMIT THE NUMBER OF PIXELS the robust algorithm works on or it can actually
+# take seconds for the algorithm to give you a result... THRESHOLD VERY CAREFULLY!
+
+THRESHOLD = (0, 100) # Grayscale threshold for dark things...
+BINARY_VISIBLE = True # Does binary first so you can see what the linear regression
+                      # is being run on... might lower FPS though.
+
+import sensor, image, time
+
+sensor.reset()
+sensor.set_pixformat(sensor.GRAYSCALE)
+sensor.set_framesize(sensor.QQQVGA) # 80x60 (4,800 pixels) - O(N^2) max = 23,040,000.
+sensor.skip_frames(time = 2000)     # WARNING: If you use QQVGA it may take seconds
+clock = time.clock()                # to process a frame sometimes.
+
+while(True):
+    clock.tick()
+    img = sensor.snapshot().binary([THRESHOLD]) if BINARY_VISIBLE else sensor.snapshot()
+
+    # Returns a line object similar to line objects returned by find_lines() and
+    # find_line_segments(). You have x1(), y1(), x2(), y2(), length(),
+    # theta() (rotation in degrees), rho(), and magnitude().
+    #
+    # magnitude() represents how well the linear regression worked. It means something
+    # different for the robust linear regression. In general, the larger the value the
+    # better...
+    line = img.get_regression([(255,255) if BINARY_VISIBLE else THRESHOLD], robust = True)
+
+    if (line): img.draw_line(line.line(), color = 127)
+    print("FPS %f, mag = %s" % (clock.fps(), str(line.magnitude()) if (line) else "N/A"))
+
+# About negative rho values:
+#
+# A [theta+0:-rho] tuple is the same as [theta+180:+rho].
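+#
+# For example, [theta=10:rho=-20] and [theta=190:rho=+20] describe the same line,
+# because rotating theta by 180 degrees negates both cos(theta) and sin(theta) in
+# x*cos(theta) + y*sin(theta) = rho.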