diff --git a/src/omv/boards/OPENMV3/omv_boardconfig.h b/src/omv/boards/OPENMV3/omv_boardconfig.h index 9cb5a47e5..1feb6292d 100644 --- a/src/omv/boards/OPENMV3/omv_boardconfig.h +++ b/src/omv/boards/OPENMV3/omv_boardconfig.h @@ -56,6 +56,11 @@ // Enable Barcodes (42 KB). #define OMV_ENABLE_BARCODES +// Enable Phase Correlation +#ifdef OMV_ENABLE_ROTATION_CORR +#define OMV_ENABLE_PHASE_CORRELATION +#endif + // Enable LENET (200+ KB). #define OMV_ENABLE_LENET diff --git a/src/omv/img/apriltag.c b/src/omv/img/apriltag.c index a9fe06fe5..58663e685 100644 --- a/src/omv/img/apriltag.c +++ b/src/omv/img/apriltag.c @@ -12196,7 +12196,7 @@ void imlib_rotation_corr(image_t *img, float x_rotation, float y_rotation, float matd_t *A1 = matd_create(4, 3); MATD_EL(A1, 0, 0) = 1; MATD_EL(A1, 0, 1) = 0; MATD_EL(A1, 0, 2) = -img->w / 2.0; MATD_EL(A1, 1, 0) = 0; MATD_EL(A1, 1, 1) = 1; MATD_EL(A1, 1, 2) = -img->h / 2.0; - MATD_EL(A1, 2, 0) = 0; MATD_EL(A1, 2, 1) = 0; MATD_EL(A1, 2, 2) = 1; + MATD_EL(A1, 2, 0) = 0; MATD_EL(A1, 2, 1) = 0; MATD_EL(A1, 2, 2) = 0; MATD_EL(A1, 3, 0) = 0; MATD_EL(A1, 3, 1) = 0; MATD_EL(A1, 3, 2) = 1; // needed for h translation matd_t *RX = matd_create(4, 4); @@ -12232,7 +12232,8 @@ void imlib_rotation_corr(image_t *img, float x_rotation, float y_rotation, float matd_t *T1 = matd_op("M*M", R, A1); matd_t *T2 = matd_op("M*M", T, T1); - matd_t *T3 = matd_op("(M*M)^-1", A2, T2); + matd_t *T3 = matd_op("M*M", A2, T2); + matd_t *T4 = matd_inverse(T3); switch(img->bpp) { case IMAGE_BPP_BINARY: { @@ -12241,10 +12242,10 @@ void imlib_rotation_corr(image_t *img, float x_rotation, float y_rotation, float memcpy(tmp, img->data, ((img->w + UINT32_T_MASK) >> UINT32_T_SHIFT) * img->h); memset(img->data, 0, ((img->w + UINT32_T_MASK) >> UINT32_T_SHIFT) * img->h); - for (int y = 0, yy = img->h; y < yy; y++) { + if (T4) for (int y = 0, yy = img->h; y < yy; y++) { uint32_t *row_ptr = IMAGE_COMPUTE_BINARY_PIXEL_ROW_PTR(img, y); for (int x = 0, xx = img->w; x < xx; x++) { - float sourceX, sourceY; homography_project(T3, x, y, &sourceX, &sourceY); + float sourceX, sourceY; homography_project(T4, x, y, &sourceX, &sourceY); int sourceX2 = round(sourceX); int sourceY2 = round(sourceY); @@ -12265,10 +12266,10 @@ void imlib_rotation_corr(image_t *img, float x_rotation, float y_rotation, float memcpy(tmp, img->data, img->w * img->h * sizeof(uint8_t)); memset(img->data, 0, img->w * img->h * sizeof(uint8_t)); - for (int y = 0, yy = img->h; y < yy; y++) { + if (T4) for (int y = 0, yy = img->h; y < yy; y++) { uint8_t *row_ptr = IMAGE_COMPUTE_GRAYSCALE_PIXEL_ROW_PTR(img, y); for (int x = 0, xx = img->w; x < xx; x++) { - float sourceX, sourceY; homography_project(T3, x, y, &sourceX, &sourceY); + float sourceX, sourceY; homography_project(T4, x, y, &sourceX, &sourceY); int sourceX2 = round(sourceX); int sourceY2 = round(sourceY); @@ -12289,10 +12290,10 @@ void imlib_rotation_corr(image_t *img, float x_rotation, float y_rotation, float memcpy(tmp, img->data, img->w * img->h * sizeof(uint16_t)); memset(img->data, 0, img->w * img->h * sizeof(uint16_t)); - for (int y = 0, yy = img->h; y < yy; y++) { + if (T4) for (int y = 0, yy = img->h; y < yy; y++) { uint16_t *row_ptr = IMAGE_COMPUTE_RGB565_PIXEL_ROW_PTR(img, y); for (int x = 0, xx = img->w; x < xx; x++) { - float sourceX, sourceY; homography_project(T3, x, y, &sourceX, &sourceY); + float sourceX, sourceY; homography_project(T4, x, y, &sourceX, &sourceY); int sourceX2 = round(sourceX); int sourceY2 = round(sourceY); @@ -12312,6 +12313,7 @@ void 
imlib_rotation_corr(image_t *img, float x_rotation, float y_rotation, float } } + if (T4) matd_destroy(T4); matd_destroy(T3); matd_destroy(T2); matd_destroy(T1); diff --git a/src/omv/img/fft.c b/src/omv/img/fft.c index 8f59fae41..869697fc6 100644 --- a/src/omv/img/fft.c +++ b/src/omv/img/fft.c @@ -332,6 +332,18 @@ static void prepare_real_input(uint8_t *in, int in_len, float *out, int N_pow2) } } +static void prepare_real_input_again(float *in, int in_len, float *out, int N_pow2) +{ + for (int k = 0, l = 2 << N_pow2; k < l; k += 2) { + int m = bit_reverse(k, N_pow2); + out[m+0] = ((k+0) < in_len) ? in[(k*2)+0] : 0; + out[m+1] = ((k+1) < in_len) ? in[(k*2)+2] : 0; +// // Apply Hann Window (this is working on real numbers) +// out[m+0] *= get_hann(k+0, N_pow2); +// out[m+1] *= get_hann(k+1, N_pow2); + } +} + //// This works on complex numbers... //static void apply_hann_window(float *inout, int N_pow2, int stride) //{ @@ -466,6 +478,71 @@ void ifft1d_run(fft1d_controller_t *controller) fb_free(); } +void fft1d_mag(fft1d_controller_t *controller) +{ + for (int i = 0, j = 2 << controller->pow2; i < j; i += 2) { + float tmp_r = controller->data[i + 0]; + float tmp_i = controller->data[i + 1]; + controller->data[i + 0] = fast_sqrtf((tmp_r*tmp_r)+(tmp_i*tmp_i)); + controller->data[i + 1] = 0; + } +} + +void fft1d_phase(fft1d_controller_t *controller) +{ + for (int i = 0, j = 2 << controller->pow2; i < j; i += 2) { + float tmp_r = controller->data[i + 0]; + float tmp_i = controller->data[i + 1]; + controller->data[i + 0] = tmp_r ? fast_atan2f(tmp_i, tmp_r) : ((tmp_i < 0) ? (M_PI*1.5) : (M_PI*0.5)); + controller->data[i + 1] = 0; + } +} + +void fft1d_log(fft1d_controller_t *controller) +{ + for (int i = 0, j = 2 << controller->pow2; i < j; i += 2) { + float tmp_r = controller->data[i + 0]; + float tmp_i = controller->data[i + 1]; + controller->data[i + 0] = fast_log(fast_sqrtf((tmp_r*tmp_r)+(tmp_i*tmp_i))); + controller->data[i + 1] = tmp_r ? fast_atan2f(tmp_i, tmp_r) : ((tmp_i < 0) ? (M_PI*1.5) : (M_PI*0.5)); + } +} + +void fft1d_exp(fft1d_controller_t *controller) +{ + for (int i = 0, j = 2 << controller->pow2; i < j; i += 2) { + float tmp_r = controller->data[i + 0]; + float tmp_i = controller->data[i + 1]; + controller->data[i + 0] = fast_expf(tmp_r) * cosf(tmp_i); + controller->data[i + 1] = fast_expf(tmp_r) * sinf(tmp_i); + } +} + +void fft1d_swap(fft1d_controller_t *controller) +{ + for (int i = 0, j = ((1 << controller->pow2) / 2) * 2; i < j; i += 2) { + float tmp_r = controller->data[i + 0]; + float tmp_i = controller->data[i + 1]; + controller->data[i + 0] = controller->data[j + i + 0]; + controller->data[i + 1] = controller->data[j + i + 1]; + controller->data[j + i + 0] = tmp_r; + controller->data[j + i + 1] = tmp_i; + } +} + +void fft1d_run_again(fft1d_controller_t *controller) +{ + // We can speed up the FFT by packing data into both the real and imaginary + // values. This results in having to do an FFT of half the size normally. 
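A minimal sketch of the packing trick the comment above describes, assuming nothing beyond that comment: an N-point spectrum of real input can be recovered from an N/2-point complex FFT by packing adjacent real samples into the real and imaginary halves of each complex bin. The helper names `dft()` and `real_fft_via_packing()` are hypothetical; the naive DFT only stands in for the firmware's `do_fft()`/`unpack_fft()` pair and is for illustration, not a drop-in implementation.

```python
import cmath

def dft(x):  # naive O(n^2) DFT, purely for illustration
    n = len(x)
    return [sum(x[m] * cmath.exp(-2j * cmath.pi * k * m / n) for m in range(n))
            for k in range(n)]

def real_fft_via_packing(x):  # len(x) must be even
    n = len(x)
    # Pack adjacent real samples into one complex value each (real, imag).
    z = [complex(x[2 * m], x[2 * m + 1]) for m in range(n // 2)]
    Z = dft(z)                                   # half-size complex FFT
    X = []
    for k in range(n // 2):
        Zk = Z[k]
        Zmk = Z[(-k) % (n // 2)].conjugate()
        even = 0.5 * (Zk + Zmk)                  # spectrum of x[0::2]
        odd = -0.5j * (Zk - Zmk)                 # spectrum of x[1::2]
        X.append(even + cmath.exp(-2j * cmath.pi * k / n) * odd)  # combine/unpack
    return X                                     # bins 0 .. n/2 - 1 of the N-point FFT

x = [1.0, 3.0, -2.0, 0.5, 4.0, -1.0, 2.0, 0.0]
full = dft(x)
print(all(abs(a - b) < 1e-9
          for a, b in zip(real_fft_via_packing(x), full[:len(x) // 2])))  # -> True
```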
+ + float *h_buffer = fb_alloc((1 << controller->pow2) * sizeof(float)); + prepare_real_input_again(controller->data, 1 << controller->pow2, + h_buffer, controller->pow2 - 1); + do_fft(h_buffer, controller->pow2 - 1, 1); + unpack_fft(h_buffer, controller->data, controller->pow2 - 1); + fb_free(); +} + /////////////////////////////////////////////////////////////////////////////// void fft2d_alloc(fft2d_controller_t *controller, image_t *img, rectangle_t *r) @@ -518,9 +595,8 @@ void fft2d_run(fft2d_controller_t *controller) // The above operates on the rows and this fft operates on the columns. To // avoid having to transpose the array the fft takes a stride input. - for (int i = 0, ii = (2 << controller->w_pow2); i < ii; i += 2) { + for (int i = 0, ii = 2 << controller->w_pow2; i < ii; i += 2) { float *p = controller->data + i; - // Vertical FFTs are full FFTs... // apply_hann_window(p, controller->h_pow2, (1 << controller->w_pow2)); prepare_complex_input(p, p, controller->h_pow2, (1 << controller->w_pow2)); do_fft(p, controller->h_pow2, (1 << controller->w_pow2)); @@ -530,18 +606,169 @@ void fft2d_run(fft2d_controller_t *controller) void ifft2d_run(fft2d_controller_t *controller) { // Do columns... - for (int i = 0, ii = (2 << controller->w_pow2); i < ii; i += 2) { + for (int i = 0, ii = 2 << controller->w_pow2; i < ii; i += 2) { float *p = controller->data + i; - // Vertical FFTs are full FFTs... prepare_complex_input(p, p, controller->h_pow2, (1 << controller->w_pow2)); do_ifft(p, controller->h_pow2, (1 << controller->w_pow2)); } // Do rows... - for (int i = 0; i < controller->r.h; i++) { + for (int i = 0, ii = 1 << controller->h_pow2; i < ii; i++) { fft1d_controller_t fft1d_controller_i; fft1d_controller_i.pow2 = controller->w_pow2; fft1d_controller_i.data = controller->data + (i * (2 << controller->w_pow2)); ifft1d_run(&fft1d_controller_i); } } + +void fft2d_mag(fft2d_controller_t *controller) +{ + for (int i = 0, j = (1 << controller->h_pow2) * (1 << controller->w_pow2) * 2; i < j; i += 2) { + float tmp_r = controller->data[i + 0]; + float tmp_i = controller->data[i + 1]; + controller->data[i + 0] = fast_sqrtf((tmp_r*tmp_r)+(tmp_i*tmp_i)); + controller->data[i + 1] = 0; + } +} + +void fft2d_phase(fft2d_controller_t *controller) +{ + for (int i = 0, j = (1 << controller->h_pow2) * (1 << controller->w_pow2) * 2; i < j; i += 2) { + float tmp_r = controller->data[i + 0]; + float tmp_i = controller->data[i + 1]; + controller->data[i + 0] = tmp_r ? fast_atan2f(tmp_i, tmp_r) : ((tmp_i < 0) ? (M_PI*1.5) : (M_PI*0.5)); + controller->data[i + 1] = 0; + } +} + +void fft2d_log(fft2d_controller_t *controller) +{ + for (int i = 0, j = (1 << controller->h_pow2) * (1 << controller->w_pow2) * 2; i < j; i += 2) { + float tmp_r = controller->data[i + 0]; + float tmp_i = controller->data[i + 1]; + controller->data[i + 0] = fast_log(fast_sqrtf((tmp_r*tmp_r)+(tmp_i*tmp_i))); + controller->data[i + 1] = tmp_r ? fast_atan2f(tmp_i, tmp_r) : ((tmp_i < 0) ? (M_PI*1.5) : (M_PI*0.5)); + } +} + +void fft2d_exp(fft2d_controller_t *controller) +{ + for (int i = 0, j = (1 << controller->h_pow2) * (1 << controller->w_pow2) * 2; i < j; i += 2) { + float tmp_r = controller->data[i + 0]; + float tmp_i = controller->data[i + 1]; + controller->data[i + 0] = fast_expf(tmp_r) * cosf(tmp_i); + controller->data[i + 1] = fast_expf(tmp_r) * sinf(tmp_i); + } +} + +void fft2d_swap(fft2d_controller_t *controller) +{ + // Do rows... 
+ for (int i = 0, ii = 1 << controller->h_pow2; i < ii; i++) { + fft1d_controller_t fft1d_controller_i; + fft1d_controller_i.pow2 = controller->w_pow2; + fft1d_controller_i.data = controller->data + (i * (2 << controller->w_pow2)); + fft1d_swap(&fft1d_controller_i); + } + + // Do columns... + for (int x = 0, xx = 2 << controller->w_pow2; x < xx; x += 2) { + for (int y = 0, yy = (1 << controller->h_pow2) / 2; y < yy; y++) { + int i = (y * (2 << controller->w_pow2)) + x; + int j = yy * (2 << controller->w_pow2); + float tmp_r = controller->data[i + 0]; + float tmp_i = controller->data[i + 1]; + controller->data[i + 0] = controller->data[j + i + 0]; + controller->data[i + 1] = controller->data[j + i + 1]; + controller->data[j + i + 0] = tmp_r; + controller->data[j + i + 1] = tmp_i; + } + } +} + +void fft2d_linpolar(fft2d_controller_t *controller) +{ + int w = 1 << controller->w_pow2; + int h = 1 << controller->h_pow2; + int s = h * w * 2 * sizeof(float); + float *tmp = fb_alloc(s); + memcpy(tmp, controller->data, s); + memset(controller->data, 0, s); + + float w_2 = w / 2.0f; + float h_2 = h / 2.0f; + float rho_scale = fast_sqrtf((w_2 * w_2) + (h_2 * h_2)) / h; + float theta_scale = 360.0f / w; + + for (int y = 0; y < h; y++) { + float *row_ptr = controller->data + (y * w * 2); + float rho = y * rho_scale; + for (int x = 0; x < w; x++) { + int sourceX, sourceY; + int theta = 630 - fast_roundf(x * theta_scale); + if (theta >= 360) theta -= 360; + sourceX = fast_roundf((rho * cos_table[theta]) + w_2); + sourceY = fast_roundf((rho * sin_table[theta]) + h_2); + if ((0 <= sourceX) && (sourceX < w) && (0 <= sourceY) && (sourceY < h)) { + float *ptr = tmp + (sourceY * w * 2); + row_ptr[(x * 2) + 0] = ptr[(sourceX * 2) + 0]; + row_ptr[(x * 2) + 1] = ptr[(sourceX * 2) + 1]; + } + } + } + + fb_free(); +} + +void fft2d_logpolar(fft2d_controller_t *controller) +{ + int w = 1 << controller->w_pow2; + int h = 1 << controller->h_pow2; + int s = h * w * 2 * sizeof(float); + float *tmp = fb_alloc(s); + memcpy(tmp, controller->data, s); + memset(controller->data, 0, s); + + float w_2 = w / 2.0f; + float h_2 = h / 2.0f; + float rho_scale = fast_log(fast_sqrtf((w_2 * w_2) + (h_2 * h_2))) / h; + float theta_scale = 360.0f / w; + + for (int y = 0; y < h; y++) { + float *row_ptr = controller->data + (y * w * 2); + float rho = y * rho_scale; + for (int x = 0; x < w; x++) { + int sourceX, sourceY; + int theta = 630 - fast_roundf(x * theta_scale); + if (theta >= 360) theta -= 360; + sourceX = fast_roundf((fast_expf(rho) * cos_table[theta]) + w_2); + sourceY = fast_roundf((fast_expf(rho) * sin_table[theta]) + h_2); + if ((0 <= sourceX) && (sourceX < w) && (0 <= sourceY) && (sourceY < h)) { + float *ptr = tmp + (sourceY * w * 2); + row_ptr[(x * 2) + 0] = ptr[(sourceX * 2) + 0]; + row_ptr[(x * 2) + 1] = ptr[(sourceX * 2) + 1]; + } + } + } + + fb_free(); +} + +void fft2d_run_again(fft2d_controller_t *controller) +{ + for (int i = 0, ii = 1 << controller->h_pow2; i < ii; i++) { + fft1d_controller_t fft1d_controller_i; + fft1d_controller_i.pow2 = controller->w_pow2; + fft1d_controller_i.data = controller->data + (i * (2 << controller->w_pow2)); + fft1d_run_again(&fft1d_controller_i); + } + + // The above operates on the rows and this fft operates on the columns. To + // avoid having to transpose the array the fft takes a stride input. 
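A small plain-Python sketch of the stride idea mentioned in the comment above: because the buffer is row-major, column `c` lives at indexes `c, c+w, c+2w, ...`, so the column pass can walk the flat array with a stride of `w` instead of transposing it. The function name `transform_columns_in_place()` and the toy "transform" are illustrative assumptions, not firmware code.

```python
def transform_columns_in_place(flat, w, h, transform):
    # flat: row-major list of length w*h; columns are gathered with stride w.
    for c in range(w):
        col = [flat[r * w + c] for r in range(h)]   # strided gather
        out = transform(col)
        for r in range(h):
            flat[r * w + c] = out[r]                # strided scatter, in place

# Demo with a trivial "transform" (reverse each column) on a 4x2 grid.
buf = [0, 1,
       2, 3,
       4, 5,
       6, 7]
transform_columns_in_place(buf, w=2, h=4, transform=lambda col: col[::-1])
print(buf)   # -> [6, 7, 4, 5, 2, 3, 0, 1]
```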
+ for (int i = 0, ii = 2 << controller->w_pow2; i < ii; i += 2) { + float *p = controller->data + i; +// apply_hann_window(p, controller->h_pow2, (1 << controller->w_pow2)); + prepare_complex_input(p, p, controller->h_pow2, (1 << controller->w_pow2)); + do_fft(p, controller->h_pow2, (1 << controller->w_pow2)); + } +} diff --git a/src/omv/img/fft.h b/src/omv/img/fft.h index 7eee92f89..e67210d4c 100644 --- a/src/omv/img/fft.h +++ b/src/omv/img/fft.h @@ -20,6 +20,12 @@ void fft1d_alloc(fft1d_controller_t *controller, uint8_t *buf, int len); void fft1d_dealloc(); void fft1d_run(fft1d_controller_t *controller); void ifft1d_run(fft1d_controller_t *controller); +void fft1d_mag(fft1d_controller_t *controller); +void fft1d_phase(fft1d_controller_t *controller); +void fft1d_log(fft1d_controller_t *controller); +void fft1d_exp(fft1d_controller_t *controller); +void fft1d_swap(fft1d_controller_t *controller); // a.k.a MATLAB fftshift +void fft1d_run_again(fft1d_controller_t *controller); // Do FFT again on real mag/phase of the FFT. typedef struct fft2d_controller { image_t *img; rectangle_t r; @@ -30,4 +36,13 @@ void fft2d_alloc(fft2d_controller_t *controller, image_t *img, rectangle_t *r); void fft2d_dealloc(); void fft2d_run(fft2d_controller_t *controller); void ifft2d_run(fft2d_controller_t *controller); +void fft2d_mag(fft2d_controller_t *controller); +void fft2d_phase(fft2d_controller_t *controller); +void fft2d_log(fft2d_controller_t *controller); +void fft2d_exp(fft2d_controller_t *controller); +void fft2d_swap(fft2d_controller_t *controller); // a.k.a MATLAB fftshift +void fft2d_linpolar(fft2d_controller_t *controller); +void fft2d_logpolar(fft2d_controller_t *controller); +void fft2d_run_again(fft2d_controller_t *controller); // Do FFT again on real mag/phase of the FFT. 
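A hedged plain-Python sketch of the inverse-mapped log-polar resample behind `fft2d_logpolar()` (and the image-level `imlib_logpolar_int()`): each output row corresponds to an exponentially growing radius, each output column to an angle, and the output pixel samples the source at that polar coordinate. The angle origin/direction of the firmware's degree lookup table is not reproduced here, and `logpolar_remap()` is a hypothetical name.

```python
import math

def logpolar_remap(src, w, h):
    # src: h x w grid (list of row lists); returns the log-polar resampled grid.
    cx, cy = w / 2.0, h / 2.0
    rho_scale = math.log(math.hypot(cx, cy)) / h     # log(max radius) spread over h rows
    theta_scale = 2.0 * math.pi / w                  # one full turn spread over w columns
    dst = [[0] * w for _ in range(h)]
    for y in range(h):
        r = math.exp(y * rho_scale)                  # exponential radius per row
        for x in range(w):
            t = x * theta_scale
            sx = int(round(r * math.cos(t) + cx))    # sample source at (r, theta)
            sy = int(round(r * math.sin(t) + cy))
            if 0 <= sx < w and 0 <= sy < h:
                dst[y][x] = src[sy][sx]
    return dst

grid = [[x + y * 8 for x in range(8)] for y in range(8)]
print(logpolar_remap(grid, 8, 8)[0])   # row 0 samples a radius-1 ring around the center
```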
+// END #endif /* __FFT_H__ */ diff --git a/src/omv/img/imlib.h b/src/omv/img/imlib.h index 310c1831c..bfb982e17 100644 --- a/src/omv/img/imlib.h +++ b/src/omv/img/imlib.h @@ -1249,7 +1249,8 @@ void imlib_find_apriltags(list_t *out, image_t *ptr, rectangle_t *roi, apriltag_ void imlib_find_datamatrices(list_t *out, image_t *ptr, rectangle_t *roi, int effort); void imlib_find_barcodes(list_t *out, image_t *ptr, rectangle_t *roi); // Template Matching -void imlib_phasecorrelate(image_t *img0, image_t *img1, rectangle_t *roi0, rectangle_t *roi1, bool logpolar, float *x_offset, float *y_offset, float *response); +void imlib_phasecorrelate(image_t *img0, image_t *img1, rectangle_t *roi0, rectangle_t *roi1, bool logpolar, bool fix_rotation_scale, + float *x_translation, float *y_translation, float *rotation, float *scale, float *response); // LeNet (CNN for character recognition) #define LENGTH_KERNEL 5 @@ -1274,26 +1275,26 @@ void imlib_phasecorrelate(image_t *img0, image_t *img1, rectangle_t *roi0, recta #define LENET_MODEL_SIZE (51902) typedef struct lenet5 { - float weight0_1[INPUT][LAYER1][LENGTH_KERNEL][LENGTH_KERNEL]; - float weight2_3[LAYER2][LAYER3][LENGTH_KERNEL][LENGTH_KERNEL]; - float weight4_5[LAYER4][LAYER5][LENGTH_KERNEL][LENGTH_KERNEL]; - float weight5_6[LAYER5 * LENGTH_FEATURE5 * LENGTH_FEATURE5][LENET_OUTPUT_SIZE]; + float weight0_1[INPUT][LAYER1][LENGTH_KERNEL][LENGTH_KERNEL]; + float weight2_3[LAYER2][LAYER3][LENGTH_KERNEL][LENGTH_KERNEL]; + float weight4_5[LAYER4][LAYER5][LENGTH_KERNEL][LENGTH_KERNEL]; + float weight5_6[LAYER5 * LENGTH_FEATURE5 * LENGTH_FEATURE5][LENET_OUTPUT_SIZE]; - float bias0_1[LAYER1]; - float bias2_3[LAYER3]; - float bias4_5[LAYER5]; - float bias5_6[LENET_OUTPUT_SIZE]; + float bias0_1[LAYER1]; + float bias2_3[LAYER3]; + float bias4_5[LAYER5]; + float bias5_6[LENET_OUTPUT_SIZE]; } lenet5_t; typedef struct lenet5_feature { - float input[INPUT][LENGTH_FEATURE0][LENGTH_FEATURE0]; - float layer1[LAYER1][LENGTH_FEATURE1][LENGTH_FEATURE1]; - float layer2[LAYER2][LENGTH_FEATURE2][LENGTH_FEATURE2]; - float layer3[LAYER3][LENGTH_FEATURE3][LENGTH_FEATURE3]; - float layer4[LAYER4][LENGTH_FEATURE4][LENGTH_FEATURE4]; - float layer5[LAYER5][LENGTH_FEATURE5][LENGTH_FEATURE5]; - float output[LENET_OUTPUT_SIZE]; + float input[INPUT][LENGTH_FEATURE0][LENGTH_FEATURE0]; + float layer1[LAYER1][LENGTH_FEATURE1][LENGTH_FEATURE1]; + float layer2[LAYER2][LENGTH_FEATURE2][LENGTH_FEATURE2]; + float layer3[LAYER3][LENGTH_FEATURE3][LENGTH_FEATURE3]; + float layer4[LAYER4][LENGTH_FEATURE4][LENGTH_FEATURE4]; + float layer5[LAYER5][LENGTH_FEATURE5][LENGTH_FEATURE5]; + float output[LENET_OUTPUT_SIZE]; } lenet5_feature_t; diff --git a/src/omv/img/phasecorrelation.c b/src/omv/img/phasecorrelation.c index 406ceadb7..dd812bff0 100644 --- a/src/omv/img/phasecorrelation.c +++ b/src/omv/img/phasecorrelation.c @@ -138,148 +138,346 @@ void imlib_logpolar(image_t *img, bool linear, bool reverse) } // Note that both ROI widths and heights must be equal. 
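A standalone 1-D plain-Python sketch of the core operation the routine below performs in 2-D: form the Hadamard product of one spectrum with the conjugate of the other, normalize it to unit magnitude, inverse-transform, and read the circular shift off the peak index. Illustrative only; `dft()` and `phase_correlate_1d()` are hypothetical helpers standing in for the firmware FFTs.

```python
import cmath

def dft(x, inverse=False):
    n, s = len(x), (1 if inverse else -1)
    out = [sum(x[m] * cmath.exp(s * 2j * cmath.pi * k * m / n) for m in range(n))
           for k in range(n)]
    return [v / n for v in out] if inverse else out

def phase_correlate_1d(a, b):
    A, B = dft(a), dft(b)
    cps = []
    for ga, gb in zip(A, B):
        hp = ga * gb.conjugate()                    # Hadamard product with the conjugate
        mag = abs(hp)
        cps.append(hp / mag if mag > 0 else 0j)     # whiten: keep phase, drop magnitude
    corr = [v.real for v in dft(cps, inverse=True)]
    peak = max(range(len(corr)), key=lambda i: corr[i])
    # FFT-shift convention: peaks past the midpoint wrap around to negative shifts.
    shift = peak if peak < len(a) // 2 else peak - len(a)
    return shift, max(corr) / sum(corr)             # shift and a [0:1]-ish response

a = [0, 0, 1, 5, 1, 0, 0, 0]
b = [0, 0, 0, 0, 1, 5, 1, 0]                        # a shifted right by 2
shift, response = phase_correlate_1d(b, a)
print(shift, round(response, 3))                    # -> 2, with a response near 1.0
```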
-void imlib_phasecorrelate(image_t *img0, image_t *img1, rectangle_t *roi0, rectangle_t *roi1, bool logpolar, float *x_offset, float *y_offset, float *response) +void imlib_phasecorrelate(image_t *img0, image_t *img1, rectangle_t *roi0, rectangle_t *roi1, bool logpolar, bool fix_rotation_scale, + float *x_translation, float *y_translation, float *rotation, float *scale, float *response) { - image_t img0alt, img1alt; - rectangle_t roi0alt, roi1alt; + // Step 1 - Get Rotation/Scale Differences + if ((!logpolar) && fix_rotation_scale) { + fft2d_controller_t fft0, fft1; - if (logpolar) { + fft2d_alloc(&fft0, img0, roi0); + fft2d_alloc(&fft1, img1, roi1); - img0alt.w = roi0->w; - img0alt.h = roi0->h; - img0alt.bpp = img0->bpp; - img0alt.data = fb_alloc0(image_size(&img0alt)); - imlib_logpolar_int(&img0alt, img0, roi0, false, false); - roi0alt.x = 0; - roi0alt.y = 0; - roi0alt.w = roi0->w; - roi0alt.h = roi0->h; + fft2d_run(&fft0); + fft2d_run(&fft1); - img1alt.w = roi1->w; - img1alt.h = roi1->h; - img1alt.bpp = img1->bpp; - img1alt.data = fb_alloc0(image_size(&img1alt)); - imlib_logpolar_int(&img1alt, img1, roi1, false, false); - roi1alt.x = 0; - roi1alt.y = 0; - roi1alt.w = roi1->w; - roi1alt.h = roi1->h; - } + fft2d_mag(&fft0); + fft2d_mag(&fft1); - fft2d_controller_t fft0, fft1; + fft2d_swap(&fft0); + fft2d_swap(&fft1); - fft2d_alloc(&fft0, logpolar ? &img0alt : img0, logpolar ? &roi0alt : roi0); - fft2d_alloc(&fft1, logpolar ? &img1alt : img1, logpolar ? &roi1alt : roi1); + fft2d_logpolar(&fft0); + fft2d_logpolar(&fft1); - fft2d_run(&fft0); - fft2d_run(&fft1); + fft2d_run_again(&fft0); + fft2d_run_again(&fft1); - int w = (1 << fft0.w_pow2); - int h = (1 << fft0.h_pow2); - for (int i = 0, j = w * h * 2; i < j; i += 2) { - float ga_r = fft0.data[i+0]; - float ga_i = fft0.data[i+1]; - float gb_r = fft1.data[i+0]; - float gb_i = -fft1.data[i+1]; // complex conjugate... - float hp_r = (ga_r * gb_r) - (ga_i * gb_i); // hadamard product - float hp_i = (ga_r * gb_i) + (ga_i * gb_r); // hadamard product - float mag = 1 / fast_sqrtf((hp_r*hp_r)+(hp_i*hp_i)); // magnitude - // Replace first fft with phase correlation... - fft0.data[i+0] = hp_r * mag; - fft0.data[i+1] = hp_i * mag; - } + int w = (1 << fft0.w_pow2); + int h = (1 << fft0.h_pow2); - ifft2d_run(&fft0); + for (int i = 0, j = h * w * 2; i < j; i += 2) { + float ga_r = fft0.data[i+0]; + float ga_i = fft0.data[i+1]; + float gb_r = fft1.data[i+0]; + float gb_i = -fft1.data[i+1]; // complex conjugate... + float hp_r = (ga_r * gb_r) - (ga_i * gb_i); // hadamard product + float hp_i = (ga_r * gb_i) + (ga_i * gb_r); // hadamard product + float mag = 1 / fast_sqrtf((hp_r*hp_r)+(hp_i*hp_i)); // magnitude + // Replace first fft with phase correlation... + fft0.data[i+0] = hp_r * mag; + fft0.data[i+1] = hp_i * mag; + } - float sum = 0; - float max = 0; - int off_x = 0; - int off_y = 0; - for (int i = 0; i < roi0->h; i++) { - for (int j = 0; j < roi0->w; j++) { - // Note that the output of the FFT is packed with real data in both - // the real and imaginary parts... - float f_r = fft0.data[(i * w * 2) + j]; - sum += f_r; - if (f_r > max) { - max = f_r; - off_x = j; - off_y = i; + ifft2d_run(&fft0); + + float sum = 0; + float max = 0; + int off_x = 0; + int off_y = 0; + + for (int i = 0; i < h; i++) { + for (int j = 0; j < w; j++) { + // Note that the output of the FFT is packed with real data in both + // the real and imaginary parts... (right side of the array is zero). 
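For reference, the `fft2d_swap()` calls above perform the usual fftshift before the log-polar resample: swap the left/right halves of every row, then the top/bottom halves of the rows, so the DC bin ends up in the centre of the magnitude spectrum. A small plain-Python sketch (hypothetical `fftshift_2d()` name, even dimensions assumed):

```python
def fftshift_2d(grid):
    # grid: h x w list of rows with h, w even; returns a copy with DC centred.
    h, w = len(grid), len(grid[0])
    rows = [row[w // 2:] + row[:w // 2] for row in grid]   # swap row halves
    return rows[h // 2:] + rows[:h // 2]                   # swap top/bottom halves

g = [[r * 4 + c for c in range(4)] for r in range(4)]
for row in fftshift_2d(g):
    print(row)
# The element that started at (0, 0) -- the "DC" position -- lands at (2, 2).
```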
+ float f_r = fft0.data[(i * w * 2) + j]; + sum += f_r; + if (f_r > max) { + max = f_r; + off_x = j; + off_y = i; + } } } - } - *response = max / sum; // normalize this to [0:1]. + float tmp_response = max / sum; // normalize this to [0:1]. - float f_sum = 0; - float f_off_x = 0; - float f_off_y = 0; - for (int i = -2; i < 2; i++) { - for (int j = -2; j < 2; j++) { + float f_sum = 0; + float f_off_x = 0; + float f_off_y = 0; - // Wrap around - int new_x = off_x + j; - if (new_x < 0) new_x += roi0->w; - if (new_x >= roi0->w) new_x -= roi0->w; + for (int i = -2; i < 2; i++) { + for (int j = -2; j < 2; j++) { - // Wrap around - int new_y = off_y + i; - if (new_y < 0) new_y += roi0->h; - if (new_y >= roi0->h) new_y -= roi0->h; + // Wrap around + int new_x = off_x + j; + if (new_x < 0) new_x += w; + if (new_x >= w) new_x -= w; - // Compute centroid. - float f_r = fft0.data[(new_y * w * 2) + new_x]; - f_off_x += (off_x + j) * f_r; // don't use new_x here - f_off_y += (off_y + i) * f_r; // don't use new_y here - f_sum += f_r; + // Wrap around + int new_y = off_y + i; + if (new_y < 0) new_y += h; + if (new_y >= h) new_y -= h; + + // Compute centroid. + float f_r = fft0.data[(new_y * w * 2) + new_x]; + f_off_x += (off_x + j) * f_r; // don't use new_x here + f_off_y += (off_y + i) * f_r; // don't use new_y here + f_sum += f_r; + } } - } - f_off_x /= f_sum; - f_off_y /= f_sum; + f_off_x /= f_sum; + f_off_y /= f_sum; - // FFT Shift X - if (f_off_x >= (roi0->w/2)) { - *x_offset = f_off_x - roi0->w; - } else { - *x_offset = f_off_x; - } + // FFT Shift X + if (f_off_x >= (w/2.0f)) { + f_off_x = f_off_x - w; + } else { + f_off_x = f_off_x; + } - // FFT Shift Y - if (f_off_y >= (roi0->h/2)) { - *y_offset = -(f_off_y - roi0->h); - } else { - *y_offset = -f_off_y; - } + // FFT Shift Y + if (f_off_y >= (h/2.0f)) { + f_off_y = -(f_off_y - h); + } else { + f_off_y = -f_off_y; + } - if ((*x_offset < (-roi0->w/2)) - || ((roi0->w/2) <= *x_offset) - || (*y_offset < (-roi0->h/2)) - || ((roi0->h/2) <= *y_offset) - || isnanf(*x_offset) - || isinff(*x_offset) - || isnanf(*y_offset) - || isinff(*y_offset) - || isnanf(*response) - || isinff(*response)) { // Noise Filter - *x_offset = 0; - *y_offset = 0; - *response = 0; - } + if ((f_off_x < (-w/2.0f)) + || ((w/2.0f) <= f_off_x) + || (f_off_y < (-h/2.0f)) + || ((h/2.0f) <= f_off_y) + || isnanf(f_off_x) + || isinff(f_off_x) + || isnanf(f_off_y) + || isinff(f_off_y) + || isnanf(tmp_response) + || isinff(tmp_response)) { // Noise Filter + f_off_x = 0; + f_off_y = 0; + tmp_response = 0; + } - fft2d_dealloc(); // fft1 - fft2d_dealloc(); // fft0 - - if (logpolar) { - fb_free(); // img1alt - fb_free(); // img0alt + fft2d_dealloc(); // fft1 + fft2d_dealloc(); // fft0 float w_2 = roi0->w / 2.0f; float h_2 = roi0->h / 2.0f; float rho_scale = fast_log(fast_sqrtf((w_2 * w_2) + (h_2 * h_2))) / roi0->h; float theta_scale = (2 * M_PI) / roi0->w; - *x_offset *= theta_scale; - *y_offset *= rho_scale; + *rotation = f_off_x * theta_scale; + *scale = (f_off_y * rho_scale) + 1; + } else { + *rotation = 0; + *scale = 0; } + + image_t img0_fixed; + rectangle_t roi0_fixed; + + // Step 2 - Fix Rotation/Scale Differences + if ((!logpolar) && fix_rotation_scale) { + + img0_fixed.w = roi0->w; + img0_fixed.h = roi0->h; + img0_fixed.bpp = img0->bpp; + img0_fixed.data = fb_alloc(image_size(&img0_fixed)); + + roi0_fixed.x = 0; + roi0_fixed.y = 0; + roi0_fixed.w = roi0->w; + roi0_fixed.h = roi0->h; + + switch(img0->bpp) { + case IMAGE_BPP_BINARY: { + for (int y = roi0->y, yy = roi0->y + roi0->h; y < yy; 
y++) { + uint32_t *row_ptr = IMAGE_COMPUTE_BINARY_PIXEL_ROW_PTR(img0, y); + for (int x = roi0->x, xx = roi0->x + roi0->w; x < xx; x++) { + IMAGE_PUT_BINARY_PIXEL(&img0_fixed, x, y, IMAGE_GET_BINARY_PIXEL_FAST(row_ptr, x)); + } + } + break; + } + case IMAGE_BPP_GRAYSCALE: { + for (int y = roi0->y, yy = roi0->y + roi0->h; y < yy; y++) { + uint8_t *row_ptr = IMAGE_COMPUTE_GRAYSCALE_PIXEL_ROW_PTR(img0, y); + for (int x = roi0->x, xx = roi0->x + roi0->w; x < xx; x++) { + IMAGE_PUT_GRAYSCALE_PIXEL(&img0_fixed, x, y, IMAGE_GET_GRAYSCALE_PIXEL_FAST(row_ptr, x)); + } + } + break; + } + case IMAGE_BPP_RGB565: { + for (int y = roi0->y, yy = roi0->y + roi0->h; y < yy; y++) { + uint16_t *row_ptr = IMAGE_COMPUTE_RGB565_PIXEL_ROW_PTR(img0, y); + for (int x = roi0->x, xx = roi0->x + roi0->w; x < xx; x++) { + IMAGE_PUT_RGB565_PIXEL(&img0_fixed, x, y, IMAGE_GET_RGB565_PIXEL_FAST(row_ptr, x)); + } + } + break; + } + default: { + memset(img0_fixed.data, 0, image_size(&img0_fixed)); + break; + } + } + + imlib_rotation_corr(&img0_fixed, 0, 0, *rotation, 0, 0, *scale); + } else { + memcpy(&img0_fixed, img0, sizeof(image_t)); + memcpy(&roi0_fixed, roi0, sizeof(rectangle_t)); + } + + // Step 3 - Get Translation Differences + { + image_t img0alt, img1alt; + rectangle_t roi0alt, roi1alt; + + if (logpolar) { + img0alt.w = roi0_fixed.w; + img0alt.h = roi0_fixed.h; + img0alt.bpp = img0_fixed.bpp; + img0alt.data = fb_alloc0(image_size(&img0alt)); + imlib_logpolar_int(&img0alt, &img0_fixed, &roi0_fixed, false, false); + roi0alt.x = 0; + roi0alt.y = 0; + roi0alt.w = roi0_fixed.w; + roi0alt.h = roi0_fixed.h; + + img1alt.w = roi1->w; + img1alt.h = roi1->h; + img1alt.bpp = img1->bpp; + img1alt.data = fb_alloc0(image_size(&img1alt)); + imlib_logpolar_int(&img1alt, img1, roi1, false, false); + roi1alt.x = 0; + roi1alt.y = 0; + roi1alt.w = roi1->w; + roi1alt.h = roi1->h; + } + + fft2d_controller_t fft0, fft1; + + fft2d_alloc(&fft0, logpolar ? &img0alt : &img0_fixed, logpolar ? &roi0alt : &roi0_fixed); + fft2d_alloc(&fft1, logpolar ? &img1alt : img1, logpolar ? &roi1alt : roi1); + + fft2d_run(&fft0); + fft2d_run(&fft1); + + int w = (1 << fft0.w_pow2); + int h = (1 << fft0.h_pow2); + + for (int i = 0, j = h * w * 2; i < j; i += 2) { + float ga_r = fft0.data[i+0]; + float ga_i = fft0.data[i+1]; + float gb_r = fft1.data[i+0]; + float gb_i = -fft1.data[i+1]; // complex conjugate... + float hp_r = (ga_r * gb_r) - (ga_i * gb_i); // hadamard product + float hp_i = (ga_r * gb_i) + (ga_i * gb_r); // hadamard product + float mag = 1 / fast_sqrtf((hp_r*hp_r)+(hp_i*hp_i)); // magnitude + fft0.data[i+0] = hp_r * mag; + fft0.data[i+1] = hp_i * mag; + } + + ifft2d_run(&fft0); + + float sum = 0; + float max = 0; + int off_x = 0; + int off_y = 0; + + for (int i = 0; i < h; i++) { + for (int j = 0; j < w; j++) { + // Note that the output of the FFT is packed with real data in both + // the real and imaginary parts... (right side of the array is zero). + float f_r = fft0.data[(i * w * 2) + j]; + sum += f_r; + if (f_r > max) { + max = f_r; + off_x = j; + off_y = i; + } + } + } + + *response = max / sum; // normalize this to [0:1]. + + float f_sum = 0; + float f_off_x = 0; + float f_off_y = 0; + + for (int i = -2; i < 2; i++) { + for (int j = -2; j < 2; j++) { + + // Wrap around + int new_x = off_x + j; + if (new_x < 0) new_x += w; + if (new_x >= w) new_x -= w; + + // Wrap around + int new_y = off_y + i; + if (new_y < 0) new_y += h; + if (new_y >= h) new_y -= h; + + // Compute centroid. 
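A 1-D plain-Python sketch of the sub-pixel refinement used in these centroid loops: take a small window around the integer peak, wrap the indexes around the circular correlation surface when reading values, but weight by the unwrapped positions, and return the intensity-weighted centroid. `subpixel_peak_1d()` is a hypothetical name for illustration.

```python
def subpixel_peak_1d(corr, peak, radius=2):
    # corr: circular correlation values; peak: integer argmax index.
    f_sum = 0.0
    f_off = 0.0
    for j in range(-radius, radius):          # mirrors the firmware's -2 .. +1 window
        idx = (peak + j) % len(corr)          # wrap around the circular surface
        f = corr[idx]
        f_off += (peak + j) * f               # weight by the *unwrapped* position
        f_sum += f
    return f_off / f_sum

corr = [0.0, 0.1, 0.9, 1.0, 0.6, 0.1, 0.0, 0.0]   # true peak lies between 2 and 3
print(round(subpixel_peak_1d(corr, 3), 3))         # -> a value a little below 3.0
```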
+ float f_r = fft0.data[(new_y * w * 2) + new_x]; + f_off_x += (off_x + j) * f_r; // don't use new_x here + f_off_y += (off_y + i) * f_r; // don't use new_y here + f_sum += f_r; + } + } + + f_off_x /= f_sum; + f_off_y /= f_sum; + + // FFT Shift X + if (f_off_x >= (w/2.0f)) { + *x_translation = f_off_x - w; + } else { + *x_translation = f_off_x; + } + + // FFT Shift Y + if (f_off_y >= (h/2.0f)) { + *y_translation = -(f_off_y - h); + } else { + *y_translation = -f_off_y; + } + + if ((*x_translation < (-w/2.0f)) + || ((w/2.0f) <= *x_translation) + || (*y_translation < (-h/2.0f)) + || ((h/2.0f) <= *y_translation) + || isnanf(*x_translation) + || isinff(*x_translation) + || isnanf(*y_translation) + || isinff(*y_translation) + || isnanf(*response) + || isinff(*response)) { // Noise Filter + *x_translation = 0; + *y_translation = 0; + *response = 0; + } + + fft2d_dealloc(); // fft1 + fft2d_dealloc(); // fft0 + + if (logpolar) { + fb_free(); // img1alt + fb_free(); // img0alt + + float w_2 = roi0->w / 2.0f; + float h_2 = roi0->h / 2.0f; + float rho_scale = fast_log(fast_sqrtf((w_2 * w_2) + (h_2 * h_2))) / roi0->h; + float theta_scale = (2 * M_PI) / roi0->w; + + *rotation = *x_translation * theta_scale; + *scale = (*y_translation * rho_scale) + 1; + *x_translation = 0; + *y_translation = 0; + } + } + + if ((!logpolar) && fix_rotation_scale) fb_free(); } diff --git a/src/omv/py/py_image.c b/src/omv/py/py_image.c index 500baf438..80ab39450 100644 --- a/src/omv/py/py_image.c +++ b/src/omv/py/py_image.c @@ -3642,20 +3642,23 @@ static mp_obj_t py_image_find_barcodes(uint n_args, const mp_obj_t *args, mp_map } #endif // OMV_ENABLE_BARCODES +#ifdef OMV_ENABLE_PHASE_CORRELATION // Displacement Object // -#define py_displacement_obj_size 3 +#define py_displacement_obj_size 5 typedef struct py_displacement_obj { mp_obj_base_t base; - mp_obj_t x_offset, y_offset, response; + mp_obj_t x_translation, y_translation, rotation, scale, response; } py_displacement_obj_t; static void py_displacement_print(const mp_print_t *print, mp_obj_t self_in, mp_print_kind_t kind) { py_displacement_obj_t *self = self_in; mp_printf(print, - "{x_offset:%f, y_offset:%f, response:%f}", - (double) mp_obj_get_float(self->x_offset), - (double) mp_obj_get_float(self->y_offset), + "{x_translation:%f, y_translation:%f, rotation:%f, scale:%f, response:%f}", + (double) mp_obj_get_float(self->x_translation), + (double) mp_obj_get_float(self->y_translation), + (double) mp_obj_get_float(self->rotation), + (double) mp_obj_get_float(self->scale), (double) mp_obj_get_float(self->response)); } @@ -3669,29 +3672,37 @@ static mp_obj_t py_displacement_subscr(mp_obj_t self_in, mp_obj_t index, mp_obj_ nlr_raise(mp_obj_new_exception_msg(&mp_type_OSError, "only slices with step=1 (aka None) are supported")); } mp_obj_tuple_t *result = mp_obj_new_tuple(slice.stop - slice.start, NULL); - mp_seq_copy(result->items, &(self->x_offset) + slice.start, result->len, mp_obj_t); + mp_seq_copy(result->items, &(self->x_translation) + slice.start, result->len, mp_obj_t); return result; } switch (mp_get_index(self->base.type, py_displacement_obj_size, index, false)) { - case 0: return self->x_offset; - case 1: return self->y_offset; - case 2: return self->response; + case 0: return self->x_translation; + case 1: return self->y_translation; + case 2: return self->rotation; + case 3: return self->scale; + case 4: return self->response; } } return MP_OBJ_NULL; // op not supported } -mp_obj_t py_displacement_x_offset(mp_obj_t self_in) { return ((py_displacement_obj_t *) 
self_in)->x_offset; } -mp_obj_t py_displacement_y_offset(mp_obj_t self_in) { return ((py_displacement_obj_t *) self_in)->y_offset; } +mp_obj_t py_displacement_x_translation(mp_obj_t self_in) { return ((py_displacement_obj_t *) self_in)->x_translation; } +mp_obj_t py_displacement_y_translation(mp_obj_t self_in) { return ((py_displacement_obj_t *) self_in)->y_translation; } +mp_obj_t py_displacement_rotation(mp_obj_t self_in) { return ((py_displacement_obj_t *) self_in)->rotation; } +mp_obj_t py_displacement_scale(mp_obj_t self_in) { return ((py_displacement_obj_t *) self_in)->scale; } mp_obj_t py_displacement_response(mp_obj_t self_in) { return ((py_displacement_obj_t *) self_in)->response; } -STATIC MP_DEFINE_CONST_FUN_OBJ_1(py_displacement_x_offset_obj, py_displacement_x_offset); -STATIC MP_DEFINE_CONST_FUN_OBJ_1(py_displacement_y_offset_obj, py_displacement_y_offset); +STATIC MP_DEFINE_CONST_FUN_OBJ_1(py_displacement_x_translation_obj, py_displacement_x_translation); +STATIC MP_DEFINE_CONST_FUN_OBJ_1(py_displacement_y_translation_obj, py_displacement_y_translation); +STATIC MP_DEFINE_CONST_FUN_OBJ_1(py_displacement_rotation_obj, py_displacement_rotation); +STATIC MP_DEFINE_CONST_FUN_OBJ_1(py_displacement_scale_obj, py_displacement_scale); STATIC MP_DEFINE_CONST_FUN_OBJ_1(py_displacement_response_obj, py_displacement_response); STATIC const mp_rom_map_elem_t py_displacement_locals_dict_table[] = { - { MP_ROM_QSTR(MP_QSTR_x_offset), MP_ROM_PTR(&py_displacement_x_offset_obj) }, - { MP_ROM_QSTR(MP_QSTR_y_offset), MP_ROM_PTR(&py_displacement_y_offset_obj) }, + { MP_ROM_QSTR(MP_QSTR_x_translation), MP_ROM_PTR(&py_displacement_x_translation_obj) }, + { MP_ROM_QSTR(MP_QSTR_y_translation), MP_ROM_PTR(&py_displacement_y_translation_obj) }, + { MP_ROM_QSTR(MP_QSTR_rotation), MP_ROM_PTR(&py_displacement_rotation_obj) }, + { MP_ROM_QSTR(MP_QSTR_scale), MP_ROM_PTR(&py_displacement_scale_obj) }, { MP_ROM_QSTR(MP_QSTR_response), MP_ROM_PTR(&py_displacement_response_obj) }, }; @@ -3721,112 +3732,25 @@ static mp_obj_t py_image_find_displacement(uint n_args, const mp_obj_t *args, mp PY_ASSERT_FALSE_MSG((roi.w != template_roi.w) || (roi.h != template_roi.h), "ROI(w,h) != TEMPLATE_ROI(w,h)"); - float x_offset, y_offset, response; + bool logpolar = py_helper_lookup_int(kw_args, MP_OBJ_NEW_QSTR(MP_QSTR_logpolar), false); + bool fix_rotation_scale = py_helper_lookup_int(kw_args, MP_OBJ_NEW_QSTR(MP_QSTR_fix_rotation_scale), false); + + float x, y, r, s, response; fb_alloc_mark(); - imlib_phasecorrelate(arg_img, arg_template_img, &roi, &template_roi, false, &x_offset, &y_offset, &response); + imlib_phasecorrelate(arg_img, arg_template_img, &roi, &template_roi, logpolar, fix_rotation_scale, &x, &y, &r, &s, &response); fb_alloc_free_till_mark(); py_displacement_obj_t *o = m_new_obj(py_displacement_obj_t); o->base.type = &py_displacement_type; - o->x_offset = mp_obj_new_float(x_offset); - o->y_offset = mp_obj_new_float(y_offset); - o->response = mp_obj_new_float(response); - - return o; -} - -// RotScale Object // -#define py_rotscale_obj_size 3 -typedef struct py_rotscale_obj { - mp_obj_base_t base; - mp_obj_t rot_offset, scale_offset, response; -} py_rotscale_obj_t; - -static void py_rotscale_print(const mp_print_t *print, mp_obj_t self_in, mp_print_kind_t kind) -{ - py_rotscale_obj_t *self = self_in; - mp_printf(print, - "{rot_offset:%f, scale_offset:%f, response:%f}", - (double) mp_obj_get_float(self->rot_offset), - (double) mp_obj_get_float(self->scale_offset), - (double) mp_obj_get_float(self->response)); -} - 
-static mp_obj_t py_rotscale_subscr(mp_obj_t self_in, mp_obj_t index, mp_obj_t value) -{ - if (value == MP_OBJ_SENTINEL) { // load - py_rotscale_obj_t *self = self_in; - if (MP_OBJ_IS_TYPE(index, &mp_type_slice)) { - mp_bound_slice_t slice; - if (!mp_seq_get_fast_slice_indexes(py_rotscale_obj_size, index, &slice)) { - nlr_raise(mp_obj_new_exception_msg(&mp_type_OSError, "only slices with step=1 (aka None) are supported")); - } - mp_obj_tuple_t *result = mp_obj_new_tuple(slice.stop - slice.start, NULL); - mp_seq_copy(result->items, &(self->rot_offset) + slice.start, result->len, mp_obj_t); - return result; - } - switch (mp_get_index(self->base.type, py_rotscale_obj_size, index, false)) { - case 0: return self->rot_offset; - case 1: return self->scale_offset; - case 2: return self->response; - } - } - return MP_OBJ_NULL; // op not supported -} - -mp_obj_t py_rotscale_rot_offset(mp_obj_t self_in) { return ((py_rotscale_obj_t *) self_in)->rot_offset; } -mp_obj_t py_rotscale_scale_offset(mp_obj_t self_in) { return ((py_rotscale_obj_t *) self_in)->scale_offset; } -mp_obj_t py_rotscale_response(mp_obj_t self_in) { return ((py_rotscale_obj_t *) self_in)->response; } - -STATIC MP_DEFINE_CONST_FUN_OBJ_1(py_rotscale_rot_offset_obj, py_rotscale_rot_offset); -STATIC MP_DEFINE_CONST_FUN_OBJ_1(py_rotscale_scale_offset_obj, py_rotscale_scale_offset); -STATIC MP_DEFINE_CONST_FUN_OBJ_1(py_rotscale_response_obj, py_rotscale_response); - -STATIC const mp_rom_map_elem_t py_rotscale_locals_dict_table[] = { - { MP_ROM_QSTR(MP_QSTR_rot_offset), MP_ROM_PTR(&py_rotscale_rot_offset_obj) }, - { MP_ROM_QSTR(MP_QSTR_scale_offset), MP_ROM_PTR(&py_rotscale_scale_offset_obj) }, - { MP_ROM_QSTR(MP_QSTR_response), MP_ROM_PTR(&py_rotscale_response_obj) }, -}; - -STATIC MP_DEFINE_CONST_DICT(py_rotscale_locals_dict, py_rotscale_locals_dict_table); - -static const mp_obj_type_t py_rotscale_type = { - { &mp_type_type }, - .name = MP_QSTR_rotscale, - .print = py_rotscale_print, - .subscr = py_rotscale_subscr, - .locals_dict = (mp_obj_t) &py_rotscale_locals_dict, -}; - -static mp_obj_t py_image_find_rotscale(uint n_args, const mp_obj_t *args, mp_map_t *kw_args) -{ - image_t *arg_img = py_image_cobj(args[0]); - PY_ASSERT_TRUE_MSG(IM_IS_MUTABLE(arg_img), "Image format is not supported."); - - image_t *arg_template_img = py_image_cobj(args[1]); - PY_ASSERT_TRUE_MSG(IM_IS_MUTABLE(arg_template_img), "Image format is not supported."); - - rectangle_t roi; - py_helper_lookup_rectangle(kw_args, arg_img, &roi); - - rectangle_t template_roi; - py_helper_lookup_rectangle_2(kw_args, MP_OBJ_NEW_QSTR(MP_QSTR_template_roi), arg_template_img, &template_roi); - - PY_ASSERT_FALSE_MSG((roi.w != template_roi.w) || (roi.h != template_roi.h), "ROI(w,h) != TEMPLATE_ROI(w,h)"); - - float rot_offset, scale_offset, response; - fb_alloc_mark(); - imlib_phasecorrelate(arg_img, arg_template_img, &roi, &template_roi, true, &rot_offset, &scale_offset, &response); - fb_alloc_free_till_mark(); - - py_rotscale_obj_t *o = m_new_obj(py_rotscale_obj_t); - o->base.type = &py_rotscale_type; - o->rot_offset = mp_obj_new_float(rot_offset); - o->scale_offset = mp_obj_new_float(scale_offset); + o->x_translation = mp_obj_new_float(x); + o->y_translation = mp_obj_new_float(y); + o->rotation = mp_obj_new_float(r); + o->scale = mp_obj_new_float(s); o->response = mp_obj_new_float(response); return o; } +#endif // OMV_ENABLE_PHASE_CORRELATION #ifdef OMV_ENABLE_LENET static mp_obj_t py_image_find_number(uint n_args, const mp_obj_t *args, mp_map_t *kw_args) @@ -4230,7 +4154,7 @@ 
STATIC MP_DEFINE_CONST_FUN_OBJ_1(py_image_histeq_obj, py_image_histeq); STATIC MP_DEFINE_CONST_FUN_OBJ_KW(py_image_lens_corr_obj, 1, py_image_lens_corr); #ifdef OMV_ENABLE_ROTATION_CORR STATIC MP_DEFINE_CONST_FUN_OBJ_KW(py_image_rotation_corr_obj, 1, py_image_rotation_corr); -#endif +#endif STATIC MP_DEFINE_CONST_FUN_OBJ_1(py_image_mask_ellipse_obj, py_image_mask_ellipse); /* Image Statistics */ STATIC MP_DEFINE_CONST_FUN_OBJ_2(py_image_get_similarity_obj, py_image_get_similarity); @@ -4264,8 +4188,9 @@ STATIC MP_DEFINE_CONST_FUN_OBJ_KW(py_image_find_datamatrices_obj, 1, py_image_fi STATIC MP_DEFINE_CONST_FUN_OBJ_KW(py_image_find_barcodes_obj, 1, py_image_find_barcodes); #endif /* Template Matching */ +#ifdef OMV_ENABLE_PHASE_CORRELATION STATIC MP_DEFINE_CONST_FUN_OBJ_KW(py_image_find_displacement_obj, 2, py_image_find_displacement); -STATIC MP_DEFINE_CONST_FUN_OBJ_KW(py_image_find_rotscale_obj, 2, py_image_find_rotscale); +#endif #ifdef OMV_ENABLE_LENET STATIC MP_DEFINE_CONST_FUN_OBJ_KW(py_image_find_number_obj, 1, py_image_find_number); #endif @@ -4381,8 +4306,9 @@ static const mp_map_elem_t locals_dict_table[] = { {MP_OBJ_NEW_QSTR(MP_QSTR_find_barcodes), (mp_obj_t)&py_image_find_barcodes_obj}, #endif /* Template Matching */ +#ifdef OMV_ENABLE_PHASE_CORRELATION {MP_OBJ_NEW_QSTR(MP_QSTR_find_displacement), (mp_obj_t)&py_image_find_displacement_obj}, - {MP_OBJ_NEW_QSTR(MP_QSTR_find_rotscale), (mp_obj_t)&py_image_find_rotscale_obj}, +#endif #ifdef OMV_ENABLE_LENET {MP_OBJ_NEW_QSTR(MP_QSTR_find_number), (mp_obj_t)&py_image_find_number_obj}, #endif diff --git a/src/omv/py/qstrdefsomv.h b/src/omv/py/qstrdefsomv.h index 9d4f5ca7b..1cbc4a8c8 100644 --- a/src/omv/py/qstrdefsomv.h +++ b/src/omv/py/qstrdefsomv.h @@ -663,20 +663,15 @@ Q(CODE128) Q(find_displacement) // duplicate Q(roi) Q(template_roi) +// duplicate Q(logpolar) +Q(fix_rotation_scale) Q(displacement) -Q(x_offset) -Q(y_offset) +// duplicate Q(x_translation) +// duplicate Q(y_translation) +// duplicate Q(rotation) +// duplicate Q(scale) Q(response) -// Find Rotation And Scale -Q(find_rotscale) -// duplicate Q(roi) -// duplicate Q(template_roi) -Q(rotscale) -Q(rot_offset) -Q(scale_offset) -// duplicate Q(response) - // LENET Q(find_number) diff --git a/usr/examples/18-MAVLink/mavlink_opticalflow.py b/usr/examples/18-MAVLink/mavlink_opticalflow.py index 4e81b90cc..775957a42 100644 --- a/usr/examples/18-MAVLink/mavlink_opticalflow.py +++ b/usr/examples/18-MAVLink/mavlink_opticalflow.py @@ -13,17 +13,10 @@ uart_baudrate = 115200 MAV_system_id = 1 MAV_component_id = 0x54 -MAV_OPTICAL_FLOW_confidence_threshold = 0.2 +MAV_OPTICAL_FLOW_confidence_threshold = 0.1 # Below 0.1 or so (YMMV) and the results are just noise. ############################################################################## -# Camera Setup - -sensor.reset() -sensor.set_pixformat(sensor.GRAYSCALE) -sensor.set_framesize(sensor.QQVGA) -sensor.skip_frames(time = 2000) - # Link Setup uart = pyb.UART(3, uart_baudrate, timeout_char = 1000) @@ -56,8 +49,8 @@ def send_optical_flow_packet(x, y, c): 0, 0, 0, - int(x * 10 * 4), # up sample by 4 - int(y * 10 * 4), # up sample by 4 + int(x * 10), + int(y * 10), MAV_OPTICAL_FLOW_id, int(c * 255)) temp = struct.pack(" 40x30 while(True): - clock.tick() - new_img = sensor.snapshot().mean_pooled(4, 4) # 160x120 -> 40x30 - x, y, c = new_img.find_displacement(old_img) - old_img = new_img + clock.tick() # Track elapsed milliseconds between snapshots(). + img = sensor.snapshot() # Take a picture and return the image. 
- if (not (math.isnan(x) or math.isnan(y) or math.isnan(c))) and (c > MAV_OPTICAL_FLOW_confidence_threshold): - send_optical_flow_packet(-x, -y, c) - print("dx %10f, dy %10f, confidence %10f - FPS %f" % (-x, -y, c, clock.fps())) + displacement = extra_fb.find_displacement(img) + extra_fb.replace(img) + + # Offset results are noisy without filtering so we drop some accuracy. + sub_pixel_x = int(displacement.x_translation() * 5) / 5.0 + sub_pixel_y = int(displacement.y_translation() * 5) / 5.0 + + if(displacement.response() > MAV_OPTICAL_FLOW_confidence_threshold): + send_optical_flow_packet(sub_pixel_x, sub_pixel_y, displacement.response()) + + print("{0:+f}x {1:+f}y {2} {3} FPS".format(sub_pixel_x, sub_pixel_y, + displacement.response(), + clock.fps())) else: - print("FPS %f" % (clock.fps())) + print(clock.fps()) diff --git a/usr/examples/22-Optical-Flow/absolute-rotation-scale.py b/usr/examples/22-Optical-Flow/absolute-rotation-scale.py index 2725eeb24..1dafa06ee 100644 --- a/usr/examples/22-Optical-Flow/absolute-rotation-scale.py +++ b/usr/examples/22-Optical-Flow/absolute-rotation-scale.py @@ -13,7 +13,7 @@ import sensor, image, time, math # NOTE!!! You have to use a small power of 2 resolution when using -# find_rotscale(). This is because the algorithm is powered by +# find_displacement(). This is because the algorithm is powered by # something called phase correlation which does the image comparison # using FFTs. A non-power of 2 resolution requires padding to a power # of 2 which reduces the usefulness of the algorithm results. Please @@ -48,18 +48,20 @@ while(True): # This algorithm is hard to test without a perfect jig... So, here's a cheat to see it works. # Put in a zoom value below and you should see the z output be equal to that. - if(1): + if(0): expected_zoom = 0.8 - img.rotation_corr(zoom=expected_zoom+0.05) + img.rotation_corr(zoom=expected_zoom) # For this example we never update the old image to measure absolute change. - rotscale_obj = extra_fb.find_rotscale(img) + displacement = extra_fb.find_displacement(img, logpolar=True) # Offset results are noisy without filtering so we drop some accuracy. - rotation_change = int(math.degrees(rotscale_obj.rot_offset()) * 5) / 5.0 - zoom_amount = 1.0 + rotscale_obj.scale_offset() + rotation_change = int(math.degrees(displacement.rotation()) * 5) / 5.0 + zoom_amount = displacement.scale() - if(rotscale_obj.response() > 0.1): # Below 0.1 or so (YMMV) and the results are just noise. - print("{0:+f}r {1:+f}z {2} {3} FPS".format(rotation_change, zoom_amount, rotscale_obj.response(), clock.fps())) + if(displacement.response() > 0.1): # Below 0.1 or so (YMMV) and the results are just noise. + print("{0:+f}r {1:+f}z {2} {3} FPS".format(rotation_change, zoom_amount, \ + displacement.response(), + clock.fps())) else: print(clock.fps()) diff --git a/usr/examples/22-Optical-Flow/absolute-translation.py b/usr/examples/22-Optical-Flow/absolute-translation.py index f1999c345..f4dd2e49f 100644 --- a/usr/examples/22-Optical-Flow/absolute-translation.py +++ b/usr/examples/22-Optical-Flow/absolute-translation.py @@ -41,13 +41,15 @@ while(True): img = sensor.snapshot() # Take a picture and return the image. # For this example we never update the old image to measure absolute change. - displacement_obj = extra_fb.find_displacement(img) + displacement = extra_fb.find_displacement(img) # Offset results are noisy without filtering so we drop some accuracy. 
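For orientation across these example updates, a hedged MicroPython sketch of the consolidated API this patch leaves behind: one `find_displacement()` call covers translation, and the same call with `logpolar=True` replaces the removed `find_rotscale()`. Only the keywords and accessors visible in this patch are relied on; the `sensor.alloc_extra_fb()` call and the `sensor.B64X64` framesize are assumptions taken from the surrounding examples, not guarantees.

```python
import sensor, math

sensor.reset()
sensor.set_pixformat(sensor.GRAYSCALE)
sensor.set_framesize(sensor.B64X64)      # assumed power-of-2 framesize, per the notes
sensor.skip_frames(time=2000)

extra_fb = sensor.alloc_extra_fb(sensor.width(), sensor.height(), sensor.GRAYSCALE)
extra_fb.replace(sensor.snapshot())      # reference frame

while True:
    img = sensor.snapshot()

    # Translation only (the old find_displacement() behaviour).
    d = extra_fb.find_displacement(img)
    x, y = d.x_translation(), d.y_translation()

    # Rotation/scale via the log-polar path (replaces find_rotscale()).
    r = extra_fb.find_displacement(img, logpolar=True)
    deg, zoom = math.degrees(r.rotation()), r.scale()

    # Or compensate rotation/scale first, then measure translation.
    t = extra_fb.find_displacement(img, fix_rotation_scale=True)

    if d.response() > 0.1:               # below ~0.1 the result is mostly noise
        print(x, y, deg, zoom, t.x_translation(), t.y_translation())
```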
- sub_pixel_x = int(displacement_obj.x_offset() * 5) / 5.0 - sub_pixel_y = int(displacement_obj.y_offset() * 5) / 5.0 + sub_pixel_x = int(displacement.x_translation() * 5) / 5.0 + sub_pixel_y = int(displacement.y_translation() * 5) / 5.0 - if(displacement_obj.response() > 0.1): # Below 0.1 or so (YMMV) and the results are just noise. - print("{0:+f}x {1:+f}y {2} {3} FPS".format(sub_pixel_x, sub_pixel_y, displacement_obj.response(), clock.fps())) + if(displacement.response() > 0.1): # Below 0.1 or so (YMMV) and the results are just noise. + print("{0:+f}x {1:+f}y {2} {3} FPS".format(sub_pixel_x, sub_pixel_y, + displacement.response(), + clock.fps())) else: print(clock.fps()) diff --git a/usr/examples/22-Optical-Flow/differential-rotation-scale.py b/usr/examples/22-Optical-Flow/differential-rotation-scale.py index c0a4b25e4..8e1b54c64 100644 --- a/usr/examples/22-Optical-Flow/differential-rotation-scale.py +++ b/usr/examples/22-Optical-Flow/differential-rotation-scale.py @@ -13,7 +13,7 @@ import sensor, image, time, math # NOTE!!! You have to use a small power of 2 resolution when using -# find_rotscale(). This is because the algorithm is powered by +# find_displacement(). This is because the algorithm is powered by # something called phase correlation which does the image comparison # using FFTs. A non-power of 2 resolution requires padding to a power # of 2 which reduces the usefulness of the algorithm results. Please @@ -50,16 +50,18 @@ while(True): # Put in a zoom value below and you should see the z output be equal to that. if(0): expected_zoom = 0.8 - extra_fb.rotation_corr(zoom=(2.05-expected_zoom)) + extra_fb.rotation_corr(zoom=(2.00-expected_zoom)) - rotscale_obj = extra_fb.find_rotscale(img) + displacement = extra_fb.find_displacement(img, logpolar=True) extra_fb.replace(img) # Offset results are noisy without filtering so we drop some accuracy. - rotation_change = int(math.degrees(rotscale_obj.rot_offset()) * 5) / 5.0 - zoom_amount = 1.0 + rotscale_obj.scale_offset() + rotation_change = int(math.degrees(displacement.rotation()) * 5) / 5.0 + zoom_amount = displacement.scale() - if(rotscale_obj.response() > 0.1): # Below 0.1 or so (YMMV) and the results are just noise. - print("{0:+f}r {1:+f}z {2} {3} FPS".format(rotation_change, zoom_amount, rotscale_obj.response(), clock.fps())) + if(displacement.response() > 0.1): # Below 0.1 or so (YMMV) and the results are just noise. + print("{0:+f}r {1:+f}z {2} {3} FPS".format(rotation_change, zoom_amount, \ + displacement.response(), + clock.fps())) else: print(clock.fps()) diff --git a/usr/examples/22-Optical-Flow/differential-translation.py b/usr/examples/22-Optical-Flow/differential-translation.py index 63afe2ca0..04416cf09 100644 --- a/usr/examples/22-Optical-Flow/differential-translation.py +++ b/usr/examples/22-Optical-Flow/differential-translation.py @@ -40,14 +40,16 @@ while(True): clock.tick() # Track elapsed milliseconds between snapshots(). img = sensor.snapshot() # Take a picture and return the image. - displacement_obj = extra_fb.find_displacement(img) + displacement = extra_fb.find_displacement(img) extra_fb.replace(img) # Offset results are noisy without filtering so we drop some accuracy. - sub_pixel_x = int(displacement_obj.x_offset() * 5) / 5.0 - sub_pixel_y = int(displacement_obj.y_offset() * 5) / 5.0 + sub_pixel_x = int(displacement.x_translation() * 5) / 5.0 + sub_pixel_y = int(displacement.y_translation() * 5) / 5.0 - if(displacement_obj.response() > 0.1): # Below 0.1 or so (YMMV) and the results are just noise. 
- print("{0:+f}x {1:+f}y {2} {3} FPS".format(sub_pixel_x, sub_pixel_y, displacement_obj.response(), clock.fps())) + if(displacement.response() > 0.1): # Below 0.1 or so (YMMV) and the results are just noise. + print("{0:+f}x {1:+f}y {2} {3} FPS".format(sub_pixel_x, sub_pixel_y, + displacement.response(), + clock.fps())) else: print(clock.fps()) diff --git a/usr/examples/22-Optical-Flow/image-patches-absolute-rotation-scale.py b/usr/examples/22-Optical-Flow/image-patches-absolute-rotation-scale.py index 3f400ea2c..414a105e6 100644 --- a/usr/examples/22-Optical-Flow/image-patches-absolute-rotation-scale.py +++ b/usr/examples/22-Optical-Flow/image-patches-absolute-rotation-scale.py @@ -9,7 +9,7 @@ # image at once. Instead it breaks up the process by working on groups # of pixels in the image. This gives you a "new" image of results. # -# Note that surfaces need to have some type of "edge" on them for the +# NOTE that surfaces need to have some type of "edge" on them for the # algorithm to work. A featureless surface produces crazy results. # NOTE: Unless you have a very nice test rig this example is hard to see usefulness of... @@ -25,7 +25,7 @@ BLOCK_H = 16 # pow2 import sensor, image, time, math # NOTE!!! You have to use a small power of 2 resolution when using -# find_rotscale(). This is because the algorithm is powered by +# find_displacement(). This is because the algorithm is powered by # something called phase correlation which does the image comparison # using FFTs. A non-power of 2 resolution requires padding to a power # of 2 which reduces the usefulness of the algorithm results. Please @@ -55,13 +55,13 @@ while(True): for y in range(0, sensor.height(), BLOCK_H): for x in range(0, sensor.width(), BLOCK_W): # For this example we never update the old image to measure absolute change. - rotscale_obj = extra_fb.find_rotscale(img, \ + displacement = extra_fb.find_displacement(img, logpolar=True, \ roi = (x, y, BLOCK_W, BLOCK_H), template_roi = (x, y, BLOCK_W, BLOCK_H)) # Below 0.1 or so (YMMV) and the results are just noise. - if(rotscale_obj.response() > 0.1): - rotation_change = rotscale_obj.rot_offset() - zoom_amount = 1.0 + rotscale_obj.scale_offset() + if(displacement.response() > 0.1): + rotation_change = displacement.rotation() + zoom_amount = displacement.scale() pixel_x = x + (BLOCK_W//2) + int(math.sin(rotation_change) * zoom_amount * (BLOCK_W//4)) pixel_y = y + (BLOCK_H//2) + int(math.cos(rotation_change) * zoom_amount * (BLOCK_H//4)) img.draw_line((x + BLOCK_W//2, y + BLOCK_H//2, pixel_x, pixel_y), \ diff --git a/usr/examples/22-Optical-Flow/image-patches-absolute-translation.py b/usr/examples/22-Optical-Flow/image-patches-absolute-translation.py index 2c60b74e2..0bfae8ca6 100644 --- a/usr/examples/22-Optical-Flow/image-patches-absolute-translation.py +++ b/usr/examples/22-Optical-Flow/image-patches-absolute-translation.py @@ -9,7 +9,7 @@ # image at once. Instead it breaks up the process by working on groups # of pixels in the image. This gives you a "new" image of results. # -# Note that surfaces need to have some type of "edge" on them for the +# NOTE that surfaces need to have some type of "edge" on them for the # algorithm to work. A featureless surface produces crazy results. BLOCK_W = 16 # pow2 @@ -53,13 +53,13 @@ while(True): for y in range(0, sensor.height(), BLOCK_H): for x in range(0, sensor.width(), BLOCK_W): # For this example we never update the old image to measure absolute change. 
- displacement_obj = extra_fb.find_displacement(img, \ + displacement = extra_fb.find_displacement(img, \ roi = (x, y, BLOCK_W, BLOCK_H), template_roi = (x, y, BLOCK_W, BLOCK_H)) # Below 0.1 or so (YMMV) and the results are just noise. - if(displacement_obj.response() > 0.1): - pixel_x = x + (BLOCK_W//2) + int(displacement_obj.x_offset()) - pixel_y = y + (BLOCK_H//2) + int(displacement_obj.y_offset()) + if(displacement.response() > 0.1): + pixel_x = x + (BLOCK_W//2) + int(displacement.x_translation()) + pixel_y = y + (BLOCK_H//2) + int(displacement.y_translation()) img.draw_line((x + BLOCK_W//2, y + BLOCK_H//2, pixel_x, pixel_y), \ color = 255) else: diff --git a/usr/examples/22-Optical-Flow/image-patches-differential-rotation-scale.py b/usr/examples/22-Optical-Flow/image-patches-differential-rotation-scale.py index 648591aef..bb1bc2eea 100644 --- a/usr/examples/22-Optical-Flow/image-patches-differential-rotation-scale.py +++ b/usr/examples/22-Optical-Flow/image-patches-differential-rotation-scale.py @@ -9,7 +9,7 @@ # image at once. Instead it breaks up the process by working on groups # of pixels in the image. This gives you a "new" image of results. # -# Note that surfaces need to have some type of "edge" on them for the +# NOTE that surfaces need to have some type of "edge" on them for the # algorithm to work. A featureless surface produces crazy results. # NOTE: Unless you have a very nice test rig this example is hard to see usefulness of... @@ -25,7 +25,7 @@ BLOCK_H = 16 # pow2 import sensor, image, time, math # NOTE!!! You have to use a small power of 2 resolution when using -# find_rotscale(). This is because the algorithm is powered by +# find_displacement(). This is because the algorithm is powered by # something called phase correlation which does the image comparison # using FFTs. A non-power of 2 resolution requires padding to a power # of 2 which reduces the usefulness of the algorithm results. Please @@ -54,13 +54,13 @@ while(True): for y in range(0, sensor.height(), BLOCK_H): for x in range(0, sensor.width(), BLOCK_W): - rotscale_obj = extra_fb.find_rotscale(img, \ + displacement = extra_fb.find_displacement(img, logpolar=True, \ roi = (x, y, BLOCK_W, BLOCK_H), template_roi = (x, y, BLOCK_W, BLOCK_H)) # Below 0.1 or so (YMMV) and the results are just noise. - if(rotscale_obj.response() > 0.1): - rotation_change = rotscale_obj.rot_offset() - zoom_amount = 1.0 + rotscale_obj.scale_offset() + if(displacement.response() > 0.1): + rotation_change = displacement.rotation() + zoom_amount = 1.0 + displacement.scale() pixel_x = x + (BLOCK_W//2) + int(math.sin(rotation_change) * zoom_amount * (BLOCK_W//4)) pixel_y = y + (BLOCK_H//2) + int(math.cos(rotation_change) * zoom_amount * (BLOCK_H//4)) img.draw_line((x + BLOCK_W//2, y + BLOCK_H//2, pixel_x, pixel_y), \ diff --git a/usr/examples/22-Optical-Flow/image-patches-differential-translation.py b/usr/examples/22-Optical-Flow/image-patches-differential-translation.py index 3c592d7f0..0a87c42f3 100644 --- a/usr/examples/22-Optical-Flow/image-patches-differential-translation.py +++ b/usr/examples/22-Optical-Flow/image-patches-differential-translation.py @@ -9,7 +9,7 @@ # image at once. Instead it breaks up the process by working on groups # of pixels in the image. This gives you a "new" image of results. # -# Note that surfaces need to have some type of "edge" on them for the +# NOTE that surfaces need to have some type of "edge" on them for the # algorithm to work. A featureless surface produces crazy results. 
BLOCK_W = 16 # pow2 @@ -52,13 +52,13 @@ while(True): for y in range(0, sensor.height(), BLOCK_H): for x in range(0, sensor.width(), BLOCK_W): - displacement_obj = extra_fb.find_displacement(img, \ + displacement = extra_fb.find_displacement(img, \ roi = (x, y, BLOCK_W, BLOCK_H), template_roi = (x, y, BLOCK_W, BLOCK_H)) # Below 0.1 or so (YMMV) and the results are just noise. - if(displacement_obj.response() > 0.1): - pixel_x = x + (BLOCK_W//2) + int(displacement_obj.x_offset()) - pixel_y = y + (BLOCK_H//2) + int(displacement_obj.y_offset()) + if(displacement.response() > 0.1): + pixel_x = x + (BLOCK_W//2) + int(displacement.x_translation()) + pixel_y = y + (BLOCK_H//2) + int(displacement.y_translation()) img.draw_line((x + BLOCK_W//2, y + BLOCK_H//2, pixel_x, pixel_y), \ color = 255) else: