diff --git a/src/omv/imlib/filter.c b/src/omv/imlib/filter.c
index 396462808..2ef5e6499 100644
--- a/src/omv/imlib/filter.c
+++ b/src/omv/imlib/filter.c
@@ -1249,23 +1249,26 @@ void imlib_morph(image_t *img,
     buf.w = img->w;
     buf.h = brows;
     buf.pixfmt = img->pixfmt;
-    const int32_t m_int = (int32_t) (65536.0 * m); // m is 1/kernel_weight
+    const int32_t m_int = fast_roundf(65536 * m);
+    const int32_t b_int = b << 16;
+    invert = invert ? 1 : 0; // ensure binary
 
     switch (img->pixfmt) {
         case PIXFORMAT_BINARY: {
            buf.data = fb_alloc(IMAGE_BINARY_LINE_LEN_BYTES(img) * brows, FB_ALLOC_NO_HINT);
 
-            for (int y = 0, yy = img->h; y < yy; y++) {
+            for (int y = 0; y < img->h; y++) {
                 uint32_t *row_ptr = IMAGE_COMPUTE_BINARY_PIXEL_ROW_PTR(img, y);
                 uint32_t *buf_row_ptr = IMAGE_COMPUTE_BINARY_PIXEL_ROW_PTR(&buf, (y % brows));
 
-                for (int x = 0, xx = img->w; x < xx; x++) {
+                for (int x = 0; x < img->w; x++) {
                     if (mask && (!image_get_mask_pixel(mask, x, y))) {
-                        IMAGE_PUT_BINARY_PIXEL_FAST(buf_row_ptr, x, IMAGE_GET_BINARY_PIXEL_FAST(row_ptr, x));
+                        int p = IMAGE_GET_BINARY_PIXEL_FAST(row_ptr, x);
+                        IMAGE_PUT_BINARY_PIXEL_FAST(buf_row_ptr, x, p);
                         continue; // Short circuit.
                     }
 
-                    int acc = 0, ptr = 0;
+                    int32_t acc = 0, ptr = 0;
 
                     if (x >= ksize && x < img->w - ksize && y >= ksize && y < img->h - ksize) {
                         for (int j = -ksize; j <= ksize; j++) {
@@ -1276,29 +1279,23 @@ void imlib_morph(image_t *img,
                         }
                     } else {
                         for (int j = -ksize; j <= ksize; j++) {
-                            uint32_t *k_row_ptr = IMAGE_COMPUTE_BINARY_PIXEL_ROW_PTR(img,
-                                                                                     IM_MIN(IM_MAX(y + j, 0), (img->h - 1)));
+                            int y_j = IM_MIN(IM_MAX(y + j, 0), (img->h - 1));
+                            uint32_t *k_row_ptr = IMAGE_COMPUTE_BINARY_PIXEL_ROW_PTR(img, y_j);
 
                             for (int k = -ksize; k <= ksize; k++) {
-                                acc += krn[ptr++] * IMAGE_GET_BINARY_PIXEL_FAST(k_row_ptr,
-                                                                                IM_MIN(IM_MAX(x + k, 0), (img->w - 1)));
+                                int x_k = IM_MIN(IM_MAX(x + k, 0), (img->w - 1));
+                                acc += krn[ptr++] * IMAGE_GET_BINARY_PIXEL_FAST(k_row_ptr, x_k);
                             }
                         }
                     }
-                    int32_t tmp = (acc * m_int) >> 16;
-                    int pixel = tmp + b;
-                    if (pixel < 0) {
-                        pixel = 0;
-                    } else if (pixel > 1) {
-                        pixel = 1;
-                    }
+
+                    int32_t tmp = (acc * m_int) + b_int;
+                    int pixel = __USAT_ASR(tmp, 1, 16);
 
                     if (threshold) {
-                        if (((pixel - offset) < IMAGE_GET_BINARY_PIXEL_FAST(row_ptr, x)) ^ invert) {
-                            pixel = COLOR_BINARY_MAX;
-                        } else {
-                            pixel = COLOR_BINARY_MIN;
-                        }
+                        pixel -= offset;
+                        pixel = pixel < IMAGE_GET_BINARY_PIXEL_FAST(row_ptr, x);
+                        pixel = pixel ^ invert;
                     }
 
                     IMAGE_PUT_BINARY_PIXEL_FAST(buf_row_ptr, x, pixel);
@@ -1313,7 +1310,7 @@ void imlib_morph(image_t *img,
             }
 
             // Copy any remaining lines from the buffer image...
-            for (int y = IM_MAX(img->h - ksize, 0), yy = img->h; y < yy; y++) {
+            for (int y = IM_MAX(img->h - ksize, 0); y < img->h; y++) {
                 memcpy(IMAGE_COMPUTE_BINARY_PIXEL_ROW_PTR(img, y),
                        IMAGE_COMPUTE_BINARY_PIXEL_ROW_PTR(&buf, (y % brows)),
                        IMAGE_BINARY_LINE_LEN_BYTES(img));
@@ -1325,53 +1322,161 @@ void imlib_morph(image_t *img,
         case PIXFORMAT_GRAYSCALE: {
             buf.data = fb_alloc(IMAGE_GRAYSCALE_LINE_LEN_BYTES(img) * brows, FB_ALLOC_NO_HINT);
 
-            for (int y = 0, yy = img->h; y < yy; y++) {
+            #if defined(ARM_MATH_DSP)
+            int32_t krn_4, krn_2_0, krn_5_3, krn_8_6, krn_7_1, offset_int, invert_ge, invert_lt;
+            if (ksize == 1) {
+                krn_4 = krn[4];
+                krn_2_0 = __PKHBT(krn[0], krn[2], 16);
+                krn_5_3 = __PKHBT(krn[3], krn[5], 16);
+                krn_8_6 = __PKHBT(krn[6], krn[8], 16);
+                krn_7_1 = __PKHBT(krn[1], krn[7], 16);
+                offset_int = __PKHBT(offset, offset, 16);
+                invert_ge = invert ? 0x00FF00FF : 0xFF00FF00;
+                invert_lt = invert ? 0xFF00FF00 : 0x00FF00FF;
+            }
+            #endif
+
+            for (int y = 0; y < img->h; y++) {
                 uint8_t *row_ptr = IMAGE_COMPUTE_GRAYSCALE_PIXEL_ROW_PTR(img, y);
                 uint8_t *buf_row_ptr = IMAGE_COMPUTE_GRAYSCALE_PIXEL_ROW_PTR(&buf, (y % brows));
 
-                for (int x = 0, xx = img->w; x < xx; x++) {
-                    if (mask && (!image_get_mask_pixel(mask, x, y))) {
-                        IMAGE_PUT_GRAYSCALE_PIXEL_FAST(buf_row_ptr, x, IMAGE_GET_GRAYSCALE_PIXEL_FAST(row_ptr, x));
-                        continue; // Short circuit.
-                    }
+                if (0) {
+                #if defined(ARM_MATH_DSP)
+                } else if ((ksize == 1) && (!mask)) {
+                    uint8_t *row_ptr_m1, *row_ptr_p1;
 
-                    int32_t acc = 0, ptr = 0;
-
-                    if (x >= ksize && x < img->w - ksize && y >= ksize && y < img->h - ksize) {
-                        for (int j = -ksize; j <= ksize; j++) {
-                            uint8_t *k_row_ptr = IMAGE_COMPUTE_GRAYSCALE_PIXEL_ROW_PTR(img, y + j);
-                            for (int k = -ksize; k <= ksize; k++) {
-                                acc += krn[ptr++] * IMAGE_GET_GRAYSCALE_PIXEL_FAST(k_row_ptr, x + k);
-                            }
-                        }
+                    if (y == 0) {
+                        row_ptr_m1 = row_ptr;
+                        row_ptr_p1 = row_ptr + ((img->h >= 2) ? img->w : 0);
+                    } else if (y >= (img->h - 1)) {
+                        row_ptr_m1 = row_ptr - img->w;
+                        row_ptr_p1 = row_ptr;
                     } else {
-                        for (int j = -ksize; j <= ksize; j++) {
-                            uint8_t *k_row_ptr = IMAGE_COMPUTE_GRAYSCALE_PIXEL_ROW_PTR(img,
-                                                                                       IM_MIN(IM_MAX(y + j, 0), (img->h - 1)));
+                        // get 2 neighboring rows
+                        row_ptr_m1 = row_ptr - img->w;
+                        row_ptr_p1 = row_ptr + img->w;
+                    }
 
-                            for (int k = -ksize; k <= ksize; k++) {
-                                acc += krn[ptr++] * IMAGE_GET_GRAYSCALE_PIXEL_FAST(k_row_ptr,
-                                                                                   IM_MIN(IM_MAX(x + k, 0), (img->w - 1)));
+                    // If the image is an odd width this will go for the last loop and we drop the last column.
+                    for (int x = 0; x < img->w; x += 2) {
+                        uint32_t row_0, row_1, row_2;
+
+                        if (x == 0) {
+                            if (img->w >= 3) {
+                                row_0 = *((uint16_t *) row_ptr_m1) | (*(row_ptr_m1 + 2) << 16);
+                                row_1 = *((uint16_t *) row_ptr) | (*(row_ptr + 2) << 16);
+                                row_2 = *((uint16_t *) row_ptr_p1) | (*(row_ptr_p1 + 2) << 16);
+                            } else if (img->w >= 2) {
+                                row_0 = *((uint16_t *) row_ptr_m1);
+                                row_0 = __REV(row_0) | row_0;
+                                row_1 = *((uint16_t *) row_ptr);
+                                row_1 = __REV(row_1) | row_1;
+                                row_2 = *((uint16_t *) row_ptr_p1);
+                                row_2 = __REV(row_2) | row_2;
+                            } else {
+                                row_0 = *row_ptr_m1 * 0x010101;
+                                row_1 = *row_ptr * 0x010101;
+                                row_2 = *row_ptr_p1 * 0x010101;
+                            }
+                            row_0 = (row_0 << 8) | (row_0 & 0xff);
+                            row_1 = (row_1 << 8) | (row_1 & 0xff);
+                            row_2 = (row_2 << 8) | (row_2 & 0xff);
+                        } else if (x == (img->w - 2)) {
+                            row_0 = *((uint32_t *) (row_ptr_m1 + x - 2));
+                            row_0 = (row_0 >> 8) | ((row_0 << 8) & 0xff000000);
+                            row_1 = *((uint32_t *) (row_ptr + x - 2));
+                            row_1 = (row_1 >> 8) | ((row_1 << 8) & 0xff000000);
+                            row_2 = *((uint32_t *) (row_ptr_p1 + x - 2));
+                            row_2 = (row_2 >> 8) | ((row_2 << 8) & 0xff000000);
+                        } else if (x >= (img->w - 1)) {
+                            row_0 = *((uint16_t *) (row_ptr_m1 + x - 1));
+                            row_0 = ((__UXTB_RORn(row_0, 8) * 0x0101) << 16) | row_0;
+                            row_1 = *((uint16_t *) (row_ptr + x - 1));
+                            row_1 = ((__UXTB_RORn(row_1, 8) * 0x0101) << 16) | row_1;
+                            row_2 = *((uint16_t *) (row_ptr_p1 + x - 1));
+                            row_2 = ((__UXTB_RORn(row_2, 8) * 0x0101) << 16) | row_2;
+                        } else {
+                            // get 3 neighboring rows
+                            row_0 = *((uint32_t *) (row_ptr_m1 + x - 1));
+                            row_1 = *((uint32_t *) (row_ptr + x - 1));
+                            row_2 = *((uint32_t *) (row_ptr_p1 + x - 1));
+                        }
+
+                        int32_t p0_4 = __UXTB_RORn(row_1, 8);
+                        int32_t p0_7_1 = __PKHBT(__UXTB_RORn(row_0, 8), __UXTB_RORn(row_2, 8), 16);
+                        int32_t pixel0 = krn_4 * p0_4;
+                        pixel0 = __SMLAD(__UXTB16(row_0), krn_2_0, pixel0);
+                        pixel0 = __SMLAD(__UXTB16(row_1), krn_5_3, pixel0);
+                        pixel0 = __SMLAD(__UXTB16(row_2), krn_8_6, pixel0);
+                        pixel0 = __SMLAD(p0_7_1, krn_7_1, pixel0);
+                        pixel0 = (pixel0 * m_int) + b_int;
+                        pixel0 = __USAT_ASR(pixel0, 8, 16);
+
+                        int32_t p1_4 = __UXTB_RORn(row_1, 16);
+                        int32_t p1_7_1 = __PKHBT(__UXTB_RORn(row_0, 16), __UXTB_RORn(row_2, 16), 16);
+                        int32_t pixel1 = krn_4 * p1_4;
+                        pixel1 = __SMLAD(__UXTB16_RORn(row_0, 8), krn_2_0, pixel1);
+                        pixel1 = __SMLAD(__UXTB16_RORn(row_1, 8), krn_5_3, pixel1);
+                        pixel1 = __SMLAD(__UXTB16_RORn(row_2, 8), krn_8_6, pixel1);
+                        pixel1 = __SMLAD(p1_7_1, krn_7_1, pixel1);
+                        pixel1 = (pixel1 * m_int) + b_int;
+                        pixel1 = __USAT_ASR(pixel1, 8, 16);
+
+                        // Re-pack to make thresholding faster.
+                        int32_t p1_p0 = __PKHBT(pixel0, pixel1, 16);
+
+                        if (threshold) {
+                            p1_p0 = __SSUB16(__SSUB16(p1_p0, offset_int), __PKHBT(p0_4, p1_4, 16));
+                            p1_p0 = __SEL(invert_ge, invert_lt);
+                        }
+
+                        IMAGE_PUT_GRAYSCALE_PIXEL_FAST(buf_row_ptr, x, p1_p0);
+
+                        if (x != (img->w - 1)) {
+                            IMAGE_PUT_GRAYSCALE_PIXEL_FAST(buf_row_ptr, x + 1, p1_p0 >> 16);
+                        }
+                    }
+                #endif
+                } else {
+                    for (int x = 0; x < img->w; x++) {
+                        if (mask && (!image_get_mask_pixel(mask, x, y))) {
+                            int p = IMAGE_GET_GRAYSCALE_PIXEL_FAST(row_ptr, x);
+                            IMAGE_PUT_GRAYSCALE_PIXEL_FAST(buf_row_ptr, x, p);
+                            continue; // Short circuit.
+                        }
+
+                        int32_t acc = 0, ptr = 0;
+
+                        if (x >= ksize && x < img->w - ksize && y >= ksize && y < img->h - ksize) {
+                            for (int j = -ksize; j <= ksize; j++) {
+                                uint8_t *k_row_ptr = IMAGE_COMPUTE_GRAYSCALE_PIXEL_ROW_PTR(img, y + j);
+                                for (int k = -ksize; k <= ksize; k++) {
+                                    acc += krn[ptr++] * IMAGE_GET_GRAYSCALE_PIXEL_FAST(k_row_ptr, x + k);
+                                }
+                            }
+                        } else {
+                            for (int j = -ksize; j <= ksize; j++) {
+                                int y_j = IM_MIN(IM_MAX(y + j, 0), (img->h - 1));
+                                uint8_t *k_row_ptr = IMAGE_COMPUTE_GRAYSCALE_PIXEL_ROW_PTR(img, y_j);
+
+                                for (int k = -ksize; k <= ksize; k++) {
+                                    int x_k = IM_MIN(IM_MAX(x + k, 0), (img->w - 1));
+                                    acc += krn[ptr++] * IMAGE_GET_GRAYSCALE_PIXEL_FAST(k_row_ptr, x_k);
+                                }
                             }
                         }
-                    }
-                    int32_t tmp = (acc * m_int) >> 16;
-                    int pixel = tmp + b;
-                    if (pixel > COLOR_GRAYSCALE_MAX) {
-                        pixel = COLOR_GRAYSCALE_MAX;
-                    } else if (pixel < 0) {
-                        pixel = 0;
-                    }
-                    if (threshold) {
-                        if (((pixel - offset) < IMAGE_GET_GRAYSCALE_PIXEL_FAST(row_ptr, x)) ^ invert) {
-                            pixel = COLOR_GRAYSCALE_BINARY_MAX;
-                        } else {
-                            pixel = COLOR_GRAYSCALE_BINARY_MIN;
+
+                        int32_t tmp = (acc * m_int) + b_int;
+                        int pixel = __USAT_ASR(tmp, 8, 16);
+
+                        if (threshold) {
+                            pixel -= offset;
+                            pixel = pixel < IMAGE_GET_GRAYSCALE_PIXEL_FAST(row_ptr, x);
+                            pixel = (pixel ^ invert) * COLOR_GRAYSCALE_BINARY_MAX;
                         }
-                    }
-                    IMAGE_PUT_GRAYSCALE_PIXEL_FAST(buf_row_ptr, x, pixel);
+                        IMAGE_PUT_GRAYSCALE_PIXEL_FAST(buf_row_ptr, x, pixel);
+                    }
                 }
 
                 if (y >= ksize) {
@@ -1383,7 +1488,7 @@ void imlib_morph(image_t *img,
             }
 
             // Copy any remaining lines from the buffer image...
-            for (int y = IM_MAX(img->h - ksize, 0), yy = img->h; y < yy; y++) {
+            for (int y = IM_MAX(img->h - ksize, 0); y < img->h; y++) {
                 memcpy(IMAGE_COMPUTE_GRAYSCALE_PIXEL_ROW_PTR(img, y),
                        IMAGE_COMPUTE_GRAYSCALE_PIXEL_ROW_PTR(&buf, (y % brows)),
                        IMAGE_GRAYSCALE_LINE_LEN_BYTES(img));
@@ -1395,75 +1500,232 @@ void imlib_morph(image_t *img,
         case PIXFORMAT_RGB565: {
             buf.data = fb_alloc(IMAGE_RGB565_LINE_LEN_BYTES(img) * brows, FB_ALLOC_NO_HINT);
 
-            for (int y = 0, yy = img->h; y < yy; y++) {
+            #if defined(ARM_MATH_DSP)
+            int32_t krn_5, krn_1_0, krn_4_3, krn_7_6, krn_8_2, offset_int, invert_ge, invert_lt;
+            if (ksize == 1) {
+                krn_5 = krn[5];
+                krn_1_0 = __PKHBT(krn[0], krn[1], 16);
+                krn_4_3 = __PKHBT(krn[3], krn[4], 16);
+                krn_7_6 = __PKHBT(krn[6], krn[7], 16);
+                krn_8_2 = __PKHBT(krn[2], krn[8], 16);
+                offset_int = __PKHBT(offset, offset, 16);
+                invert_ge = invert ? 0xFFFFFFFF : 0x00000000;
+                invert_lt = invert ? 0x00000000 : 0xFFFFFFFF;
+            }
+            #endif
+
+            for (int y = 0; y < img->h; y++) {
                 uint16_t *row_ptr = IMAGE_COMPUTE_RGB565_PIXEL_ROW_PTR(img, y);
                 uint16_t *buf_row_ptr = IMAGE_COMPUTE_RGB565_PIXEL_ROW_PTR(&buf, (y % brows));
 
-                for (int x = 0, xx = img->w; x < xx; x++) {
-                    if (mask && (!image_get_mask_pixel(mask, x, y))) {
-                        IMAGE_PUT_RGB565_PIXEL_FAST(buf_row_ptr, x, IMAGE_GET_RGB565_PIXEL_FAST(row_ptr, x));
-                        continue; // Short circuit.
-                    }
+                if (0) {
+                #if defined(ARM_MATH_DSP)
+                } else if (0 && (ksize == 1) && (!mask)) {
+                    uint16_t *row_ptr_m1, *row_ptr_p1;
 
-                    int32_t tmp, r_acc = 0, g_acc = 0, b_acc = 0, ptr = 0;
-
-                    if (x >= ksize && x < img->w - ksize && y >= ksize && y < img->h - ksize) {
-                        for (int j = -ksize; j <= ksize; j++) {
-                            uint16_t *k_row_ptr = IMAGE_COMPUTE_RGB565_PIXEL_ROW_PTR(img, y + j);
-                            for (int k = -ksize; k <= ksize; k++) {
-                                int pixel = IMAGE_GET_RGB565_PIXEL_FAST(k_row_ptr, x + k);
-                                r_acc += krn[ptr] * COLOR_RGB565_TO_R5(pixel);
-                                g_acc += krn[ptr] * COLOR_RGB565_TO_G6(pixel);
-                                b_acc += krn[ptr++] * COLOR_RGB565_TO_B5(pixel);
-                            }
-                        }
+                    if (y == 0) {
+                        row_ptr_m1 = row_ptr;
+                        row_ptr_p1 = row_ptr + ((img->h >= 2) ? img->w : 0);
+                    } else if (y >= (img->h - 1)) {
+                        row_ptr_m1 = row_ptr - img->w;
+                        row_ptr_p1 = row_ptr;
                     } else {
-                        for (int j = -ksize; j <= ksize; j++) {
-                            uint16_t *k_row_ptr = IMAGE_COMPUTE_RGB565_PIXEL_ROW_PTR(img,
-                                                                                     IM_MIN(IM_MAX(y + j, 0), (img->h - 1)));
-                            for (int k = -ksize; k <= ksize; k++) {
-                                int pixel = IMAGE_GET_RGB565_PIXEL_FAST(k_row_ptr,
-                                                                        IM_MIN(IM_MAX(x + k, 0), (img->w - 1)));
-                                r_acc += krn[ptr] * COLOR_RGB565_TO_R5(pixel);
-                                g_acc += krn[ptr] * COLOR_RGB565_TO_G6(pixel);
-                                b_acc += krn[ptr++] * COLOR_RGB565_TO_B5(pixel);
+                        // get 2 neighboring rows
+                        row_ptr_m1 = row_ptr - img->w;
+                        row_ptr_p1 = row_ptr + img->w;
+                    }
+
+                    // If the image is an odd width this will go for the last loop and we drop the last column.
+                    for (int x = 0; x < img->w; x += 2) {
+                        uint32_t row_0[2], row_1[2], row_2[2];
+
+                        if (x == 0) {
+                            row_0[0] = *row_ptr_m1 * 0x10001;
+                            row_1[0] = *row_ptr * 0x10001;
+                            row_2[0] = *row_ptr_p1 * 0x10001;
+                            if (img->w >= 3) {
+                                row_0[1] = *((uint32_t *) (row_ptr_m1 + 1));
+                                row_1[1] = *((uint32_t *) (row_ptr + 1));
+                                row_2[1] = *((uint32_t *) (row_ptr_p1 + 1));
+                            } else if (img->w >= 2) {
+                                row_0[1] = row_ptr_m1[1] * 0x10001;
+                                row_1[1] = row_ptr[1] * 0x10001;
+                                row_2[1] = row_ptr_p1[1] * 0x10001;
+                            } else {
+                                row_0[1] = row_0[0];
+                                row_1[1] = row_1[0];
+                                row_2[1] = row_2[0];
+                            }
+                        } else if (x == (img->w - 2)) {
+                            row_0[0] = *((uint32_t *) (row_ptr_m1 + x - 1));
+                            row_0[1] = row_ptr_m1[x + 1] * 0x10001;
+                            row_1[0] = *((uint32_t *) (row_ptr + x - 1));
+                            row_1[1] = row_ptr[x + 1] * 0x10001;
+                            row_2[0] = *((uint32_t *) (row_ptr_p1 + x - 1));
+                            row_2[1] = row_ptr_p1[x + 1] * 0x10001;
+                        } else if (x >= (img->w - 1)) {
+                            row_0[0] = *((uint32_t *) (row_ptr_m1 + x - 1));
+                            row_0[1] = __PKHTB(row_0[0], row_0[0], 16);
+                            row_1[0] = *((uint32_t *) (row_ptr + x - 1));
+                            row_1[1] = __PKHTB(row_1[0], row_1[0], 16);
+                            row_2[0] = *((uint32_t *) (row_ptr_p1 + x - 1));
+                            row_2[1] = __PKHTB(row_2[0], row_2[0], 16);
+                        } else {
+                            // get 3 neighboring rows
+                            row_0[0] = *((uint32_t *) (row_ptr_m1 + x - 1));
+                            row_0[1] = *((uint32_t *) (row_ptr_m1 + x + 1));
+                            row_1[0] = *((uint32_t *) (row_ptr + x - 1));
+                            row_1[1] = *((uint32_t *) (row_ptr + x + 1));
+                            row_2[0] = *((uint32_t *) (row_ptr_p1 + x - 1));
+                            row_2[1] = *((uint32_t *) (row_ptr_p1 + x + 1));
+                        }
+
+                        int32_t p0_8_2 = __PKHBT(row_0[1], row_2[1], 16);
+
+                        int32_t p0_r_acc = ((row_1[1] >> 11) & 0x1F) * krn_5;
+                        p0_r_acc = __SMLAD((row_0[0] >> 11) & 0x1F001F, krn_1_0, p0_r_acc);
+                        p0_r_acc = __SMLAD((row_1[0] >> 11) & 0x1F001F, krn_4_3, p0_r_acc);
+                        p0_r_acc = __SMLAD((row_2[0] >> 11) & 0x1F001F, krn_7_6, p0_r_acc);
+                        p0_r_acc = __SMLAD((p0_8_2 >> 11) & 0x1F001F, krn_8_2, p0_r_acc);
+                        p0_r_acc = (p0_r_acc * m_int) + b_int;
+                        p0_r_acc = __USAT_ASR(p0_r_acc, 5, 16);
+
+                        int32_t p0_g_acc = ((row_1[1] >> 5) & 0x3F) * krn_5;
+                        p0_g_acc = __SMLAD((row_0[0] >> 5) & 0x3F003F, krn_1_0, p0_g_acc);
+                        p0_g_acc = __SMLAD((row_1[0] >> 5) & 0x3F003F, krn_4_3, p0_g_acc);
+                        p0_g_acc = __SMLAD((row_2[0] >> 5) & 0x3F003F, krn_7_6, p0_g_acc);
+                        p0_g_acc = __SMLAD((p0_8_2 >> 5) & 0x3F003F, krn_8_2, p0_g_acc);
+                        p0_g_acc = (p0_g_acc * m_int) + b_int;
+                        p0_g_acc = __USAT_ASR(p0_g_acc, 6, 16);
+
+                        int32_t p0_b_acc = (row_1[1] & 0x1F) * krn_5;
+                        p0_b_acc = __SMLAD(row_0[0] & 0x1F001F, krn_1_0, p0_b_acc);
+                        p0_b_acc = __SMLAD(row_1[0] & 0x1F001F, krn_4_3, p0_b_acc);
+                        p0_b_acc = __SMLAD(row_2[0] & 0x1F001F, krn_7_6, p0_b_acc);
+                        p0_b_acc = __SMLAD(p0_8_2 & 0x1F001F, krn_8_2, p0_b_acc);
+                        p0_b_acc = (p0_b_acc * m_int) + b_int;
+                        p0_b_acc = __USAT_ASR(p0_b_acc, 5, 16);
+
+                        int pixel0 = COLOR_R5_G6_B5_TO_RGB565(p0_r_acc, p0_g_acc, p0_b_acc);
+
+                        int32_t p1_8_2 = __PKHTB(row_2[1], row_0[1], 16);
+                        int32_t p1_1_0 = (row_0[1] << 16) | (row_0[0] >> 16);
+                        int32_t p1_4_3 = (row_1[1] << 16) | (row_1[0] >> 16);
+                        int32_t p1_7_6 = (row_2[1] << 16) | (row_2[0] >> 16);
+
+                        int32_t p1_r_acc = (row_1[1] >> 27) * krn_5;
+                        p1_r_acc = __SMLAD((p1_1_0 >> 11) & 0x1F001F, krn_1_0, p1_r_acc);
+                        p1_r_acc = __SMLAD((p1_4_3 >> 11) & 0x1F001F, krn_4_3, p1_r_acc);
+                        p1_r_acc = __SMLAD((p1_7_6 >> 11) & 0x1F001F, krn_7_6, p1_r_acc);
+                        p1_r_acc = __SMLAD((p1_8_2 >> 11) & 0x1F001F, krn_8_2, p1_r_acc);
+                        p1_r_acc = (p1_r_acc * m_int) + b_int;
+                        p1_r_acc = __USAT_ASR(p1_r_acc, 5, 16);
+
+                        int32_t p1_g_acc = ((row_1[1] >> 21) & 0x3F) * krn_5;
+                        p1_g_acc = __SMLAD((p1_1_0 >> 5) & 0x3F003F, krn_1_0, p1_g_acc);
+                        p1_g_acc = __SMLAD((p1_4_3 >> 5) & 0x3F003F, krn_4_3, p1_g_acc);
+                        p1_g_acc = __SMLAD((p1_7_6 >> 5) & 0x3F003F, krn_7_6, p1_g_acc);
+                        p1_g_acc = __SMLAD((p1_8_2 >> 5) & 0x3F003F, krn_8_2, p1_g_acc);
+                        p1_g_acc = (p1_g_acc * m_int) + b_int;
+                        p1_g_acc = __USAT_ASR(p1_g_acc, 6, 16);
+
+                        int32_t p1_b_acc = ((row_1[1] >> 16) & 0x1F) * krn_5;
+                        p1_b_acc = __SMLAD(p1_1_0 & 0x1F001F, krn_1_0, p1_b_acc);
+                        p1_b_acc = __SMLAD(p1_4_3 & 0x1F001F, krn_4_3, p1_b_acc);
+                        p1_b_acc = __SMLAD(p1_7_6 & 0x1F001F, krn_7_6, p1_b_acc);
+                        p1_b_acc = __SMLAD(p1_8_2 & 0x1F001F, krn_8_2, p1_b_acc);
+                        p1_b_acc = (p1_b_acc * m_int) + b_int;
+                        p1_b_acc = __USAT_ASR(p1_b_acc, 5, 16);
+
+                        int pixel1 = COLOR_R5_G6_B5_TO_RGB565(p1_r_acc, p1_g_acc, p1_b_acc);
+
+                        // Re-pack to make thresholding faster.
+                        int32_t p1_p0 = __PKHBT(pixel0, pixel1, 16);
+
+                        if (threshold) {
+                            int32_t r_p = __PKHBT(p0_r_acc, p1_r_acc, 16);
+                            int32_t g_p = __PKHBT(p0_g_acc, p1_g_acc, 16);
+                            int32_t b_p = __PKHBT(p0_b_acc, p1_b_acc, 16);
+                            int32_t r_l = (p1_4_3 >> 11) & 0x1F001F;
+                            int32_t g_l = (p1_4_3 >> 5) & 0x3F003F;
+                            int32_t b_l = p1_4_3 & 0x1F001F;
+                            // Note, since the above values are rgb565 versus rgb888 we adjust
+                            // the yuv transform below to account for the scale difference.
+                            // r5 to r8 scale = (r << 3) | (r >> 2) = 8.25 ~= 255/31
+                            // g6 to g8 scale = (g << 2) | (g >> 4) = 4.0625 ~= 255/63
+                            // b5 to b8 scale = (b << 3) | (b >> 2) = 8.25 ~= 255/31
+                            // r -> 38 * 8.25 = 313.5 -> 313
+                            // g -> 75 * 4.0625 = 304.6875 -> 305
+                            // b -> 15 * 8.25 = 123.75 -> 124
+                            int y_p = __UXTB16(((r_p * 313) + (g_p * 305) + (b_p * 124)) >> 7);
+                            int y_l = __UXTB16(((r_l * 313) + (g_l * 305) + (b_l * 124)) >> 7);
+                            p1_p0 = __SSUB16(__SSUB16(y_p, offset_int), y_l);
+                            p1_p0 = __SEL(invert_ge, invert_lt);
+                        }
+
+                        if (x == (img->w - 1)) {
+                            // just put bottom
+                            IMAGE_PUT_RGB565_PIXEL_FAST(buf_row_ptr, x, p1_p0);
+                        } else {
+                            // put both
+                            *((uint32_t *) (buf_row_ptr + x)) = p1_p0;
+                        }
+                    }
+                #endif
+                } else {
+                    for (int x = 0; x < img->w; x++) {
+                        if (mask && (!image_get_mask_pixel(mask, x, y))) {
+                            int p = IMAGE_GET_RGB565_PIXEL_FAST(row_ptr, x);
+                            IMAGE_PUT_RGB565_PIXEL_FAST(buf_row_ptr, x, p);
+                            continue; // Short circuit.
+                        }
+
+                        int32_t r_acc = 0, g_acc = 0, b_acc = 0, ptr = 0;
+
+                        if (x >= ksize && x < img->w - ksize && y >= ksize && y < img->h - ksize) {
+                            for (int j = -ksize; j <= ksize; j++) {
+                                uint16_t *k_row_ptr = IMAGE_COMPUTE_RGB565_PIXEL_ROW_PTR(img, y + j);
+                                for (int k = -ksize; k <= ksize; k++) {
+                                    int pixel = IMAGE_GET_RGB565_PIXEL_FAST(k_row_ptr, x + k);
+                                    r_acc += krn[ptr] * COLOR_RGB565_TO_R5(pixel);
+                                    g_acc += krn[ptr] * COLOR_RGB565_TO_G6(pixel);
+                                    b_acc += krn[ptr++] * COLOR_RGB565_TO_B5(pixel);
+                                }
+                            }
+                        } else {
+                            for (int j = -ksize; j <= ksize; j++) {
+                                int y_j = IM_MIN(IM_MAX(y + j, 0), (img->h - 1));
+                                uint16_t *k_row_ptr = IMAGE_COMPUTE_RGB565_PIXEL_ROW_PTR(img, y_j);
+
+                                for (int k = -ksize; k <= ksize; k++) {
+                                    int x_k = IM_MIN(IM_MAX(x + k, 0), (img->w - 1));
+                                    int pixel = IMAGE_GET_RGB565_PIXEL_FAST(k_row_ptr, x_k);
+                                    r_acc += krn[ptr] * COLOR_RGB565_TO_R5(pixel);
+                                    g_acc += krn[ptr] * COLOR_RGB565_TO_G6(pixel);
+                                    b_acc += krn[ptr++] * COLOR_RGB565_TO_B5(pixel);
+                                }
                             }
                         }
-                    }
-                    tmp = (r_acc * m_int) >> 16;
-                    r_acc = tmp + b;
-                    if (r_acc > COLOR_R5_MAX) {
-                        r_acc = COLOR_R5_MAX;
-                    } else if (r_acc < 0) {
-                        r_acc = 0;
-                    }
-                    tmp = (g_acc * m_int) >> 16;
-                    g_acc = tmp + b;
-                    if (g_acc > COLOR_G6_MAX) {
-                        g_acc = COLOR_G6_MAX;
-                    } else if (g_acc < 0) {
-                        g_acc = 0;
-                    }
-                    tmp = (b_acc * m_int) >> 16;
-                    b_acc = tmp + b;
-                    if (b_acc > COLOR_B5_MAX) {
-                        b_acc = COLOR_B5_MAX;
-                    } else if (b_acc < 0) {
-                        b_acc = 0;
-                    }
-                    int pixel = COLOR_R5_G6_B5_TO_RGB565(r_acc, g_acc, b_acc);
+                        int32_t r_tmp = (r_acc * m_int) + b_int;
+                        int r_pixel = __USAT_ASR(r_tmp, 5, 16);
 
-                    if (threshold) {
-                        if (((COLOR_RGB565_TO_Y(pixel) - offset) <
-                             COLOR_RGB565_TO_Y(IMAGE_GET_RGB565_PIXEL_FAST(row_ptr, x))) ^ invert) {
-                            pixel = COLOR_RGB565_BINARY_MAX;
-                        } else {
-                            pixel = COLOR_RGB565_BINARY_MIN;
+                        int32_t g_tmp = (g_acc * m_int) + b_int;
+                        int g_pixel = __USAT_ASR(g_tmp, 6, 16);
+
+                        int32_t b_tmp = (b_acc * m_int) + b_int;
+                        int b_pixel = __USAT_ASR(b_tmp, 5, 16);
+
+                        int pixel = COLOR_R5_G6_B5_TO_RGB565(r_pixel, g_pixel, b_pixel);
+
+                        if (threshold) {
+                            pixel = COLOR_RGB565_TO_Y(pixel) - offset;
+                            pixel = pixel < COLOR_RGB565_TO_Y(IMAGE_GET_RGB565_PIXEL_FAST(row_ptr, x));
+                            pixel = (pixel ^ invert) * COLOR_RGB565_BINARY_MAX;
                         }
-                    }
-                    IMAGE_PUT_RGB565_PIXEL_FAST(buf_row_ptr, x, pixel);
+                        IMAGE_PUT_RGB565_PIXEL_FAST(buf_row_ptr, x, pixel);
+                    }
                 }
 
                 if (y >= ksize) {
@@ -1475,7 +1737,7 @@ void imlib_morph(image_t *img,
             }
 
            // Copy any remaining lines from the buffer image...
-            for (int y = IM_MAX(img->h - ksize, 0), yy = img->h; y < yy; y++) {
+            for (int y = IM_MAX(img->h - ksize, 0); y < img->h; y++) {
                 memcpy(IMAGE_COMPUTE_RGB565_PIXEL_ROW_PTR(img, y),
                        IMAGE_COMPUTE_RGB565_PIXEL_ROW_PTR(&buf, (y % brows)),
                        IMAGE_RGB565_LINE_LEN_BYTES(img));
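Note on the fixed-point rewrite: the old path computed `(acc * m_int) >> 16`, added `b`, and clamped per channel. The patch keeps `m` in Q16.16 (`m_int = fast_roundf(65536 * m)`), pre-scales the offset as `b_int = b << 16`, and then lets a single `__USAT_ASR(acc * m_int + b_int, bits, 16)` do the shift-down and the unsigned clamp. A minimal host-side sketch of the arithmetic, where `usat_asr` is a portable stand-in for the `__USAT_ASR` pattern (an assumption for illustration, not the CMSIS definition):

```c
#include <stdint.h>
#include <stdio.h>

// Portable stand-in for the __USAT_ASR(value, bits, shift) pattern:
// arithmetic shift right, then unsigned saturate to 'bits' bits.
static inline int32_t usat_asr(int32_t value, uint32_t bits, uint32_t shift) {
    int32_t v = value >> shift;
    int32_t max = (1 << bits) - 1;
    if (v < 0) return 0;
    if (v > max) return max;
    return v;
}

int main(void) {
    float m = 1.0f / 9.0f;                             // 1/kernel_weight for a 3x3 box filter
    int b = 2;                                         // brightness offset
    int32_t m_int = (int32_t) (65536.0f * m + 0.5f);   // Q16.16, like fast_roundf(65536 * m)
    int32_t b_int = b << 16;                           // offset pre-scaled to Q16.16

    int32_t acc = 9 * 200;                             // example accumulator: 3x3 window of value 200
    int32_t tmp = (acc * m_int) + b_int;
    int pixel = usat_asr(tmp, 8, 16);                  // back to 8-bit grayscale with saturation

    printf("pixel = %d\n", pixel);                     // 202, clamped to [0, 255]
    return 0;
}
```

With m = 1/9 and b = 2, a window full of 200s comes out as 202, matching the old multiply, shift, add, clamp sequence.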
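Note on the branchless threshold: `invert` is normalized to 0/1 up front, so the old `((pixel - offset) < original) ^ invert ? MAX : MIN` ternary collapses into a compare, an XOR, and a multiply by the format's binary maximum. A small sketch of the same idea on grayscale values (illustrative values only):

```c
#include <stdint.h>
#include <stdio.h>

#define COLOR_GRAYSCALE_BINARY_MAX 255

// Adaptive-threshold step applied after the convolution: compare the filtered
// value (minus an offset) against the original pixel and optionally invert.
static uint8_t threshold_pixel(int filtered, int original, int offset, int invert) {
    int p = filtered - offset;
    p = p < original;            // 1 if the original is brighter than filtered-offset
    p = p ^ (invert ? 1 : 0);    // flip the decision when invert is set
    return p * COLOR_GRAYSCALE_BINARY_MAX;
}

int main(void) {
    printf("%u\n", threshold_pixel(120, 130, 5, 0));   // 255: original above local result
    printf("%u\n", threshold_pixel(120, 100, 5, 0));   // 0
    printf("%u\n", threshold_pixel(120, 130, 5, 1));   // 0 with invert
    return 0;
}
```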
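Note on the DSP path: pairs of 3x3 kernel taps are packed into halfword pairs with `__PKHBT`, the matching pixel pairs are extracted with `__UXTB16`/`__UXTB_RORn`, and each `__SMLAD` performs two 16x16 multiply-accumulates, so every loop iteration produces two output pixels. A rough host-side model of the three intrinsics (the helpers below are assumptions for illustration, not the CMSIS implementations), showing how one `__SMLAD` accumulates the `krn[0]` and `krn[2]` taps against the pixels at x-1 and x+1:

```c
#include <stdint.h>
#include <stdio.h>

// Portable models of the ARMv7E-M intrinsics used by the patch (sketch only).
static inline uint32_t pkhbt(int16_t lo, int16_t hi) {
    return (uint16_t) lo | ((uint32_t) (uint16_t) hi << 16);   // __PKHBT(lo, hi, 16)
}
static inline uint32_t uxtb16(uint32_t w) {
    return (w & 0xFF) | (((w >> 16) & 0xFF) << 16);            // __UXTB16: keeps bytes 0 and 2
}
static inline int32_t smlad(uint32_t a, uint32_t b, int32_t acc) {
    int16_t a_lo = (int16_t) a, a_hi = (int16_t) (a >> 16);
    int16_t b_lo = (int16_t) b, b_hi = (int16_t) (b >> 16);
    return acc + a_lo * b_lo + a_hi * b_hi;                    // __SMLAD: dual 16x16 MAC
}

int main(void) {
    int16_t krn[9] = {1, 2, 1, 2, 4, 2, 1, 2, 1};   // example 3x3 smoothing kernel
    uint8_t row_m1[4] = {10, 20, 30, 40};           // grayscale row above the output pixel

    // row_0 holds pixels x-1..x+2 of the row above; __UXTB16 pulls out pixels
    // x-1 and x+1, which line up with kernel taps 0 and 2 packed by __PKHBT.
    uint32_t row_0 = row_m1[0] | (row_m1[1] << 8) | (row_m1[2] << 16) | ((uint32_t) row_m1[3] << 24);
    uint32_t krn_2_0 = pkhbt(krn[0], krn[2]);

    int32_t acc = smlad(uxtb16(row_0), krn_2_0, 0);
    printf("acc = %d\n", acc);                      // 1*10 + 1*30 = 40
    return 0;
}
```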
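Note on the 313/305/124 luma coefficients in the RGB565 threshold path: the in-diff comment derives them by scaling what appear to be the 38/75/15 (>> 7) RGB888 luma weights by the 5-to-8 and 6-to-8 bit expansion factors, so the luma can be computed directly on the packed 5/6/5 channels. A quick sanity check under that assumption:

```c
#include <stdio.h>

// Check the scaled luma coefficients used in the RGB565 DSP threshold path:
// Y ~= (38*R8 + 75*G8 + 15*B8) >> 7, computed directly on 5/6/5-bit channels.
int main(void) {
    int r5 = 31, g6 = 63, b5 = 31;                 // pure white in RGB565
    int r8 = (r5 << 3) | (r5 >> 2);                // 5-bit -> 8-bit expansion (255)
    int g8 = (g6 << 2) | (g6 >> 4);                // 6-bit -> 8-bit expansion (255)
    int b8 = (b5 << 3) | (b5 >> 2);

    int y_ref = ((38 * r8) + (75 * g8) + (15 * b8)) >> 7;      // reference on 8-bit channels
    int y_565 = ((313 * r5) + (305 * g6) + (124 * b5)) >> 7;   // scaled weights on 5/6/5 channels

    printf("y_ref=%d y_565=%d\n", y_ref, y_565);   // both 255 for white
    return 0;
}
```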