From f9e79d1d7b24e4904edefa182a3222303e03816d Mon Sep 17 00:00:00 2001 From: "Kwabena W. Agyeman" Date: Thu, 8 Aug 2024 19:59:28 -0700 Subject: [PATCH 1/2] imlib: Optimize debayering with Helium. --- src/omv/boards/OPENMV_RT1060/imlib_config.h | 4 + src/omv/imlib/bayer.c | 3170 ++++++++++++++----- src/omv/imlib/imlib.h | 2 + src/omv/imlib/jpege.c | 667 +--- src/omv/imlib/simd.h | 1781 +++++++++++ 5 files changed, 4136 insertions(+), 1488 deletions(-) create mode 100644 src/omv/imlib/simd.h diff --git a/src/omv/boards/OPENMV_RT1060/imlib_config.h b/src/omv/boards/OPENMV_RT1060/imlib_config.h index 367621d77..c54c34a96 100644 --- a/src/omv/boards/OPENMV_RT1060/imlib_config.h +++ b/src/omv/boards/OPENMV_RT1060/imlib_config.h @@ -148,4 +148,8 @@ // Stereo Imaging // #define IMLIB_ENABLE_STEREO_DISPARITY + +// Bayer +#define IMLIB_ENABLE_DEBAYER_OPTIMIZATION + #endif //__IMLIB_CONFIG_H__ diff --git a/src/omv/imlib/bayer.c b/src/omv/imlib/bayer.c index ec9cba7bf..22a0126ab 100644 --- a/src/omv/imlib/bayer.c +++ b/src/omv/imlib/bayer.c @@ -1,14 +1,22 @@ /* * This file is part of the OpenMV project. * - * Copyright (c) 2013-2021 Ibrahim Abdelkader - * Copyright (c) 2013-2021 Kwabena W. Agyeman + * Copyright (c) 2013-2024 Ibrahim Abdelkader + * Copyright (c) 2013-2024 Kwabena W. Agyeman * * This work is licensed under the MIT license, see the file LICENSE for details. * * Debayering Functions */ #include "imlib.h" +#include "simd.h" + +#define VBAYER_Y_STRIDE (2) +#define VBAYER_X_STRIDE ((UINT8_VECTOR_SIZE) / 2) +#define VBAYER_X_STRIDE_2X2 ((VBAYER_X_STRIDE) * 4) + +#define VBAYER_BUF_KSIZE ((VBAYER_Y_STRIDE) * 2) +#define VBAYER_BUF_BROWS ((VBAYER_Y_STRIDE) * 4) pixformat_t imlib_bayer_shift(pixformat_t pixfmt, int x, int y, bool transpose) { bool shift_right = x % 2; @@ -22,6 +30,8 @@ pixformat_t imlib_bayer_shift(pixformat_t pixfmt, int x, int y, bool transpose) return transpose ? PIXFORMAT_BAYER_GRBG : PIXFORMAT_BAYER_GBRG; } else if (shift_down) { return transpose ? PIXFORMAT_BAYER_GBRG : PIXFORMAT_BAYER_GRBG; + } else { + return pixfmt; } } case PIXFORMAT_BAYER_GBRG: { @@ -31,6 +41,8 @@ pixformat_t imlib_bayer_shift(pixformat_t pixfmt, int x, int y, bool transpose) return PIXFORMAT_BAYER_BGGR; } else if (shift_down) { return PIXFORMAT_BAYER_RGGB; + } else { + return pixfmt; } } case PIXFORMAT_BAYER_GRBG: { @@ -40,6 +52,8 @@ pixformat_t imlib_bayer_shift(pixformat_t pixfmt, int x, int y, bool transpose) return PIXFORMAT_BAYER_RGGB; } else if (shift_down) { return PIXFORMAT_BAYER_BGGR; + } else { + return pixfmt; } } case PIXFORMAT_BAYER_RGGB: { @@ -49,889 +63,2387 @@ pixformat_t imlib_bayer_shift(pixformat_t pixfmt, int x, int y, bool transpose) return transpose ? PIXFORMAT_BAYER_GBRG : PIXFORMAT_BAYER_GRBG; } else if (shift_down) { return transpose ? PIXFORMAT_BAYER_GRBG : PIXFORMAT_BAYER_GBRG; + } else { + return pixfmt; } } default: { - return pixfmt; + __builtin_unreachable(); } } } -void imlib_debayer_line(int x_start, int x_end, int y_row, void *dst_row_ptr, pixformat_t pixfmt, image_t *src) { - int src_w = src->w, w_limit = src_w - 1, w_limit_m_1 = w_limit - 1; - int src_h = src->h, h_limit = src_h - 1, h_limit_m_1 = h_limit - 1; +// Row vectors are loaded into memory and processed in little-endian order. +// row_0 stores MSB [G1, R1, G0, R0] LSB pixels where each pixel is 8-bits. +// row_1 stores MSB [B1, G3, B0, G2] LSB pixels where each pixel is 8-bits. +// row_2 stores MSB [G5, R3, G4, R2] LSB pixels where each pixel is 8-bits. 
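+// Debayering this window produces output pixels at the B0 and G3 sites of the middle
+// row. Missing channels are taken from the nearest same-color neighbors via halving
+// adds, i.e.:
+//   at B0: R = (R0+R1+R2+R3)/4, G = (G0+G2+G3+G4)/4, B = B0
+//   at G3: R = (R1+R3)/2, G = G3, B = (B0+B1)/2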
+// In the case of vectors larger than 32-bits the pattern is repeated for every 32-bits. +// +// vdebayer_bggr produces two output pixels per 32-bits: +// pixels.r = MSB [0, R@G3, 0, R@B0] LSB pixels where each pixel is 8-bits. +// pixels.g = MSB [0, G@G3, 0, G@B0] LSB pixels where each pixel is 8-bits. +// pixels.b = MSB [0, B@G3, 0, B@B0] LSB pixels where each pixel is 8-bits. +static inline vrgb_pixels_t vdebayer_bggr(v128_t row_0, v128_t row_1, v128_t row_2) { + vrgb_pixels_t pixels; + v128_t row_02 = vhadd_u8(row_0, row_2); + // row_02 = [(G1+G5)/2, (R1+R3)/2, (G0+G4)/2, (R0+R2)/2] + v128_t row_11 = vhadd_u8(row_1, vpkhtb(row_1, row_1)); + // row_11 = [(B1+B1)/2, (G3+G3)/2, (B0+B1)/2, (G2+G3)/2] + // row_11 = [B1, G3, (B0+B1)/2, (G2+G3)/2] + v128_t t_r_pixels = vhadd_u8(row_02, vpkhtb(row_02, row_02)); + // t_r_pixels = [(G1+G5+G1+G5)/4, (R1+R3+R1+R3)/4, (G1+G5+G0+G4)/4, (R1+R3+R0+R2)/4] + // t_r_pixels = [(G1+G5)/2, R@G3, G@G3, R@B0] + pixels.r = vuxtb16(t_r_pixels); + // pixels.r = [0, R@G3, 0, R@B0] + v128_t t_g_pixels = vhadd_u8(row_11, vpkhtb_ror8(row_11, row_02)); + // t_g_pixels = [(B1+B1)/2, (G3+G3)/2, (B0+B1+R1+R3)/4, (G2+G3+G0+G4)/4] + // t_g_pixels = [B@B1, G@G3, (B0+B1+R1+R3)/4, G@B0] + pixels.g = vuxtb16(t_g_pixels); + // pixels.g = [0, G@G3, 0, G@B0] + v128_t t_b_pixels = vhadd_u8(row_1, vpkhbt(row_1, row_1)); + // t_b_pixels = [(B1+B0)/2, (G3+G2)/2, (B0+B0)/2, (G2+G2)/2] + // t_b_pixels = [B@G3, (G3+G2)/2, B@B0, G@G2] + pixels.b = vuxtb16_ror8(t_b_pixels); + // pixels.b = [0, B@G3, 0, B@B0] + return pixels; +} - int y_row_odd = y_row & 1; - int y = (y_row / 2) * 2; - uint8_t *rowptr_grgr_0, *rowptr_bgbg_1, *rowptr_grgr_2, *rowptr_bgbg_3; +// Row vectors are loaded into memory and processed in little-endian order. +// row_0 stores MSB [R1, G1, R0, G0] LSB pixels where each pixel is 8-bits. +// row_1 stores MSB [G3, B1, G2, B0] LSB pixels where each pixel is 8-bits. +// row_2 stores MSB [R3, G5, R2, G4] LSB pixels where each pixel is 8-bits. +// In the case of vectors larger than 32-bits the pattern is repeated for every 32-bits. +// +// vdebayer_gbrg produces two output pixels per 32-bits: +// pixels.r = MSB [0, R@B1, 0, R@G2] LSB pixels where each pixel is 8-bits. +// pixels.g = MSB [0, G@B1, 0, G@G2] LSB pixels where each pixel is 8-bits. +// pixels.b = MSB [0, B@B1, 0, B@G2] LSB pixels where each pixel is 8-bits. 
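+//
+// For reference, the equivalent scalar math at the B1 output site is roughly
+// (halving adds truncate, so results can differ from a true 4-tap average by 1 LSB):
+//   R@B1 = (((R0 + R2) >> 1) + ((R1 + R3) >> 1)) >> 1
+//   G@B1 = (((G2 + G3) >> 1) + ((G1 + G5) >> 1)) >> 1
+//   B@B1 = B1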
+static inline vrgb_pixels_t vdebayer_gbrg(v128_t row_0, v128_t row_1, v128_t row_2) { + vrgb_pixels_t pixels; + v128_t row_02 = vhadd_u8(row_0, row_2); + // row_02 = [(R1+R3)/2, (G1+G5)/2, (R0+R2)/2, (G0+G4)/2] + v128_t row_11 = vhadd_u8(row_1, vpkhbt(row_1, row_1)); + // row_11 = [(G3+G2)/2, (B1+B0)/2, (G2+G2)/2, (B0+B0)/2] + // row_11 = [(G3+G2)/2, B@G2, G@G2, B@B0] + v128_t t_r_pixels = vhadd_u8(row_02, vpkhbt(row_02, row_02)); + // t_r_pixels = [(R1+R3+R0+R2)/4, (G1+G5+G0+G4)/4, (R0+R2+R0+R2)/4, (G0+G4+G0+G4)/4] + // t_r_pixels = [R@B1, (G1+G5+G0+G4)/4, R@G2, (G0+G4)/2] + pixels.r = vuxtb16_ror8(t_r_pixels); + // pixels.r = [0, R@B1, 0, R@G2] + v128_t t_g_pixels = vhadd_u8(row_11, vpkhbt_ror8(row_11, row_02)); + // t_g_pixels = [(G3+G2+G1+G5)/4, (B1+B0+R0+R2)/4, (G2+G2+G2+G2)/4, (B0+B0+B0+B0)/2] + // t_g_pixels = [G@B1, (B1+B0+R0+R2)/4, G@G2, B@B0] + pixels.g = vuxtb16_ror8(t_g_pixels); + // pixels.g = [0, G@B1, 0, G@G2] + v128_t t_b_pixels = vhadd_u8(row_1, vpkhtb(row_1, row_1)); + // t_b_pixels = [(G3+G3)/2, (B1+B1)/2, (G2+G3)/2, (B0+B1)/2] + // t_b_pixels = [(G3+G3)/2, B@B1, (G2+G3)/2, B@G2] + pixels.b = vuxtb16(t_b_pixels); + // pixels.b = [0, B@B1, 0, B@G2] + return pixels; +} + +// Row vectors are loaded into memory and processed in little-endian order. +// row_0 stores MSB [B1, G1, B0, G0] LSB pixels where each pixel is 8-bits. +// row_1 stores MSB [G3, R3, G2, R0] LSB pixels where each pixel is 8-bits. +// row_2 stores MSB [B3, G5, B2, G4] LSB pixels where each pixel is 8-bits. +// In the case of vectors larger than 32-bits the pattern is repeated for every 32-bits. +// +// vdebayer_grbg produces two output pixels per 32-bits: +// pixels.r = MSB [0, R@R3, 0, R@G2] LSB pixels where each pixel is 8-bits. +// pixels.g = MSB [0, G@R3, 0, G@G2] LSB pixels where each pixel is 8-bits. +// pixels.b = MSB [0, B@R3, 0, B@G2] LSB pixels where each pixel is 8-bits. +static inline vrgb_pixels_t vdebayer_grbg(v128_t row_0, v128_t row_1, v128_t row_2) { + vrgb_pixels_t pixels; + v128_t row_02 = vhadd_u8(row_0, row_2); + // row_02 = [(B1+B3)/2, (G1+G5)/2, (B0+B2)/2, (G0+G4)/2] + v128_t row_11 = vhadd_u8(row_1, vpkhbt(row_1, row_1)); + // row_11 = [(G3+G2)/2, (R3+R0)/2, (G2+G2)/2, (R0+R0)/2] + // row_11 = [(G3+G2)/2, R@G2, G@G2, R@R0] + v128_t t_r_pixels = vhadd_u8(row_1, vpkhtb(row_1, row_1)); + // t_r_pixels = [(G3+G3)/2, (R3+R3)/2, (G2+G3)/2, (R0+R3)/2] + // t_r_pixels = [(G3+G3)/2, R@R3, (G2+G3)/2, R@G2] + pixels.r = vuxtb16(t_r_pixels); + // pixels.r = [0, R@R3, 0, R@G2] + v128_t t_g_pixels = vhadd_u8(row_11, vpkhbt_ror8(row_11, row_02)); + // t_g_pixels = [(G3+G2+G1+G5)/4, (R3+R0+B0+B2)/4, (G2+G2+G2+G2)/4, (R0+R0+R0+R0)/2] + // t_g_pixels = [G@R3, (R3+R0+B0+B2)/4, G@G2, R@R0] + pixels.g = vuxtb16_ror8(t_g_pixels); + // pixels.g = [0, G@R3, 0, G@G2] + v128_t t_b_pixels = vhadd_u8(row_02, vpkhbt(row_02, row_02)); + // t_b_pixels = [(B1+B3+B0+B2)/4, (G1+G5+G0+G4)/4, (B0+B2+B0+B2)/4, (G0+G4+G0+G4)/2] + // t_b_pixels = [B@R3, (G1+G5+G0+G4)/4, B@G2, (G0+G4)/2] + pixels.b = vuxtb16_ror8(t_b_pixels); + // pixels.b = [0, B@R3, 0, B@G2] + return pixels; +} + +// Row vectors are loaded into memory and processed in little-endian order. +// row_0 stores MSB [G1, B1, G0, B0] LSB pixels where each pixel is 8-bits. +// row_1 stores MSB [R1, G3, R0, G2] LSB pixels where each pixel is 8-bits. +// row_2 stores MSB [G5, B3, G4, B2] LSB pixels where each pixel is 8-bits. +// In the case of vectors larger than 32-bits the pattern is repeated for every 32-bits. 
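+// With 128-bit Helium vectors (UINT8_VECTOR_SIZE == 16) four such 32-bit groups are
+// processed per call, i.e. VBAYER_X_STRIDE == 8 output pixels per inner-loop iteration.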
+// +// vdebayer_rggb produces two output pixels per 32-bits: +// pixels.r = MSB [0, R@G3, 0, R@R0] LSB pixels where each pixel is 8-bits. +// pixels.g = MSB [0, G@G3, 0, G@R0] LSB pixels where each pixel is 8-bits. +// pixels.b = MSB [0, B@G3, 0, B@R0] LSB pixels where each pixel is 8-bits. +static inline vrgb_pixels_t vdebayer_rggb(v128_t row_0, v128_t row_1, v128_t row_2) { + vrgb_pixels_t pixels; + v128_t row_02 = vhadd_u8(row_0, row_2); + // row_02 = [(G1+G5)/2, (B1+B3)/2, (G0+G4)/2, (B0+B2)/2] + v128_t row_11 = vhadd_u8(row_1, vpkhtb(row_1, row_1)); + // row_11 = [(R1+R1)/2, (G3+G3)/2, (R0+R1)/2, (G2+G3)/2] + // row_11 = [R@R1, G@G3, R@G3, (G2+G3)/2] + v128_t t_r_pixels = vhadd_u8(row_1, vpkhbt(row_1, row_1)); + // t_r_pixels = [(R1+R0)/2, (G3+G2)/2, (R0+R0)/2, (G2+G2)/2] + // t_r_pixels = [R@G3, (G3+G2)/2, R@R0, G@G2] + pixels.r = vuxtb16_ror8(t_r_pixels); + // pixels.r = [0, R@G3, 0, R@R0] + v128_t t_g_pixels = vhadd_u8(row_11, vpkhtb_ror8(row_11, row_02)); + // t_g_pixels = [(R1+R1+R1+R1)/4, (G3+G3+G3+G3)/4, (R0+R1+B1+B3)/4, (G2+G3+G0+G4)/4] + // t_g_pixels = [R@R1, G@G3, (R0+R1+B1+B3)/4, G@R0] + pixels.g = vuxtb16(t_g_pixels); + // pixels.g = [0, G@G3, 0, G@R0] + v128_t t_b_pixels = vhadd_u8(row_02, vpkhtb(row_02, row_02)); + // t_b_pixels = [(G1+G5+G1+G5)/4, (B1+B3+B1+B3)/4, (G0+G4+G1+G5)/4, (B0+B2+B1+B3)/4] + // t_b_pixels = [(G1+G5)/2, B@G3, (G0+G4+G1+G5)/4, B@R0] + pixels.b = vuxtb16(t_b_pixels); + // pixels.b = [0, B@G3, 0, B@R0] + return pixels; +} + +#if !defined(IMLIB_ENABLE_DEBAYER_OPTIMIZATION) +static inline vrgb_pixels_t vdebayer_all_0(image_t *src, v128_t row_0, v128_t row_1, v128_t row_2) { + switch (src->pixfmt) { + case PIXFORMAT_BAYER_BGGR: { + return vdebayer_bggr(row_0, row_1, row_2); + } + case PIXFORMAT_BAYER_GBRG: { + return vdebayer_gbrg(row_0, row_1, row_2); + } + case PIXFORMAT_BAYER_GRBG: { + return vdebayer_grbg(row_0, row_1, row_2); + } + case PIXFORMAT_BAYER_RGGB: { + return vdebayer_rggb(row_0, row_1, row_2); + } + default: { + __builtin_unreachable(); + } + } +} + +static inline vrgb_pixels_t vdebayer_all_1(image_t *src, v128_t row_0, v128_t row_1, v128_t row_2) { + switch (src->pixfmt) { + case PIXFORMAT_BAYER_BGGR: { + // PIXFORMAT_BAYER_BGGR shifted down by 1 becomes PIXFORMAT_BAYER_GRBG + return vdebayer_grbg(row_0, row_1, row_2); + } + case PIXFORMAT_BAYER_GBRG: { + // PIXFORMAT_BAYER_GBRG shifted down by 1 becomes PIXFORMAT_BAYER_RGGB + return vdebayer_rggb(row_0, row_1, row_2); + } + case PIXFORMAT_BAYER_GRBG: { + // PIXFORMAT_BAYER_GRBG shifted down by 1 becomes PIXFORMAT_BAYER_BGGR + return vdebayer_bggr(row_0, row_1, row_2); + } + case PIXFORMAT_BAYER_RGGB: { + // PIXFORMAT_BAYER_RGGB shifted down by 1 becomes PIXFORMAT_BAYER_GBRG + return vdebayer_gbrg(row_0, row_1, row_2); + } + default: { + __builtin_unreachable(); + } + } +} +#endif // !IMLIB_ENABLE_DEBAYER_OPTIMIZATION + +// Note that the loaded pointers are shifted to up by 1 to account for the offset +// created by debayering the image. +static inline v4x_row_ptrs_t vdebayer_rowptrs_init(const image_t *src, int32_t y) { + v4x_row_ptrs_t rowptrs; // keep row pointers in bounds if (y == 0) { - rowptr_bgbg_1 = src->data; - rowptr_grgr_2 = rowptr_bgbg_1 + ((src_h >= 2) ? src_w : 0); - rowptr_bgbg_3 = rowptr_bgbg_1 + ((src_h >= 3) ? 
(src_w * 2) : 0); - rowptr_grgr_0 = rowptr_grgr_2; - } else if (y == h_limit_m_1) { - rowptr_grgr_0 = src->data + ((y - 1) * src_w); - rowptr_bgbg_1 = rowptr_grgr_0 + src_w; - rowptr_grgr_2 = rowptr_bgbg_1 + src_w; - rowptr_bgbg_3 = rowptr_bgbg_1; - } else if (y >= h_limit) { - rowptr_grgr_0 = src->data + ((y - 1) * src_w); - rowptr_bgbg_1 = rowptr_grgr_0 + src_w; - rowptr_grgr_2 = rowptr_grgr_0; - rowptr_bgbg_3 = rowptr_bgbg_1; + rowptrs.p1.u8 = src->data; + rowptrs.p2.u8 = rowptrs.p1.u8 + ((src->h >= 2) ? src->w : 0); + rowptrs.p3.u8 = rowptrs.p1.u8 + ((src->h >= 3) ? (src->w * 2) : 0); + rowptrs.p0.u8 = rowptrs.p2.u8; + } else if (y == (src->h - 2)) { + rowptrs.p0.u8 = src->data + ((y - 1) * src->w); + rowptrs.p1.u8 = rowptrs.p0.u8 + src->w; + rowptrs.p2.u8 = rowptrs.p1.u8 + src->w; + rowptrs.p3.u8 = rowptrs.p1.u8; + } else if (y == (src->h - 1)) { + rowptrs.p0.u8 = src->data + ((y - 1) * src->w); + rowptrs.p1.u8 = rowptrs.p0.u8 + src->w; + rowptrs.p2.u8 = rowptrs.p0.u8; + rowptrs.p3.u8 = rowptrs.p1.u8; } else { // get 4 neighboring rows - rowptr_grgr_0 = src->data + ((y - 1) * src_w); - rowptr_bgbg_1 = rowptr_grgr_0 + src_w; - rowptr_grgr_2 = rowptr_bgbg_1 + src_w; - rowptr_bgbg_3 = rowptr_grgr_2 + src_w; + rowptrs.p0.u8 = src->data + ((y - 1) * src->w); + rowptrs.p1.u8 = rowptrs.p0.u8 + src->w; + rowptrs.p2.u8 = rowptrs.p1.u8 + src->w; + rowptrs.p3.u8 = rowptrs.p2.u8 + src->w; } - // If the image is an odd width this will go for the last loop and we drop the last column. - if (!y_row_odd) { - // even - for (int x = x_start, i = 0; x < x_end; x += 2, i += 2) { - uint32_t row_grgr_0, row_bgbg_1, row_grgr_2; + // Shift loaded pointers up by 1 to account for the offset created by debayering the image. + rowptrs.p0.u8 -= 1; + rowptrs.p1.u8 -= 1; + rowptrs.p2.u8 -= 1; + rowptrs.p3.u8 -= 1; + return rowptrs; +} - // keep pixels in bounds - if (x == 0) { - if (src_w >= 4) { - row_grgr_0 = *((uint32_t *) rowptr_grgr_0); - row_bgbg_1 = *((uint32_t *) rowptr_bgbg_1); - row_grgr_2 = *((uint32_t *) rowptr_grgr_2); - } else if (src_w >= 3) { - row_grgr_0 = *((uint16_t *) rowptr_grgr_0) | (*(rowptr_grgr_0 + 2) << 16); - row_bgbg_1 = *((uint16_t *) rowptr_bgbg_1) | (*(rowptr_bgbg_1 + 2) << 16); - row_grgr_2 = *((uint16_t *) rowptr_grgr_2) | (*(rowptr_grgr_2 + 2) << 16); - } else if (src_w >= 2) { - row_grgr_0 = *((uint16_t *) rowptr_grgr_0); - row_grgr_0 = (row_grgr_0 << 16) | row_grgr_0; - row_bgbg_1 = *((uint16_t *) rowptr_bgbg_1); - row_bgbg_1 = (row_bgbg_1 << 16) | row_bgbg_1; - row_grgr_2 = *((uint16_t *) rowptr_grgr_2); - row_grgr_2 = (row_grgr_2 << 16) | row_grgr_2; - } else { - row_grgr_0 = *(rowptr_grgr_0) * 0x01010101; - row_bgbg_1 = *(rowptr_bgbg_1) * 0x01010101; - row_grgr_2 = *(rowptr_grgr_2) * 0x01010101; - } - // The starting point needs to be offset by 1. The below patterns are actually - // rgrg, gbgb, rgrg, and gbgb. So, shift left and backfill the missing border pixel. 
- row_grgr_0 = (row_grgr_0 << 8) | __UXTB_RORn(row_grgr_0, 8); - row_bgbg_1 = (row_bgbg_1 << 8) | __UXTB_RORn(row_bgbg_1, 8); - row_grgr_2 = (row_grgr_2 << 8) | __UXTB_RORn(row_grgr_2, 8); - } else if (x == w_limit_m_1) { - row_grgr_0 = *((uint32_t *) (rowptr_grgr_0 + x - 2)); - row_grgr_0 = (row_grgr_0 >> 8) | ((row_grgr_0 << 8) & 0xff000000); - row_bgbg_1 = *((uint32_t *) (rowptr_bgbg_1 + x - 2)); - row_bgbg_1 = (row_bgbg_1 >> 8) | ((row_bgbg_1 << 8) & 0xff000000); - row_grgr_2 = *((uint32_t *) (rowptr_grgr_2 + x - 2)); - row_grgr_2 = (row_grgr_2 >> 8) | ((row_grgr_2 << 8) & 0xff000000); - } else if (x >= w_limit) { - row_grgr_0 = *((uint16_t *) (rowptr_grgr_0 + x - 1)); - row_grgr_0 = (row_grgr_0 << 16) | row_grgr_0; - row_bgbg_1 = *((uint16_t *) (rowptr_bgbg_1 + x - 1)); - row_bgbg_1 = (row_bgbg_1 << 16) | row_bgbg_1; - row_grgr_2 = *((uint16_t *) (rowptr_grgr_2 + x - 1)); - row_grgr_2 = (row_grgr_2 << 16) | row_grgr_2; - } else { - // get 4 neighboring rows - row_grgr_0 = *((uint32_t *) (rowptr_grgr_0 + x - 1)); - row_bgbg_1 = *((uint32_t *) (rowptr_bgbg_1 + x - 1)); - row_grgr_2 = *((uint32_t *) (rowptr_grgr_2 + x - 1)); - } +static inline v2x_row_ptrs_t vdebayer_quarter_rowptrs_init(const image_t *src, int32_t y) { + v2x_row_ptrs_t rowptrs; - int r_pixels_0, g_pixels_0, b_pixels_0; - - switch (src->pixfmt) { - case PIXFORMAT_BAYER_BGGR: { - #if defined(ARM_MATH_DSP) - int row_02 = __UHADD8(row_grgr_0, row_grgr_2); - int row_1g = __UHADD8(row_bgbg_1, __PKHTB(row_bgbg_1, row_bgbg_1, 16)); - - r_pixels_0 = __UXTB16(__UHADD8(row_02, __PKHTB(row_02, row_02, 16))); - g_pixels_0 = __UXTB16(__UHADD8(row_1g, __PKHTB(row_1g, row_02, 8))); - b_pixels_0 = __UXTB16_RORn(__UHADD8(row_bgbg_1, __PKHBT(row_bgbg_1, row_bgbg_1, 16)), 8); - #else - - int r0 = ((row_grgr_0 & 0xFF) + (row_grgr_2 & 0xFF)) >> 1; - int r2 = (((row_grgr_0 >> 16) & 0xFF) + ((row_grgr_2 >> 16) & 0xFF)) >> 1; - r_pixels_0 = (r2 << 16) | ((r0 + r2) >> 1); - - int g0 = (row_grgr_0 >> 8) & 0xFF; - int g1 = (((row_bgbg_1 >> 16) & 0xFF) + (row_bgbg_1 & 0xFF)) >> 1; - int g2 = (row_grgr_2 >> 8) & 0xFF; - g_pixels_0 = (row_bgbg_1 & 0xFF0000) | ((((g0 + g2) >> 1) + g1) >> 1); - - int b1 = (((row_bgbg_1 >> 24) & 0xFF) + ((row_bgbg_1 >> 8) & 0xFF)) >> 1; - b_pixels_0 = (b1 << 16) | ((row_bgbg_1 >> 8) & 0xFF); - - #endif - break; - } - case PIXFORMAT_BAYER_GBRG: { - #if defined(ARM_MATH_DSP) - int row_02 = __UHADD8(row_grgr_0, row_grgr_2); - int row_1g = __UHADD8(row_bgbg_1, __PKHBT(row_bgbg_1, row_bgbg_1, 16)); - - r_pixels_0 = __UXTB16_RORn(__UHADD8(row_02, __PKHBT(row_02, row_02, 16)), 8); - g_pixels_0 = __UXTB16_RORn(__UHADD8(row_1g, __PKHBT(row_1g, row_02, 8)), 8); - b_pixels_0 = __UXTB16(__UHADD8(row_bgbg_1, __PKHTB(row_bgbg_1, row_bgbg_1, 16))); - #else - - int r0 = (((row_grgr_0 >> 8) & 0xFF) + ((row_grgr_2 >> 8) & 0xFF)) >> 1; - int r2 = (((row_grgr_0 >> 24) & 0xFF) + ((row_grgr_2 >> 24) & 0xFF)) >> 1; - r_pixels_0 = r0 | (((r0 + r2) >> 1) << 16); - - int g0 = (row_grgr_0 >> 16) & 0xFF; - int g1 = (((row_bgbg_1 >> 24) & 0xFF) + ((row_bgbg_1 >> 8) & 0xFF)) >> 1; - int g2 = (row_grgr_2 >> 16) & 0xFF; - g_pixels_0 = ((row_bgbg_1 >> 8) & 0xFF) | (((((g0 + g2) >> 1) + g1) >> 1) << 16); - - int b1 = (((row_bgbg_1 >> 16) & 0xFF) + (row_bgbg_1 & 0xFF)) >> 1; - b_pixels_0 = b1 | (row_bgbg_1 & 0xFF0000); - - #endif - break; - } - case PIXFORMAT_BAYER_GRBG: { - #if defined(ARM_MATH_DSP) - int row_02 = __UHADD8(row_grgr_0, row_grgr_2); - int row_1g = __UHADD8(row_bgbg_1, __PKHBT(row_bgbg_1, row_bgbg_1, 16)); - - r_pixels_0 = 
__UXTB16(__UHADD8(row_bgbg_1, __PKHTB(row_bgbg_1, row_bgbg_1, 16))); - g_pixels_0 = __UXTB16_RORn(__UHADD8(row_1g, __PKHBT(row_1g, row_02, 8)), 8); - b_pixels_0 = __UXTB16_RORn(__UHADD8(row_02, __PKHBT(row_02, row_02, 16)), 8); - #else - - int r1 = (((row_bgbg_1 >> 16) & 0xFF) + (row_bgbg_1 & 0xFF)) >> 1; - r_pixels_0 = r1 | (row_bgbg_1 & 0xFF0000); - - int g0 = (row_grgr_0 >> 16) & 0xFF; - int g1 = (((row_bgbg_1 >> 24) & 0xFF) + ((row_bgbg_1 >> 8) & 0xFF)) >> 1; - int g2 = (row_grgr_2 >> 16) & 0xFF; - g_pixels_0 = ((row_bgbg_1 >> 8) & 0xFF) | (((((g0 + g2) >> 1) + g1) >> 1) << 16); - - int b0 = (((row_grgr_0 >> 8) & 0xFF) + ((row_grgr_2 >> 8) & 0xFF)) >> 1; - int b2 = (((row_grgr_0 >> 24) & 0xFF) + ((row_grgr_2 >> 24) & 0xFF)) >> 1; - b_pixels_0 = b0 | (((b0 + b2) >> 1) << 16); - - #endif - break; - } - case PIXFORMAT_BAYER_RGGB: { - #if defined(ARM_MATH_DSP) - int row_02 = __UHADD8(row_grgr_0, row_grgr_2); - int row_1g = __UHADD8(row_bgbg_1, __PKHTB(row_bgbg_1, row_bgbg_1, 16)); - - r_pixels_0 = __UXTB16_RORn(__UHADD8(row_bgbg_1, __PKHBT(row_bgbg_1, row_bgbg_1, 16)), 8); - g_pixels_0 = __UXTB16(__UHADD8(row_1g, __PKHTB(row_1g, row_02, 8))); - b_pixels_0 = __UXTB16(__UHADD8(row_02, __PKHTB(row_02, row_02, 16))); - #else - - int r1 = (((row_bgbg_1 >> 24) & 0xFF) + ((row_bgbg_1 >> 8) & 0xFF)) >> 1; - r_pixels_0 = (r1 << 16) | ((row_bgbg_1 >> 8) & 0xFF); - - int g0 = (row_grgr_0 >> 8) & 0xFF; - int g1 = (((row_bgbg_1 >> 16) & 0xFF) + (row_bgbg_1 & 0xFF)) >> 1; - int g2 = (row_grgr_2 >> 8) & 0xFF; - g_pixels_0 = (row_bgbg_1 & 0xFF0000) | ((((g0 + g2) >> 1) + g1) >> 1); - - int b0 = ((row_grgr_0 & 0xFF) + (row_grgr_2 & 0xFF)) >> 1; - int b2 = (((row_grgr_0 >> 16) & 0xFF) + ((row_grgr_2 >> 16) & 0xFF)) >> 1; - b_pixels_0 = (b2 << 16) | ((b0 + b2) >> 1); - - #endif - break; - } - default: { - r_pixels_0 = 0; - g_pixels_0 = 0; - b_pixels_0 = 0; - break; - } - } - - switch (pixfmt) { - case PIXFORMAT_BINARY: { - uint32_t *dst_row_ptr_32 = (uint32_t *) dst_row_ptr; - int y0 = ((r_pixels_0 * 38) + (g_pixels_0 * 75) + (b_pixels_0 * 15)) >> 7; - IMAGE_PUT_BINARY_PIXEL_FAST(dst_row_ptr_32, i, (y0 >> 7)); - - if (x != w_limit) { - IMAGE_PUT_BINARY_PIXEL_FAST(dst_row_ptr_32, i + 1, (y0 >> 23)); - } - - break; - } - case PIXFORMAT_GRAYSCALE: { - uint8_t *dst_row_ptr_8 = (uint8_t *) dst_row_ptr; - int y0 = ((r_pixels_0 * 38) + (g_pixels_0 * 75) + (b_pixels_0 * 15)) >> 7; - IMAGE_PUT_GRAYSCALE_PIXEL_FAST(dst_row_ptr_8, i, y0); - - if (x != w_limit) { - IMAGE_PUT_GRAYSCALE_PIXEL_FAST(dst_row_ptr_8, i + 1, y0 >> 16); - } - - break; - } - case PIXFORMAT_RGB565: { - uint16_t *dst_row_ptr_16 = (uint16_t *) dst_row_ptr; - int rgb565_0 = ((r_pixels_0 << 8) & 0xf800f800) | - ((g_pixels_0 << 3) & 0x07e007e0) | - ((b_pixels_0 >> 3) & 0x001f001f); - - if (x == w_limit) { - // just put bottom - IMAGE_PUT_RGB565_PIXEL_FAST(dst_row_ptr_16, i, rgb565_0); - } else { - // put both - *((uint32_t *) (dst_row_ptr_16 + i)) = rgb565_0; - } - - break; - } - default: { - break; - } - } - } + // keep row pointers in bounds + if (y == 0) { + rowptrs.p0.u8 = src->data; + rowptrs.p1.u8 = rowptrs.p0.u8 + ((src->h >= 2) ? 
src->w : 0); + } else if (y == (src->h - 1)) { + rowptrs.p0.u8 = src->data + (y * src->w); + rowptrs.p1.u8 = rowptrs.p0.u8 - src->w; } else { - // odd - for (int x = x_start, i = 0; x < x_end; x += 2, i += 2) { - uint32_t row_bgbg_1, row_grgr_2, row_bgbg_3; + // get 2 neighboring rows + rowptrs.p0.u8 = src->data + (y * src->w); + rowptrs.p1.u8 = rowptrs.p0.u8 + src->w; + } - // keep pixels in bounds - if (x == 0) { - if (src_w >= 4) { - row_bgbg_1 = *((uint32_t *) rowptr_bgbg_1); - row_grgr_2 = *((uint32_t *) rowptr_grgr_2); - row_bgbg_3 = *((uint32_t *) rowptr_bgbg_3); - } else if (src_w >= 3) { - row_bgbg_1 = *((uint16_t *) rowptr_bgbg_1) | (*(rowptr_bgbg_1 + 2) << 16); - row_grgr_2 = *((uint16_t *) rowptr_grgr_2) | (*(rowptr_grgr_2 + 2) << 16); - row_bgbg_3 = *((uint16_t *) rowptr_bgbg_3) | (*(rowptr_bgbg_3 + 2) << 16); - } else if (src_w >= 2) { - row_bgbg_1 = *((uint16_t *) rowptr_bgbg_1); - row_bgbg_1 = (row_bgbg_1 << 16) | row_bgbg_1; - row_grgr_2 = *((uint16_t *) rowptr_grgr_2); - row_grgr_2 = (row_grgr_2 << 16) | row_grgr_2; - row_bgbg_3 = *((uint16_t *) rowptr_bgbg_3); - row_bgbg_3 = (row_bgbg_3 << 16) | row_bgbg_3; - } else { - row_bgbg_1 = *(rowptr_bgbg_1) * 0x01010101; - row_grgr_2 = *(rowptr_grgr_2) * 0x01010101; - row_bgbg_3 = *(rowptr_bgbg_3) * 0x01010101; - } - // The starting point needs to be offset by 1. The below patterns are actually - // rgrg, gbgb, rgrg, and gbgb. So, shift left and backfill the missing border pixel. - row_bgbg_1 = (row_bgbg_1 << 8) | __UXTB_RORn(row_bgbg_1, 8); - row_grgr_2 = (row_grgr_2 << 8) | __UXTB_RORn(row_grgr_2, 8); - row_bgbg_3 = (row_bgbg_3 << 8) | __UXTB_RORn(row_bgbg_3, 8); - } else if (x == w_limit_m_1) { - row_bgbg_1 = *((uint32_t *) (rowptr_bgbg_1 + x - 2)); - row_bgbg_1 = (row_bgbg_1 >> 8) | ((row_bgbg_1 << 8) & 0xff000000); - row_grgr_2 = *((uint32_t *) (rowptr_grgr_2 + x - 2)); - row_grgr_2 = (row_grgr_2 >> 8) | ((row_grgr_2 << 8) & 0xff000000); - row_bgbg_3 = *((uint32_t *) (rowptr_bgbg_3 + x - 2)); - row_bgbg_3 = (row_bgbg_3 >> 8) | ((row_bgbg_1 << 8) & 0xff000000); - } else if (x >= w_limit) { - row_bgbg_1 = *((uint16_t *) (rowptr_bgbg_1 + x - 1)); - row_bgbg_1 = (row_bgbg_1 << 16) | row_bgbg_1; - row_grgr_2 = *((uint16_t *) (rowptr_grgr_2 + x - 1)); - row_grgr_2 = (row_grgr_2 << 16) | row_grgr_2; - row_bgbg_3 = *((uint16_t *) (rowptr_bgbg_3 + x - 1)); - row_bgbg_3 = (row_bgbg_3 << 16) | row_bgbg_3; - } else { - // get 4 neighboring rows - row_bgbg_1 = *((uint32_t *) (rowptr_bgbg_1 + x - 1)); - row_grgr_2 = *((uint32_t *) (rowptr_grgr_2 + x - 1)); - row_bgbg_3 = *((uint32_t *) (rowptr_bgbg_3 + x - 1)); - } + return rowptrs; +} - int r_pixels_1, g_pixels_1, b_pixels_1; +static inline v128_predicate_t vdebayer_load_pred(const image_t *src, int32_t x) { + // Load 1x to 4x 32-bit rows overlapping by 2 pixels. This creates a 6 pixel overlap. + return vpredicate_8(src->w - x + 6); +} - switch (src->pixfmt) { - case PIXFORMAT_BAYER_BGGR: { - #if defined(ARM_MATH_DSP) - int row_13 = __UHADD8(row_bgbg_1, row_bgbg_3); - int row_2g = __UHADD8(row_grgr_2, __PKHBT(row_grgr_2, row_grgr_2, 16)); +static inline v128_predicate_t vdebayer_store_pred(int32_t width, int32_t x) { + // For 2x to 8x 16-bit lanes. 
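+    // Each output pixel occupies one 16-bit lane of vrgb_pixels_t, so this predicate
+    // masks off the lanes that fall past the end of the output row.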
+ return vpredicate_16(width - x); +} - r_pixels_1 = __UXTB16(__UHADD8(row_grgr_2, __PKHTB(row_grgr_2, row_grgr_2, 16))); - g_pixels_1 = __UXTB16_RORn(__UHADD8(row_2g, __PKHBT(row_2g, row_13, 8)), 8); - b_pixels_1 = __UXTB16_RORn(__UHADD8(row_13, __PKHBT(row_13, row_13, 16)), 8); - #else +// Loads pixels from the image into the 4 row vectors and handles the boundary conditions. +// Note that the loaded pixels are shifted to the right by 1 to account for the offset +// created by debayering the image. +static v4x_rows_t vdebayer_load_rows_inner(v4x_row_ptrs_t rowptrs, uint32_t x, v128_t offsets, v128_predicate_t pred) { + bool x_is_0 = (x == 0); - int r2 = (((row_grgr_2 >> 16) & 0xFF) + (row_grgr_2 & 0xFF)) >> 1; - r_pixels_1 = (row_grgr_2 & 0xFF0000) | r2; + // Start loading 1 pixel behind the x position and load 1 extra pixel. + if (!x_is_0) { + pred = vpredicate_8_add(pred, 1); + } else { + // Pointers are shifted back by 1 already so this undoes that shift. + x += 1; + } - int g1 = (row_bgbg_1 >> 16) & 0xFF; - int g2 = (((row_grgr_2 >> 24) & 0xFF) + ((row_grgr_2 >> 8) & 0xFF)) >> 1; - int g3 = (row_bgbg_3 >> 16) & 0xFF; - g_pixels_1 = (((((g1 + g3) >> 1) + g2) >> 1) << 16) | ((row_grgr_2 >> 8) & 0xFF); + v4x_rows_t rows = vldr_u32_gather_pred_x4_unaligned(rowptrs, x, offsets, pred); - int b1 = (((row_bgbg_1 >> 8) & 0xFF) + ((row_bgbg_3 >> 8) & 0xFF)) >> 1; - int b3 = (((row_bgbg_1 >> 24) & 0xFF) + ((row_bgbg_3 >> 24) & 0xFF)) >> 1; - b_pixels_1 = (((b1 + b3) >> 1) << 16) | b1; + // Vector lane access must use constant offsets so we handle the boundary conditions + // using a switch statement per boundary condition. In this case the get/set functions + // compile down into 1 instruction each. + switch (vpredicate_8_get_n(pred)) { + case 1: + // MSB [0, 0, 0, G0] LSB -> MSB [G0, G0, G0, G0] LSB + rows.r0 = vset_u32(rows.r0, 0, vget_u8(rows.r0, 0) * 0x01010101); + rows.r1 = vset_u32(rows.r1, 0, vget_u8(rows.r1, 0) * 0x01010101); + rows.r2 = vset_u32(rows.r2, 0, vget_u8(rows.r2, 0) * 0x01010101); + rows.r3 = vset_u32(rows.r3, 0, vget_u8(rows.r3, 0) * 0x01010101); + break; + case 2: + // MSB [0, 0, R0, G0] LSB -> MSB [R0, G0, R0, G0] LSB + rows.r0 = vset_u16(rows.r0, 1, vget_u16(rows.r0, 0)); + rows.r1 = vset_u16(rows.r1, 1, vget_u16(rows.r1, 0)); + rows.r2 = vset_u16(rows.r2, 1, vget_u16(rows.r2, 0)); + rows.r3 = vset_u16(rows.r3, 1, vget_u16(rows.r3, 0)); + break; + case 3: + // MSB [0, G1, R0, G0] LSB -> MSB [R0, G1, R0, G0] LSB + rows.r0 = vset_u8(rows.r0, 3, vget_u8(rows.r0, 2)); + rows.r1 = vset_u8(rows.r1, 3, vget_u8(rows.r1, 2)); + rows.r2 = vset_u8(rows.r2, 3, vget_u8(rows.r2, 2)); + rows.r3 = vset_u8(rows.r3, 3, vget_u8(rows.r3, 2)); + break; + #if UINT8_VECTOR_SIZE >= 8 + case 5: + // MSB [0, 0, 0, G0] LSB -> MSB [G0, G0, G0, G0] LSB + rows.r0 = vset_u32(rows.r0, 1, vget_u8(rows.r0, 4) * 0x01010101); + rows.r1 = vset_u32(rows.r1, 1, vget_u8(rows.r1, 4) * 0x01010101); + rows.r2 = vset_u32(rows.r2, 1, vget_u8(rows.r2, 4) * 0x01010101); + rows.r3 = vset_u32(rows.r3, 1, vget_u8(rows.r3, 4) * 0x01010101); + break; + case 6: + // MSB [0, 0, R0, G0] LSB -> MSB [R0, G0, R0, G0] LSB + rows.r0 = vset_u16(rows.r0, 3, vget_u16(rows.r0, 2)); + rows.r1 = vset_u16(rows.r1, 3, vget_u16(rows.r1, 2)); + rows.r2 = vset_u16(rows.r2, 3, vget_u16(rows.r2, 2)); + rows.r3 = vset_u16(rows.r3, 3, vget_u16(rows.r3, 2)); + break; + case 7: + // MSB [0, G1, R0, G0] LSB -> MSB [R0, G1, R0, G0] LSB + rows.r0 = vset_u8(rows.r0, 7, vget_u8(rows.r0, 5)); + rows.r1 = vset_u8(rows.r1, 7, vget_u8(rows.r1, 5)); + 
rows.r2 = vset_u8(rows.r2, 7, vget_u8(rows.r2, 5)); + rows.r3 = vset_u8(rows.r3, 7, vget_u8(rows.r3, 5)); + break; + #endif + #if UINT8_VECTOR_SIZE >= 16 + case 9: + // MSB [0, 0, 0, G0] LSB -> MSB [G0, G0, G0, G0] LSB + rows.r0 = vset_u32(rows.r0, 2, vget_u8(rows.r0, 8) * 0x01010101); + rows.r1 = vset_u32(rows.r1, 2, vget_u8(rows.r1, 8) * 0x01010101); + rows.r2 = vset_u32(rows.r2, 2, vget_u8(rows.r2, 8) * 0x01010101); + rows.r3 = vset_u32(rows.r3, 2, vget_u8(rows.r3, 8) * 0x01010101); + break; + case 10: + // MSB [0, 0, R0, G0] LSB -> MSB [R0, G0, R0, G0] LSB + rows.r0 = vset_u16(rows.r0, 5, vget_u16(rows.r0, 4)); + rows.r1 = vset_u16(rows.r1, 5, vget_u16(rows.r1, 4)); + rows.r2 = vset_u16(rows.r2, 5, vget_u16(rows.r2, 4)); + rows.r3 = vset_u16(rows.r3, 5, vget_u16(rows.r3, 4)); + break; + case 11: + // MSB [0, G1, R0, G0] LSB -> MSB [R0, G1, R0, G0] LSB + rows.r0 = vset_u8(rows.r0, 11, vget_u8(rows.r0, 9)); + rows.r1 = vset_u8(rows.r1, 11, vget_u8(rows.r1, 9)); + rows.r2 = vset_u8(rows.r2, 11, vget_u8(rows.r2, 9)); + rows.r3 = vset_u8(rows.r3, 11, vget_u8(rows.r3, 9)); + break; + case 13: + // MSB [0, 0, 0, G0] LSB -> MSB [G0, G0, G0, G0] LSB + rows.r0 = vset_u32(rows.r0, 3, vget_u8(rows.r0, 12) * 0x01010101); + rows.r1 = vset_u32(rows.r1, 3, vget_u8(rows.r1, 12) * 0x01010101); + rows.r2 = vset_u32(rows.r2, 3, vget_u8(rows.r2, 12) * 0x01010101); + rows.r3 = vset_u32(rows.r3, 3, vget_u8(rows.r3, 12) * 0x01010101); + break; + case 14: + // MSB [0, 0, R0, G0] LSB -> MSB [R0, G0, R0, G0] LSB + rows.r0 = vset_u16(rows.r0, 7, vget_u16(rows.r0, 6)); + rows.r1 = vset_u16(rows.r1, 7, vget_u16(rows.r1, 6)); + rows.r2 = vset_u16(rows.r2, 7, vget_u16(rows.r2, 6)); + rows.r3 = vset_u16(rows.r3, 7, vget_u16(rows.r3, 6)); + break; + case 15: + // MSB [0, G1, R0, G0] LSB -> MSB [R0, G1, R0, G0] LSB + rows.r0 = vset_u8(rows.r0, 15, vget_u8(rows.r0, 13)); + rows.r1 = vset_u8(rows.r1, 15, vget_u8(rows.r1, 13)); + rows.r2 = vset_u8(rows.r2, 15, vget_u8(rows.r2, 13)); + rows.r3 = vset_u8(rows.r3, 15, vget_u8(rows.r3, 13)); + break; + #endif + #if UINT8_VECTOR_SIZE >= 32 + #error "Unsupported vector size" + #endif + default: + break; + } - #endif - break; - } - case PIXFORMAT_BAYER_GBRG: { - #if defined(ARM_MATH_DSP) - int row_13 = __UHADD8(row_bgbg_1, row_bgbg_3); - int row_2g = __UHADD8(row_grgr_2, __PKHTB(row_grgr_2, row_grgr_2, 16)); + if (x_is_0) { + // Shift loaded pixels left by 1 as if we started loading at x - 1. 
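+        // The missing left-border column is backfilled from the nearest same-color
+        // column (vector byte 1), matching the border handling of the old scalar code: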
+ // MSB [R1, G1, R0, G0] LSB -> MSB [G1, R0, G0, R0] LSB + uint32_t r0 = vget_u8(rows.r0, 1); + uint32_t r1 = vget_u8(rows.r1, 1); + uint32_t r2 = vget_u8(rows.r2, 1); + uint32_t r3 = vget_u8(rows.r3, 1); + rows.r0 = vshlc(rows.r0, &r0, 8); + rows.r1 = vshlc(rows.r1, &r1, 8); + rows.r2 = vshlc(rows.r2, &r2, 8); + rows.r3 = vshlc(rows.r3, &r3, 8); + } - r_pixels_1 = __UXTB16_RORn(__UHADD8(row_grgr_2, __PKHBT(row_grgr_2, row_grgr_2, 16)), 8); - g_pixels_1 = __UXTB16(__UHADD8(row_2g, __PKHTB(row_2g, row_13, 8))); - b_pixels_1 = __UXTB16(__UHADD8(row_13, __PKHTB(row_13, row_13, 16))); - #else + return rows; +} - int r2 = (((row_grgr_2 >> 24) & 0xFF) + ((row_grgr_2 >> 8) & 0xFF)) >> 1; - r_pixels_1 = ((row_grgr_2 >> 8) & 0xFF) | (r2 << 16); +static inline v4x_rows_t vdebayer_load_rows(const image_t *src, v4x_row_ptrs_t rowptrs, uint32_t x, v128_t offsets) { + v128_predicate_t pred = vdebayer_load_pred(src, x); - int g1 = (row_bgbg_1 >> 8) & 0xFF; - int g2 = (((row_grgr_2 >> 16) & 0xFF) + (row_grgr_2 & 0xFF)) >> 1; - int g3 = (row_bgbg_3 >> 8) & 0xFF; - g_pixels_1 = ((((g1 + g3) >> 1) + g2) >> 1) | (row_grgr_2 & 0xFF0000); - - int b1 = ((row_bgbg_1 & 0xFF) + (row_bgbg_3 & 0xFF)) >> 1; - int b3 = (((row_bgbg_1 >> 16) & 0xFF) + ((row_bgbg_3 >> 16) & 0xFF)) >> 1; - b_pixels_1 = ((b1 + b3) >> 1) | (b3 << 16); - - #endif - break; - } - case PIXFORMAT_BAYER_GRBG: { - #if defined(ARM_MATH_DSP) - int row_13 = __UHADD8(row_bgbg_1, row_bgbg_3); - int row_2g = __UHADD8(row_grgr_2, __PKHTB(row_grgr_2, row_grgr_2, 16)); - - r_pixels_1 = __UXTB16(__UHADD8(row_13, __PKHTB(row_13, row_13, 16))); - g_pixels_1 = __UXTB16(__UHADD8(row_2g, __PKHTB(row_2g, row_13, 8))); - b_pixels_1 = __UXTB16_RORn(__UHADD8(row_grgr_2, __PKHBT(row_grgr_2, row_grgr_2, 16)), 8); - #else - - int r1 = ((row_bgbg_1 & 0xFF) + (row_bgbg_3 & 0xFF)) >> 1; - int r3 = (((row_bgbg_1 >> 16) & 0xFF) + ((row_bgbg_3 >> 16) & 0xFF)) >> 1; - r_pixels_1 = ((r1 + r3) >> 1) | (r3 << 16); - - int g1 = (row_bgbg_1 >> 8) & 0xFF; - int g2 = (((row_grgr_2 >> 16) & 0xFF) + (row_grgr_2 & 0xFF)) >> 1; - int g3 = (row_bgbg_3 >> 8) & 0xFF; - g_pixels_1 = ((((g1 + g3) >> 1) + g2) >> 1) | (row_grgr_2 & 0xFF0000); - - int b2 = (((row_grgr_2 >> 24) & 0xFF) + ((row_grgr_2 >> 8) & 0xFF)) >> 1; - b_pixels_1 = ((row_grgr_2 >> 8) & 0xFF) | (b2 << 16); - - #endif - break; - } - case PIXFORMAT_BAYER_RGGB: { - #if defined(ARM_MATH_DSP) - int row_13 = __UHADD8(row_bgbg_1, row_bgbg_3); - int row_2g = __UHADD8(row_grgr_2, __PKHBT(row_grgr_2, row_grgr_2, 16)); - - r_pixels_1 = __UXTB16_RORn(__UHADD8(row_13, __PKHBT(row_13, row_13, 16)), 8); - g_pixels_1 = __UXTB16_RORn(__UHADD8(row_2g, __PKHBT(row_2g, row_13, 8)), 8); - b_pixels_1 = __UXTB16(__UHADD8(row_grgr_2, __PKHTB(row_grgr_2, row_grgr_2, 16))); - #else - - int r1 = (((row_bgbg_1 >> 8) & 0xFF) + ((row_bgbg_3 >> 8) & 0xFF)) >> 1; - int r3 = (((row_bgbg_1 >> 24) & 0xFF) + ((row_bgbg_3 >> 24) & 0xFF)) >> 1; - r_pixels_1 = (((r1 + r3) >> 1) << 16) | r1; - - int g1 = (row_bgbg_1 >> 16) & 0xFF; - int g2 = (((row_grgr_2 >> 24) & 0xFF) + ((row_grgr_2 >> 8) & 0xFF)) >> 1; - int g3 = (row_bgbg_3 >> 16) & 0xFF; - g_pixels_1 = (((((g1 + g3) >> 1) + g2) >> 1) << 16) | ((row_grgr_2 >> 8) & 0xFF); - - int b2 = (((row_grgr_2 >> 16) & 0xFF) + (row_grgr_2 & 0xFF)) >> 1; - b_pixels_1 = (row_grgr_2 & 0xFF0000) | b2; - - #endif - break; - } - default: { - r_pixels_1 = 0; - g_pixels_1 = 0; - b_pixels_1 = 0; - break; - } - } - - switch (pixfmt) { - case PIXFORMAT_BINARY: { - uint32_t *dst_row_ptr_32 = (uint32_t *) dst_row_ptr; - int y1 = 
((r_pixels_1 * 38) + (g_pixels_1 * 75) + (b_pixels_1 * 15)) >> 7; - IMAGE_PUT_BINARY_PIXEL_FAST(dst_row_ptr_32, i, (y1 >> 7)); - - if (x != w_limit) { - IMAGE_PUT_BINARY_PIXEL_FAST(dst_row_ptr_32, i + 1, (y1 >> 23)); - } - - break; - } - case PIXFORMAT_GRAYSCALE: { - uint8_t *dst_row_ptr_8 = (uint8_t *) dst_row_ptr; - int y1 = ((r_pixels_1 * 38) + (g_pixels_1 * 75) + (b_pixels_1 * 15)) >> 7; - IMAGE_PUT_GRAYSCALE_PIXEL_FAST(dst_row_ptr_8, i, y1); - - if (x != w_limit) { - IMAGE_PUT_GRAYSCALE_PIXEL_FAST(dst_row_ptr_8, i + 1, y1 >> 16); - } - - break; - } - case PIXFORMAT_RGB565: { - uint16_t *dst_row_ptr_16 = (uint16_t *) dst_row_ptr; - int rgb565_1 = ((r_pixels_1 << 8) & 0xf800f800) | - ((g_pixels_1 << 3) & 0x07e007e0) | - ((b_pixels_1 >> 3) & 0x001f001f); - - if (x == w_limit) { - // just put bottom - IMAGE_PUT_RGB565_PIXEL_FAST(dst_row_ptr_16, i, rgb565_1); - } else { - // put both - *((uint32_t *) (dst_row_ptr_16 + i)) = rgb565_1; - } - - break; - } - default: { - break; - } - } - } + // For the vast majority of cases we load vector size pixels at a time and exit quickly. + if ((x != 0) && vpredicate_8_all_lanes_active(pred)) { + // Start loading 1 pixel behind the x position and load 1 extra pixel. + v4x_rows_t rows; + rows.r0 = vldr_u32_gather_unaligned(rowptrs.p0.u8 + x, offsets); + rows.r1 = vldr_u32_gather_unaligned(rowptrs.p1.u8 + x, offsets); + rows.r2 = vldr_u32_gather_unaligned(rowptrs.p2.u8 + x, offsets); + rows.r3 = vldr_u32_gather_unaligned(rowptrs.p3.u8 + x, offsets); + return rows; + } else { + return vdebayer_load_rows_inner(rowptrs, x, offsets, pred); } } -// Does no bounds checking on the destination. Destination must be mutable. -void imlib_debayer_image(image_t *dst, image_t *src) { - int src_w = src->w, w_limit = src_w - 1, w_limit_m_1 = w_limit - 1; - int src_h = src->h, h_limit = src_h - 1, h_limit_m_1 = h_limit - 1; +// In the case of vectors larger than 32-bits the pattern is repeated for every 32-bits. +// +// pixels.r = MSB [0, R1, 0, R0] LSB pixels where each pixel is 8-bits. +// pixels.g = MSB [0, G1, 0, G0] LSB pixels where each pixel is 8-bits. +// pixels.b = MSB [0, B1, 0, B0] LSB pixels where each pixel is 8-bits. +// +// Y == ((R * 38) + (G * 75) + (B * 15)) / 128 +// +// Returns 2x int8_t Y (MSB [garbage, Y1, garbage, Y0] LSB) pixels for every 32-bits. +static inline v128_t vdebayer_to_y(vrgb_pixels_t pixels) { + pixels.r = vrgb_pixels_to_grayscale(pixels); + #if (OMV_JPEG_CODEC_ENABLE == 0) + pixels.r = veor_u32(pixels.r, vdup_u32(0x800080)); + #endif + return pixels.r; +} - // If the image is an odd height this will go for the last loop and we drop the last row. - for (int y = 0; y < src_h; y += 2) { - void *row_ptr_e = NULL, *row_ptr_o = NULL; +// In the case of vectors larger than 32-bits the pattern is repeated for every 32-bits. +// +// pixels.r = MSB [0, R1, 0, R0] LSB pixels where each pixel is 8-bits. +// pixels.g = MSB [0, G1, 0, G0] LSB pixels where each pixel is 8-bits. +// pixels.b = MSB [0, B1, 0, B0] LSB pixels where each pixel is 8-bits. +// +// CB == ((B * 64) - ((R * 21) + (G * 43))) / 128 +// +// Returns 2x int8_t CB (MSB [garbage, CB1, garbage, CB0] LSB) pixels for every 32-bits. 
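+// The weights approximate the standard JPEG/BT.601 Cb coefficients scaled by 128
+// (Cb ~= 0.5*B - 0.169*R - 0.331*G), and the XOR with 0x80 on each byte toggles
+// between the signed result and the 128-biased unsigned form, depending on which
+// JPEG codec path is enabled.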
+static inline v128_t vdebayer_to_cb(vrgb_pixels_t pixels) { + #if (__ARM_ARCH >= 8) + pixels.r = vmul_n_s16(pixels.r, -21); + pixels.r = vmla_n_s16(pixels.g, -43, pixels.r); + pixels.r = vmla_n_s16(pixels.b, 64, pixels.r); + #else + pixels.r = vmul_n_u32(pixels.r, 21); + pixels.r = vmla_n_u32(pixels.g, 43, pixels.r); + pixels.b = vmul_n_u32(pixels.b, 64); + pixels.r = vsub_s16(pixels.b, pixels.r); + #endif + pixels.r = vlsr_u32(pixels.r, 7); + #if (OMV_JPEG_CODEC_ENABLE == 1) + pixels.r = veor_u32(pixels.r, vdup_u32(0x800080)); + #endif + return pixels.r; +} - switch (dst->pixfmt) { - case PIXFORMAT_BINARY: { - row_ptr_e = IMAGE_COMPUTE_BINARY_PIXEL_ROW_PTR(dst, y); - row_ptr_o = IMAGE_COMPUTE_BINARY_PIXEL_ROW_PTR(dst, y + 1); - break; - } - case PIXFORMAT_GRAYSCALE: { - row_ptr_e = IMAGE_COMPUTE_GRAYSCALE_PIXEL_ROW_PTR(dst, y); - row_ptr_o = IMAGE_COMPUTE_GRAYSCALE_PIXEL_ROW_PTR(dst, y + 1); - break; - } - case PIXFORMAT_RGB565: { - row_ptr_e = IMAGE_COMPUTE_RGB565_PIXEL_ROW_PTR(dst, y); - row_ptr_o = IMAGE_COMPUTE_RGB565_PIXEL_ROW_PTR(dst, y + 1); - break; - } - } +// In the case of vectors larger than 32-bits the pattern is repeated for every 32-bits. +// +// pixels.r = MSB [0, R1, 0, R0] LSB pixels where each pixel is 8-bits. +// pixels.g = MSB [0, G1, 0, G0] LSB pixels where each pixel is 8-bits. +// pixels.b = MSB [0, B1, 0, B0] LSB pixels where each pixel is 8-bits. +// +// CR == ((R * 64) - ((G * 54) + (B * 10))) / 128 +// +// Returns 2x int8_t CR (MSB [garbage, CR1, garbage, CR0] LSB) pixels for every 32-bits. +static inline v128_t vdebayer_to_cr(vrgb_pixels_t pixels) { + #if (__ARM_ARCH >= 8) + pixels.r = vmul_n_s16(pixels.r, 64); + pixels.r = vmla_n_s16(pixels.g, -54, pixels.r); + pixels.r = vmla_n_s16(pixels.b, -10, pixels.r); + #else + pixels.r = vmul_n_u32(pixels.r, 64); + pixels.g = vmul_n_u32(pixels.g, 54); + pixels.g = vmla_n_u32(pixels.b, 10, pixels.g); + pixels.r = vsub_s16(pixels.r, pixels.g); + #endif + pixels.r = vlsr_u32(pixels.r, 7); + #if (OMV_JPEG_CODEC_ENABLE == 1) + pixels.r = veor_u32(pixels.r, vdup_u32(0x800080)); + #endif + return pixels.r; +} - uint8_t *rowptr_grgr_0, *rowptr_bgbg_1, *rowptr_grgr_2, *rowptr_bgbg_3; +// In the case of vectors larger than 32-bits the pattern is repeated for every 32-bits. +// +// pixels.r = MSB [0, R1, 0, R0] LSB pixels where each pixel is 8-bits. +// pixels.g = MSB [0, G1, 0, G0] LSB pixels where each pixel is 8-bits. +// pixels.b = MSB [0, B1, 0, B0] LSB pixels where each pixel is 8-bits. +// +// Returns the same. +static inline vrgb_pixels_t vdebayer_apply_rb_gain(vrgb_pixels_t pixels, uint32_t red_gain, uint32_t blue_gain) { + pixels.r = vusat_s16_narrow_u8_lo(pixels.r, vmul_n_u32(pixels.r, red_gain), 5); + pixels.b = vusat_s16_narrow_u8_lo(pixels.b, vmul_n_u32(pixels.b, blue_gain), 5); + return pixels; +} - // keep row pointers in bounds - if (y == 0) { - rowptr_bgbg_1 = src->data; - rowptr_grgr_2 = rowptr_bgbg_1 + ((src_h >= 2) ? src_w : 0); - rowptr_bgbg_3 = rowptr_bgbg_1 + ((src_h >= 3) ? 
(src_w * 2) : 0); - rowptr_grgr_0 = rowptr_grgr_2; - } else if (y == h_limit_m_1) { - rowptr_grgr_0 = src->data + ((y - 1) * src_w); - rowptr_bgbg_1 = rowptr_grgr_0 + src_w; - rowptr_grgr_2 = rowptr_bgbg_1 + src_w; - rowptr_bgbg_3 = rowptr_bgbg_1; - } else if (y >= h_limit) { - rowptr_grgr_0 = src->data + ((y - 1) * src_w); - rowptr_bgbg_1 = rowptr_grgr_0 + src_w; - rowptr_grgr_2 = rowptr_grgr_0; - rowptr_bgbg_3 = rowptr_bgbg_1; - } else { - // get 4 neighboring rows - rowptr_grgr_0 = src->data + ((y - 1) * src_w); - rowptr_bgbg_1 = rowptr_grgr_0 + src_w; - rowptr_grgr_2 = rowptr_bgbg_1 + src_w; - rowptr_bgbg_3 = rowptr_grgr_2 + src_w; - } +// In the case of vectors larger than 32-bits the pattern is repeated for every 32-bits. +// +// pixels.r = MSB [R3, R2, R1, R0] LSB pixels where each pixel is 8-bits. +// pixels.g = MSB [G3, G2, G1, G0] LSB pixels where each pixel is 8-bits. +// pixels.b = MSB [B3, B2, B2, B0] LSB pixels where each pixel is 8-bits. +// +// Stores 4x Grayscale pixels for every 32-bits. +static inline void vdebayer_store_packed_grayscale(uint8_t *p, vrgb_pixels_t packed_pixels, + uint32_t red_gain, uint32_t blue_gain, + int32_t len) { + vrgb_pixels_t pixels0 = { + .r = vuxtb16(packed_pixels.r), + .g = vuxtb16(packed_pixels.g), + .b = vuxtb16(packed_pixels.b), + }; - // If the image is an odd width this will go for the last loop and we drop the last column. - for (int x = 0; x < src_w; x += 2) { - uint32_t row_grgr_0, row_bgbg_1, row_grgr_2, row_bgbg_3; + v128_t v0 = vrgb_pixels_to_grayscale(vdebayer_apply_rb_gain(pixels0, red_gain, blue_gain)); - // keep pixels in bounds - if (x == 0) { - if (src_w >= 4) { - row_grgr_0 = *((uint32_t *) rowptr_grgr_0); - row_bgbg_1 = *((uint32_t *) rowptr_bgbg_1); - row_grgr_2 = *((uint32_t *) rowptr_grgr_2); - row_bgbg_3 = *((uint32_t *) rowptr_bgbg_3); - } else if (src_w >= 3) { - row_grgr_0 = *((uint16_t *) rowptr_grgr_0) | (*(rowptr_grgr_0 + 2) << 16); - row_bgbg_1 = *((uint16_t *) rowptr_bgbg_1) | (*(rowptr_bgbg_1 + 2) << 16); - row_grgr_2 = *((uint16_t *) rowptr_grgr_2) | (*(rowptr_grgr_2 + 2) << 16); - row_bgbg_3 = *((uint16_t *) rowptr_bgbg_3) | (*(rowptr_bgbg_3 + 2) << 16); - } else if (src_w >= 2) { - row_grgr_0 = *((uint16_t *) rowptr_grgr_0); - row_grgr_0 = (row_grgr_0 << 16) | row_grgr_0; - row_bgbg_1 = *((uint16_t *) rowptr_bgbg_1); - row_bgbg_1 = (row_bgbg_1 << 16) | row_bgbg_1; - row_grgr_2 = *((uint16_t *) rowptr_grgr_2); - row_grgr_2 = (row_grgr_2 << 16) | row_grgr_2; - row_bgbg_3 = *((uint16_t *) rowptr_bgbg_3); - row_bgbg_3 = (row_bgbg_3 << 16) | row_bgbg_3; - } else { - row_grgr_0 = *(rowptr_grgr_0) * 0x01010101; - row_bgbg_1 = *(rowptr_bgbg_1) * 0x01010101; - row_grgr_2 = *(rowptr_grgr_2) * 0x01010101; - row_bgbg_3 = *(rowptr_bgbg_3) * 0x01010101; - } - // The starting point needs to be offset by 1. The below patterns are actually - // rgrg, gbgb, rgrg, and gbgb. So, shift left and backfill the missing border pixel. 
- row_grgr_0 = (row_grgr_0 << 8) | __UXTB_RORn(row_grgr_0, 8); - row_bgbg_1 = (row_bgbg_1 << 8) | __UXTB_RORn(row_bgbg_1, 8); - row_grgr_2 = (row_grgr_2 << 8) | __UXTB_RORn(row_grgr_2, 8); - row_bgbg_3 = (row_bgbg_3 << 8) | __UXTB_RORn(row_bgbg_3, 8); - } else if (x == w_limit_m_1) { - row_grgr_0 = *((uint32_t *) (rowptr_grgr_0 + x - 2)); - row_grgr_0 = (row_grgr_0 >> 8) | ((row_grgr_0 << 8) & 0xff000000); - row_bgbg_1 = *((uint32_t *) (rowptr_bgbg_1 + x - 2)); - row_bgbg_1 = (row_bgbg_1 >> 8) | ((row_bgbg_1 << 8) & 0xff000000); - row_grgr_2 = *((uint32_t *) (rowptr_grgr_2 + x - 2)); - row_grgr_2 = (row_grgr_2 >> 8) | ((row_grgr_2 << 8) & 0xff000000); - row_bgbg_3 = *((uint32_t *) (rowptr_bgbg_3 + x - 2)); - row_bgbg_3 = (row_bgbg_3 >> 8) | ((row_bgbg_1 << 8) & 0xff000000); - } else if (x >= w_limit) { - row_grgr_0 = *((uint16_t *) (rowptr_grgr_0 + x - 1)); - row_grgr_0 = (row_grgr_0 << 16) | row_grgr_0; - row_bgbg_1 = *((uint16_t *) (rowptr_bgbg_1 + x - 1)); - row_bgbg_1 = (row_bgbg_1 << 16) | row_bgbg_1; - row_grgr_2 = *((uint16_t *) (rowptr_grgr_2 + x - 1)); - row_grgr_2 = (row_grgr_2 << 16) | row_grgr_2; - row_bgbg_3 = *((uint16_t *) (rowptr_bgbg_3 + x - 1)); - row_bgbg_3 = (row_bgbg_3 << 16) | row_bgbg_3; - } else { - // get 4 neighboring rows - row_grgr_0 = *((uint32_t *) (rowptr_grgr_0 + x - 1)); - row_bgbg_1 = *((uint32_t *) (rowptr_bgbg_1 + x - 1)); - row_grgr_2 = *((uint32_t *) (rowptr_grgr_2 + x - 1)); - row_bgbg_3 = *((uint32_t *) (rowptr_bgbg_3 + x - 1)); - } + vrgb_pixels_t pixels1 = { + .r = vuxtb16_ror8(packed_pixels.r), + .g = vuxtb16_ror8(packed_pixels.g), + .b = vuxtb16_ror8(packed_pixels.b), + }; - int r_pixels_0, g_pixels_0, b_pixels_0; + v128_t v1 = vrgb_pixels_to_grayscale(vdebayer_apply_rb_gain(pixels1, red_gain, blue_gain)); - switch (src->pixfmt) { - case PIXFORMAT_BAYER_BGGR: { - #if defined(ARM_MATH_DSP) - int row_02 = __UHADD8(row_grgr_0, row_grgr_2); - int row_1g = __UHADD8(row_bgbg_1, __PKHTB(row_bgbg_1, row_bgbg_1, 16)); + vstr_u8_pred(p, vmov_u16_narrow_u8_hi(v0, v1), vpredicate_8(len)); +} - r_pixels_0 = __UXTB16(__UHADD8(row_02, __PKHTB(row_02, row_02, 16))); - g_pixels_0 = __UXTB16(__UHADD8(row_1g, __PKHTB(row_1g, row_02, 8))); - b_pixels_0 = __UXTB16_RORn(__UHADD8(row_bgbg_1, __PKHBT(row_bgbg_1, row_bgbg_1, 16)), 8); - #else +// In the case of vectors larger than 32-bits the pattern is repeated for every 32-bits. +// +// pixels.r = MSB [R3, R2, R1, R0] LSB pixels where each pixel is 8-bits. +// pixels.g = MSB [G3, G2, G1, G0] LSB pixels where each pixel is 8-bits. +// pixels.b = MSB [B3, B2, B2, B0] LSB pixels where each pixel is 8-bits. +// +// Stores 4x RGB565 pixels for every 32-bits. 
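+// The packed bytes are split into even pixels (vuxtb16) and odd pixels (vuxtb16_ror8),
+// white-balanced and converted to RGB565 separately, then re-interleaved by the vst2
+// store. red_gain and blue_gain appear to be applied in 1/32 steps (32 == 1.0x), given
+// the 5-bit saturating shift in vdebayer_apply_rb_gain.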
+static inline void vdebayer_store_packed_rgb565(uint16_t *p, vrgb_pixels_t packed_pixels, + uint32_t red_gain, uint32_t blue_gain, + int32_t len) { + v2x_rows_t out; - int r0 = ((row_grgr_0 & 0xFF) + (row_grgr_2 & 0xFF)) >> 1; - int r2 = (((row_grgr_0 >> 16) & 0xFF) + ((row_grgr_2 >> 16) & 0xFF)) >> 1; - r_pixels_0 = (r2 << 16) | ((r0 + r2) >> 1); + vrgb_pixels_t pixels0 = { + .r = vuxtb16(packed_pixels.r), + .g = vuxtb16(packed_pixels.g), + .b = vuxtb16(packed_pixels.b), + }; - int g0 = (row_grgr_0 >> 8) & 0xFF; - int g1 = (((row_bgbg_1 >> 16) & 0xFF) + (row_bgbg_1 & 0xFF)) >> 1; - int g2 = (row_grgr_2 >> 8) & 0xFF; - g_pixels_0 = (row_bgbg_1 & 0xFF0000) | ((((g0 + g2) >> 1) + g1) >> 1); + out.r0 = vrgb_pixels_to_rgb565(vdebayer_apply_rb_gain(pixels0, red_gain, blue_gain)); - int b1 = (((row_bgbg_1 >> 24) & 0xFF) + ((row_bgbg_1 >> 8) & 0xFF)) >> 1; - b_pixels_0 = (b1 << 16) | ((row_bgbg_1 >> 8) & 0xFF); + vrgb_pixels_t pixels1 = { + .r = vuxtb16_ror8(packed_pixels.r), + .g = vuxtb16_ror8(packed_pixels.g), + .b = vuxtb16_ror8(packed_pixels.b), + }; - #endif - break; - } - case PIXFORMAT_BAYER_GBRG: { - #if defined(ARM_MATH_DSP) - int row_02 = __UHADD8(row_grgr_0, row_grgr_2); - int row_1g = __UHADD8(row_bgbg_1, __PKHBT(row_bgbg_1, row_bgbg_1, 16)); + out.r1 = vrgb_pixels_to_rgb565(vdebayer_apply_rb_gain(pixels1, red_gain, blue_gain)); - r_pixels_0 = __UXTB16_RORn(__UHADD8(row_02, __PKHBT(row_02, row_02, 16)), 8); - g_pixels_0 = __UXTB16_RORn(__UHADD8(row_1g, __PKHBT(row_1g, row_02, 8)), 8); - b_pixels_0 = __UXTB16(__UHADD8(row_bgbg_1, __PKHTB(row_bgbg_1, row_bgbg_1, 16))); - #else + if (len >= (UINT16_VECTOR_SIZE * 2)) { + vst2_u16(p, out); + } else { + vst2_u16_len(p, out, len); + } +} - int r0 = (((row_grgr_0 >> 8) & 0xFF) + ((row_grgr_2 >> 8) & 0xFF)) >> 1; - int r2 = (((row_grgr_0 >> 24) & 0xFF) + ((row_grgr_2 >> 24) & 0xFF)) >> 1; - r_pixels_0 = r0 | (((r0 + r2) >> 1) << 16); +static inline void vdebayer_grayscale_buf_copy(int32_t y, image_t *buf, image_t *dst) { + if (y >= VBAYER_BUF_KSIZE) { + // Transfer buffer lines... + int32_t y_offset = y - VBAYER_BUF_KSIZE; + vmemcpy_8(IMAGE_COMPUTE_GRAYSCALE_PIXEL_ROW_PTR(dst, y_offset), + IMAGE_COMPUTE_GRAYSCALE_PIXEL_ROW_PTR(buf, (y_offset % VBAYER_BUF_BROWS)), + IMAGE_GRAYSCALE_LINE_LEN_BYTES(dst) * VBAYER_Y_STRIDE); + } +} - int g0 = (row_grgr_0 >> 16) & 0xFF; - int g1 = (((row_bgbg_1 >> 24) & 0xFF) + ((row_bgbg_1 >> 8) & 0xFF)) >> 1; - int g2 = (row_grgr_2 >> 16) & 0xFF; - g_pixels_0 = ((row_bgbg_1 >> 8) & 0xFF) | (((((g0 + g2) >> 1) + g1) >> 1) << 16); +static inline void vdebayer_rgb565_buf_copy(int32_t y, image_t *buf, image_t *dst) { + if (y >= VBAYER_BUF_KSIZE) { + // Transfer buffer lines... + int32_t y_offset = y - VBAYER_BUF_KSIZE; + vmemcpy_16(IMAGE_COMPUTE_RGB565_PIXEL_ROW_PTR(dst, y_offset), + IMAGE_COMPUTE_RGB565_PIXEL_ROW_PTR(buf, (y_offset % VBAYER_BUF_BROWS)), + IMAGE_RGB565_LINE_LEN_BYTES(dst) * VBAYER_Y_STRIDE); + } +} - int b1 = (((row_bgbg_1 >> 16) & 0xFF) + (row_bgbg_1 & 0xFF)) >> 1; - b_pixels_0 = b1 | (row_bgbg_1 & 0xFF0000); +#if defined(IMLIB_ENABLE_DEBAYER_OPTIMIZATION) +static void vdebayer_bggr_to_binary(image_t *src, rectangle_t *roi, int32_t x_offset, image_t *dst) { + // Load pixels, but, each set of 4 pixels overlaps the previous by 2 pixels. 
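+    // Each 32-bit group of 4 loaded pixels only produces its middle 2 output pixels,
+    // so the per-lane gather offsets step by 2 source pixels (0, 2, 4, ...).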
+ v128_t offsets = vidup_u32_unaligned(0, 2); - #endif - break; - } - case PIXFORMAT_BAYER_GRBG: { - #if defined(ARM_MATH_DSP) - int row_02 = __UHADD8(row_grgr_0, row_grgr_2); - int row_1g = __UHADD8(row_bgbg_1, __PKHBT(row_bgbg_1, row_bgbg_1, 16)); + for (int32_t y = 0, y_end = roi->h - 1; y < roi->h; y += VBAYER_Y_STRIDE) { + v4x_row_ptrs_t rowptrs = vdebayer_rowptrs_init(src, y + roi->y); + uint32_t *p0 = IMAGE_COMPUTE_BINARY_PIXEL_ROW_PTR(dst, y); + uint32_t *p1 = IMAGE_COMPUTE_BINARY_PIXEL_ROW_PTR(dst, y + 1); - r_pixels_0 = __UXTB16(__UHADD8(row_bgbg_1, __PKHTB(row_bgbg_1, row_bgbg_1, 16))); - g_pixels_0 = __UXTB16_RORn(__UHADD8(row_1g, __PKHBT(row_1g, row_02, 8)), 8); - b_pixels_0 = __UXTB16_RORn(__UHADD8(row_02, __PKHBT(row_02, row_02, 16)), 8); - #else + for (int32_t x = 0; x < roi->w; x += VBAYER_X_STRIDE) { + v4x_rows_t rows = vdebayer_load_rows(src, rowptrs, x + roi->x, offsets); + v128_predicate_t pred = vdebayer_store_pred(roi->w, x); - int r1 = (((row_bgbg_1 >> 16) & 0xFF) + (row_bgbg_1 & 0xFF)) >> 1; - r_pixels_0 = r1 | (row_bgbg_1 & 0xFF0000); + vrgb_pixels_store_binary(p0, x + x_offset, vdebayer_bggr(rows.r0, rows.r1, rows.r2), pred); - int g0 = (row_grgr_0 >> 16) & 0xFF; - int g1 = (((row_bgbg_1 >> 24) & 0xFF) + ((row_bgbg_1 >> 8) & 0xFF)) >> 1; - int g2 = (row_grgr_2 >> 16) & 0xFF; - g_pixels_0 = ((row_bgbg_1 >> 8) & 0xFF) | (((((g0 + g2) >> 1) + g1) >> 1) << 16); - - int b0 = (((row_grgr_0 >> 8) & 0xFF) + ((row_grgr_2 >> 8) & 0xFF)) >> 1; - int b2 = (((row_grgr_0 >> 24) & 0xFF) + ((row_grgr_2 >> 24) & 0xFF)) >> 1; - b_pixels_0 = b0 | (((b0 + b2) >> 1) << 16); - - #endif - break; - } - case PIXFORMAT_BAYER_RGGB: { - #if defined(ARM_MATH_DSP) - int row_02 = __UHADD8(row_grgr_0, row_grgr_2); - int row_1g = __UHADD8(row_bgbg_1, __PKHTB(row_bgbg_1, row_bgbg_1, 16)); - - r_pixels_0 = __UXTB16_RORn(__UHADD8(row_bgbg_1, __PKHBT(row_bgbg_1, row_bgbg_1, 16)), 8); - g_pixels_0 = __UXTB16(__UHADD8(row_1g, __PKHTB(row_1g, row_02, 8))); - b_pixels_0 = __UXTB16(__UHADD8(row_02, __PKHTB(row_02, row_02, 16))); - #else - - int r1 = (((row_bgbg_1 >> 24) & 0xFF) + ((row_bgbg_1 >> 8) & 0xFF)) >> 1; - r_pixels_0 = (r1 << 16) | ((row_bgbg_1 >> 8) & 0xFF); - - int g0 = (row_grgr_0 >> 8) & 0xFF; - int g1 = (((row_bgbg_1 >> 16) & 0xFF) + (row_bgbg_1 & 0xFF)) >> 1; - int g2 = (row_grgr_2 >> 8) & 0xFF; - g_pixels_0 = (row_bgbg_1 & 0xFF0000) | ((((g0 + g2) >> 1) + g1) >> 1); - - int b0 = ((row_grgr_0 & 0xFF) + (row_grgr_2 & 0xFF)) >> 1; - int b2 = (((row_grgr_0 >> 16) & 0xFF) + ((row_grgr_2 >> 16) & 0xFF)) >> 1; - b_pixels_0 = (b2 << 16) | ((b0 + b2) >> 1); - - #endif - break; - } - default: { - r_pixels_0 = 0; - g_pixels_0 = 0; - b_pixels_0 = 0; - break; - } - } - - switch (dst->pixfmt) { - case PIXFORMAT_BINARY: { - uint32_t *row_ptr_e_32 = (uint32_t *) row_ptr_e; - int y0 = ((r_pixels_0 * 38) + (g_pixels_0 * 75) + (b_pixels_0 * 15)) >> 7; - IMAGE_PUT_BINARY_PIXEL_FAST(row_ptr_e_32, x, (y0 >> 7)); - - if (x != w_limit) { - IMAGE_PUT_BINARY_PIXEL_FAST(row_ptr_e_32, x + 1, (y0 >> 23)); - } - - break; - } - case PIXFORMAT_GRAYSCALE: { - uint8_t *row_ptr_e_8 = (uint8_t *) row_ptr_e; - int y0 = ((r_pixels_0 * 38) + (g_pixels_0 * 75) + (b_pixels_0 * 15)) >> 7; - IMAGE_PUT_GRAYSCALE_PIXEL_FAST(row_ptr_e_8, x, y0); - - if (x != w_limit) { - IMAGE_PUT_GRAYSCALE_PIXEL_FAST(row_ptr_e_8, x + 1, y0 >> 16); - } - - break; - } - case PIXFORMAT_RGB565: { - uint16_t *row_ptr_e_16 = (uint16_t *) row_ptr_e; - int rgb565_0 = ((r_pixels_0 << 8) & 0xf800f800) | - ((g_pixels_0 << 3) & 0x07e007e0) | - ((b_pixels_0 >> 
3) & 0x001f001f); - - if (x == w_limit) { - // just put bottom - IMAGE_PUT_RGB565_PIXEL_FAST(row_ptr_e_16, x, rgb565_0); - } else { - // put both - *((uint32_t *) (row_ptr_e_16 + x)) = rgb565_0; - } - - break; - } - } - - if (y == h_limit) { + if (y == y_end) { continue; } - int r_pixels_1, g_pixels_1, b_pixels_1; - - switch (src->pixfmt) { - case PIXFORMAT_BAYER_BGGR: { - #if defined(ARM_MATH_DSP) - int row_13 = __UHADD8(row_bgbg_1, row_bgbg_3); - int row_2g = __UHADD8(row_grgr_2, __PKHBT(row_grgr_2, row_grgr_2, 16)); - - r_pixels_1 = __UXTB16(__UHADD8(row_grgr_2, __PKHTB(row_grgr_2, row_grgr_2, 16))); - g_pixels_1 = __UXTB16_RORn(__UHADD8(row_2g, __PKHBT(row_2g, row_13, 8)), 8); - b_pixels_1 = __UXTB16_RORn(__UHADD8(row_13, __PKHBT(row_13, row_13, 16)), 8); - #else - - int r2 = (((row_grgr_2 >> 16) & 0xFF) + (row_grgr_2 & 0xFF)) >> 1; - r_pixels_1 = (row_grgr_2 & 0xFF0000) | r2; - - int g1 = (row_bgbg_1 >> 16) & 0xFF; - int g2 = (((row_grgr_2 >> 24) & 0xFF) + ((row_grgr_2 >> 8) & 0xFF)) >> 1; - int g3 = (row_bgbg_3 >> 16) & 0xFF; - g_pixels_1 = (((((g1 + g3) >> 1) + g2) >> 1) << 16) | ((row_grgr_2 >> 8) & 0xFF); - - int b1 = (((row_bgbg_1 >> 8) & 0xFF) + ((row_bgbg_3 >> 8) & 0xFF)) >> 1; - int b3 = (((row_bgbg_1 >> 24) & 0xFF) + ((row_bgbg_3 >> 24) & 0xFF)) >> 1; - b_pixels_1 = (((b1 + b3) >> 1) << 16) | b1; - - #endif - break; - } - case PIXFORMAT_BAYER_GBRG: { - #if defined(ARM_MATH_DSP) - int row_13 = __UHADD8(row_bgbg_1, row_bgbg_3); - int row_2g = __UHADD8(row_grgr_2, __PKHTB(row_grgr_2, row_grgr_2, 16)); - - r_pixels_1 = __UXTB16_RORn(__UHADD8(row_grgr_2, __PKHBT(row_grgr_2, row_grgr_2, 16)), 8); - g_pixels_1 = __UXTB16(__UHADD8(row_2g, __PKHTB(row_2g, row_13, 8))); - b_pixels_1 = __UXTB16(__UHADD8(row_13, __PKHTB(row_13, row_13, 16))); - #else - - int r2 = (((row_grgr_2 >> 24) & 0xFF) + ((row_grgr_2 >> 8) & 0xFF)) >> 1; - r_pixels_1 = ((row_grgr_2 >> 8) & 0xFF) | (r2 << 16); - - int g1 = (row_bgbg_1 >> 8) & 0xFF; - int g2 = (((row_grgr_2 >> 16) & 0xFF) + (row_grgr_2 & 0xFF)) >> 1; - int g3 = (row_bgbg_3 >> 8) & 0xFF; - g_pixels_1 = ((((g1 + g3) >> 1) + g2) >> 1) | (row_grgr_2 & 0xFF0000); - - int b1 = ((row_bgbg_1 & 0xFF) + (row_bgbg_3 & 0xFF)) >> 1; - int b3 = (((row_bgbg_1 >> 16) & 0xFF) + ((row_bgbg_3 >> 16) & 0xFF)) >> 1; - b_pixels_1 = ((b1 + b3) >> 1) | (b3 << 16); - - #endif - break; - } - case PIXFORMAT_BAYER_GRBG: { - #if defined(ARM_MATH_DSP) - int row_13 = __UHADD8(row_bgbg_1, row_bgbg_3); - int row_2g = __UHADD8(row_grgr_2, __PKHTB(row_grgr_2, row_grgr_2, 16)); - - r_pixels_1 = __UXTB16(__UHADD8(row_13, __PKHTB(row_13, row_13, 16))); - g_pixels_1 = __UXTB16(__UHADD8(row_2g, __PKHTB(row_2g, row_13, 8))); - b_pixels_1 = __UXTB16_RORn(__UHADD8(row_grgr_2, __PKHBT(row_grgr_2, row_grgr_2, 16)), 8); - #else - - int r1 = ((row_bgbg_1 & 0xFF) + (row_bgbg_3 & 0xFF)) >> 1; - int r3 = (((row_bgbg_1 >> 16) & 0xFF) + ((row_bgbg_3 >> 16) & 0xFF)) >> 1; - r_pixels_1 = ((r1 + r3) >> 1) | (r3 << 16); - - int g1 = (row_bgbg_1 >> 8) & 0xFF; - int g2 = (((row_grgr_2 >> 16) & 0xFF) + (row_grgr_2 & 0xFF)) >> 1; - int g3 = (row_bgbg_3 >> 8) & 0xFF; - g_pixels_1 = ((((g1 + g3) >> 1) + g2) >> 1) | (row_grgr_2 & 0xFF0000); - - int b2 = (((row_grgr_2 >> 24) & 0xFF) + ((row_grgr_2 >> 8) & 0xFF)) >> 1; - b_pixels_1 = ((row_grgr_2 >> 8) & 0xFF) | (b2 << 16); - - #endif - break; - } - case PIXFORMAT_BAYER_RGGB: { - #if defined(ARM_MATH_DSP) - int row_13 = __UHADD8(row_bgbg_1, row_bgbg_3); - int row_2g = __UHADD8(row_grgr_2, __PKHBT(row_grgr_2, row_grgr_2, 16)); - - r_pixels_1 = 
__UXTB16_RORn(__UHADD8(row_13, __PKHBT(row_13, row_13, 16)), 8); - g_pixels_1 = __UXTB16_RORn(__UHADD8(row_2g, __PKHBT(row_2g, row_13, 8)), 8); - b_pixels_1 = __UXTB16(__UHADD8(row_grgr_2, __PKHTB(row_grgr_2, row_grgr_2, 16))); - #else - - int r1 = (((row_bgbg_1 >> 8) & 0xFF) + ((row_bgbg_3 >> 8) & 0xFF)) >> 1; - int r3 = (((row_bgbg_1 >> 24) & 0xFF) + ((row_bgbg_3 >> 24) & 0xFF)) >> 1; - r_pixels_1 = (((r1 + r3) >> 1) << 16) | r1; - - int g1 = (row_bgbg_1 >> 16) & 0xFF; - int g2 = (((row_grgr_2 >> 24) & 0xFF) + ((row_grgr_2 >> 8) & 0xFF)) >> 1; - int g3 = (row_bgbg_3 >> 16) & 0xFF; - g_pixels_1 = (((((g1 + g3) >> 1) + g2) >> 1) << 16) | ((row_grgr_2 >> 8) & 0xFF); - - int b2 = (((row_grgr_2 >> 16) & 0xFF) + (row_grgr_2 & 0xFF)) >> 1; - b_pixels_1 = (row_grgr_2 & 0xFF0000) | b2; - - #endif - break; - } - default: { - r_pixels_1 = 0; - g_pixels_1 = 0; - b_pixels_1 = 0; - break; - } - } - - switch (dst->pixfmt) { - case PIXFORMAT_BINARY: { - uint32_t *row_ptr_o_32 = (uint32_t *) row_ptr_o; - int y1 = ((r_pixels_1 * 38) + (g_pixels_1 * 75) + (b_pixels_1 * 15)) >> 7; - IMAGE_PUT_BINARY_PIXEL_FAST(row_ptr_o_32, x, (y1 >> 7)); - - if (x != w_limit) { - IMAGE_PUT_BINARY_PIXEL_FAST(row_ptr_o_32, x + 1, (y1 >> 23)); - } - - break; - } - case PIXFORMAT_GRAYSCALE: { - uint8_t *row_ptr_o_8 = (uint8_t *) row_ptr_o; - int y1 = ((r_pixels_1 * 38) + (g_pixels_1 * 75) + (b_pixels_1 * 15)) >> 7; - IMAGE_PUT_GRAYSCALE_PIXEL_FAST(row_ptr_o_8, x, y1); - - if (x != w_limit) { - IMAGE_PUT_GRAYSCALE_PIXEL_FAST(row_ptr_o_8, x + 1, y1 >> 16); - } - - break; - } - case PIXFORMAT_RGB565: { - uint16_t *row_ptr_o_16 = (uint16_t *) row_ptr_o; - int rgb565_1 = ((r_pixels_1 << 8) & 0xf800f800) | - ((g_pixels_1 << 3) & 0x07e007e0) | - ((b_pixels_1 >> 3) & 0x001f001f); - - if (x == w_limit) { - // just put bottom - IMAGE_PUT_RGB565_PIXEL_FAST(row_ptr_o_16, x, rgb565_1); - } else { - // put both - *((uint32_t *) (row_ptr_o_16 + x)) = rgb565_1; - } - - break; - } - } + // PIXFORMAT_BAYER_BGGR shifted down by 1 becomes PIXFORMAT_BAYER_GRBG + vrgb_pixels_store_binary(p1, x + x_offset, vdebayer_grbg(rows.r1, rows.r2, rows.r3), pred); } } } + +static void vdebayer_gbrg_to_binary(image_t *src, rectangle_t *roi, int32_t x_offset, image_t *dst) { + // Load pixels, but, each set of 4 pixels overlaps the previous by 2 pixels. + v128_t offsets = vidup_u32_unaligned(0, 2); + + for (int32_t y = 0, y_end = roi->h - 1; y < roi->h; y += VBAYER_Y_STRIDE) { + v4x_row_ptrs_t rowptrs = vdebayer_rowptrs_init(src, y + roi->y); + uint32_t *p0 = IMAGE_COMPUTE_BINARY_PIXEL_ROW_PTR(dst, y); + uint32_t *p1 = IMAGE_COMPUTE_BINARY_PIXEL_ROW_PTR(dst, y + 1); + + for (int32_t x = 0; x < roi->w; x += VBAYER_X_STRIDE) { + v4x_rows_t rows = vdebayer_load_rows(src, rowptrs, x + roi->x, offsets); + v128_predicate_t pred = vdebayer_store_pred(roi->w, x); + + vrgb_pixels_store_binary(p0, x + x_offset, vdebayer_gbrg(rows.r0, rows.r1, rows.r2), pred); + + if (y == y_end) { + continue; + } + + // PIXFORMAT_BAYER_GBRG shifted down by 1 becomes PIXFORMAT_BAYER_RGGB + vrgb_pixels_store_binary(p1, x + x_offset, vdebayer_rggb(rows.r1, rows.r2, rows.r3), pred); + } + } +} + +static void vdebayer_grbg_to_binary(image_t *src, rectangle_t *roi, int32_t x_offset, image_t *dst) { + // Load pixels, but, each set of 4 pixels overlaps the previous by 2 pixels. 
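+    // Assuming vidup_u32_unaligned(0, 2) counts up from 0 in steps of 2 (as its name and the
+    // comment above suggest), the gather offsets are {0, 2, 4, 6, ...}: each 32-bit lane loads
+    // 4 consecutive Bayer bytes starting two columns after the previous lane. For x == 0 the
+    // lanes would cover source bytes [0..3], [2..5], [4..7], [6..9], so every output pixel
+    // pair sees its left and right neighbors in a single pass.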
+ v128_t offsets = vidup_u32_unaligned(0, 2); + + for (int32_t y = 0, y_end = roi->h - 1; y < roi->h; y += VBAYER_Y_STRIDE) { + v4x_row_ptrs_t rowptrs = vdebayer_rowptrs_init(src, y + roi->y); + uint32_t *p0 = IMAGE_COMPUTE_BINARY_PIXEL_ROW_PTR(dst, y); + uint32_t *p1 = IMAGE_COMPUTE_BINARY_PIXEL_ROW_PTR(dst, y + 1); + + for (int32_t x = 0; x < roi->w; x += VBAYER_X_STRIDE) { + v4x_rows_t rows = vdebayer_load_rows(src, rowptrs, x + roi->x, offsets); + v128_predicate_t pred = vdebayer_store_pred(roi->w, x); + + vrgb_pixels_store_binary(p0, x + x_offset, vdebayer_grbg(rows.r0, rows.r1, rows.r2), pred); + + if (y == y_end) { + continue; + } + + // PIXFORMAT_BAYER_GRBG shifted down by 1 becomes PIXFORMAT_BAYER_BGGR + vrgb_pixels_store_binary(p1, x + x_offset, vdebayer_bggr(rows.r1, rows.r2, rows.r3), pred); + } + } +} + +static void vdebayer_rggb_to_binary(image_t *src, rectangle_t *roi, int32_t x_offset, image_t *dst) { + // Load pixels, but, each set of 4 pixels overlaps the previous by 2 pixels. + v128_t offsets = vidup_u32_unaligned(0, 2); + + for (int32_t y = 0, y_end = roi->h - 1; y < roi->h; y += VBAYER_Y_STRIDE) { + v4x_row_ptrs_t rowptrs = vdebayer_rowptrs_init(src, y + roi->y); + uint32_t *p0 = IMAGE_COMPUTE_BINARY_PIXEL_ROW_PTR(dst, y); + uint32_t *p1 = IMAGE_COMPUTE_BINARY_PIXEL_ROW_PTR(dst, y + 1); + + for (int32_t x = 0; x < roi->w; x += VBAYER_X_STRIDE) { + v4x_rows_t rows = vdebayer_load_rows(src, rowptrs, x + roi->x, offsets); + v128_predicate_t pred = vdebayer_store_pred(roi->w, x); + + vrgb_pixels_store_binary(p0, x + x_offset, vdebayer_rggb(rows.r0, rows.r1, rows.r2), pred); + + if (y == y_end) { + continue; + } + + // PIXFORMAT_BAYER_RGGB shifted down by 1 becomes PIXFORMAT_BAYER_GBRG + vrgb_pixels_store_binary(p1, x + x_offset, vdebayer_gbrg(rows.r1, rows.r2, rows.r3), pred); + } + } +} + +static void vdebayer_bggr_to_grayscale(image_t *src, rectangle_t *roi, int32_t x_offset, image_t *dst) { + // Load pixels, but, each set of 4 pixels overlaps the previous by 2 pixels. + v128_t offsets = vidup_u32_unaligned(0, 2); + + for (int32_t y = 0, y_end = roi->h - 1; y < roi->h; y += VBAYER_Y_STRIDE) { + v4x_row_ptrs_t rowptrs = vdebayer_rowptrs_init(src, y + roi->y); + uint8_t *p0 = IMAGE_COMPUTE_GRAYSCALE_PIXEL_ROW_PTR(dst, y) + x_offset; + uint8_t *p1 = p0 + dst->w; + + for (int32_t x = 0; x < roi->w; x += VBAYER_X_STRIDE) { + v4x_rows_t rows = vdebayer_load_rows(src, rowptrs, x + roi->x, offsets); + v128_predicate_t pred = vdebayer_store_pred(roi->w, x); + + vrgb_pixels_store_grayscale(p0, x, vdebayer_bggr(rows.r0, rows.r1, rows.r2), pred); + + if (y == y_end) { + continue; + } + + // PIXFORMAT_BAYER_BGGR shifted down by 1 becomes PIXFORMAT_BAYER_GRBG + vrgb_pixels_store_grayscale(p1, x, vdebayer_grbg(rows.r1, rows.r2, rows.r3), pred); + } + } +} + +static void vdebayer_gbrg_to_grayscale(image_t *src, rectangle_t *roi, int32_t x_offset, image_t *dst) { + // Load pixels, but, each set of 4 pixels overlaps the previous by 2 pixels. 
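+    // Each pass of the y loop emits two output rows from one set of loads: rows r0..r2 are
+    // demosaiced as GBRG for row y, while rows r1..r3 (the same data viewed one row down,
+    // where G B / R G reads as R G / G B, i.e. RGGB) produce row y + 1. p1 = p0 + dst->w
+    // assumes the destination rows are contiguous, full-width grayscale rows.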
+ v128_t offsets = vidup_u32_unaligned(0, 2); + + for (int32_t y = 0, y_end = roi->h - 1; y < roi->h; y += VBAYER_Y_STRIDE) { + v4x_row_ptrs_t rowptrs = vdebayer_rowptrs_init(src, y + roi->y); + uint8_t *p0 = IMAGE_COMPUTE_GRAYSCALE_PIXEL_ROW_PTR(dst, y) + x_offset; + uint8_t *p1 = p0 + dst->w; + + for (int32_t x = 0; x < roi->w; x += VBAYER_X_STRIDE) { + v4x_rows_t rows = vdebayer_load_rows(src, rowptrs, x + roi->x, offsets); + v128_predicate_t pred = vdebayer_store_pred(roi->w, x); + + vrgb_pixels_store_grayscale(p0, x, vdebayer_gbrg(rows.r0, rows.r1, rows.r2), pred); + + if (y == y_end) { + continue; + } + + // PIXFORMAT_BAYER_GBRG shifted down by 1 becomes PIXFORMAT_BAYER_RGGB + vrgb_pixels_store_grayscale(p1, x, vdebayer_rggb(rows.r1, rows.r2, rows.r3), pred); + } + } +} + +static void vdebayer_grbg_to_grayscale(image_t *src, rectangle_t *roi, int32_t x_offset, image_t *dst) { + // Load pixels, but, each set of 4 pixels overlaps the previous by 2 pixels. + v128_t offsets = vidup_u32_unaligned(0, 2); + + for (int32_t y = 0, y_end = roi->h - 1; y < roi->h; y += VBAYER_Y_STRIDE) { + v4x_row_ptrs_t rowptrs = vdebayer_rowptrs_init(src, y + roi->y); + uint8_t *p0 = IMAGE_COMPUTE_GRAYSCALE_PIXEL_ROW_PTR(dst, y) + x_offset; + uint8_t *p1 = p0 + dst->w; + + for (int32_t x = 0; x < roi->w; x += VBAYER_X_STRIDE) { + v4x_rows_t rows = vdebayer_load_rows(src, rowptrs, x + roi->x, offsets); + v128_predicate_t pred = vdebayer_store_pred(roi->w, x); + + vrgb_pixels_store_grayscale(p0, x, vdebayer_grbg(rows.r0, rows.r1, rows.r2), pred); + + if (y == y_end) { + continue; + } + + // PIXFORMAT_BAYER_GRBG shifted down by 1 becomes PIXFORMAT_BAYER_BGGR + vrgb_pixels_store_grayscale(p1, x, vdebayer_bggr(rows.r1, rows.r2, rows.r3), pred); + } + } +} + +static void vdebayer_rggb_to_grayscale(image_t *src, rectangle_t *roi, int32_t x_offset, image_t *dst) { + // Load pixels, but, each set of 4 pixels overlaps the previous by 2 pixels. + v128_t offsets = vidup_u32_unaligned(0, 2); + + for (int32_t y = 0, y_end = roi->h - 1; y < roi->h; y += VBAYER_Y_STRIDE) { + v4x_row_ptrs_t rowptrs = vdebayer_rowptrs_init(src, y + roi->y); + uint8_t *p0 = IMAGE_COMPUTE_GRAYSCALE_PIXEL_ROW_PTR(dst, y) + x_offset; + uint8_t *p1 = p0 + dst->w; + + for (int32_t x = 0; x < roi->w; x += VBAYER_X_STRIDE) { + v4x_rows_t rows = vdebayer_load_rows(src, rowptrs, x + roi->x, offsets); + v128_predicate_t pred = vdebayer_store_pred(roi->w, x); + + vrgb_pixels_store_grayscale(p0, x, vdebayer_rggb(rows.r0, rows.r1, rows.r2), pred); + + if (y == y_end) { + continue; + } + + // PIXFORMAT_BAYER_RGGB shifted down by 1 becomes PIXFORMAT_BAYER_GBRG + vrgb_pixels_store_grayscale(p1, x, vdebayer_gbrg(rows.r1, rows.r2, rows.r3), pred); + } + } +} + +static void vdebayer_bggr_to_rgb565(image_t *src, rectangle_t *roi, int32_t x_offset, image_t *dst) { + // Load pixels, but, each set of 4 pixels overlaps the previous by 2 pixels. 
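+    // vdebayer_store_pred(roi->w, x) is expected to yield an all-true predicate for full
+    // interior blocks and a tail predicate for the last block of each row, so the final
+    // store only writes the remaining roi->w - x pixels. For example, with roi->w == 21 and
+    // 8 pixels per block, the blocks at x == 0 and x == 8 store 8 pixels each and the block
+    // at x == 16 stores only 5 (the exact block width depends on the vector size).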
+ v128_t offsets = vidup_u32_unaligned(0, 2); + + for (int32_t y = 0, y_end = roi->h - 1; y < roi->h; y += VBAYER_Y_STRIDE) { + v4x_row_ptrs_t rowptrs = vdebayer_rowptrs_init(src, y + roi->y); + uint16_t *p0 = IMAGE_COMPUTE_RGB565_PIXEL_ROW_PTR(dst, y) + x_offset; + uint16_t *p1 = p0 + dst->w; + + for (int32_t x = 0; x < roi->w; x += VBAYER_X_STRIDE) { + v4x_rows_t rows = vdebayer_load_rows(src, rowptrs, x + roi->x, offsets); + v128_predicate_t pred = vdebayer_store_pred(roi->w, x); + + vrgb_pixels_store_rgb565(p0, x, vdebayer_bggr(rows.r0, rows.r1, rows.r2), pred); + + if (y == y_end) { + continue; + } + + // PIXFORMAT_BAYER_BGGR shifted down by 1 becomes PIXFORMAT_BAYER_GRBG + vrgb_pixels_store_rgb565(p1, x, vdebayer_grbg(rows.r1, rows.r2, rows.r3), pred); + } + } +} + +static void vdebayer_gbrg_to_rgb565(image_t *src, rectangle_t *roi, int32_t x_offset, image_t *dst) { + // Load pixels, but, each set of 4 pixels overlaps the previous by 2 pixels. + v128_t offsets = vidup_u32_unaligned(0, 2); + + for (int32_t y = 0, y_end = roi->h - 1; y < roi->h; y += VBAYER_Y_STRIDE) { + v4x_row_ptrs_t rowptrs = vdebayer_rowptrs_init(src, y + roi->y); + uint16_t *p0 = IMAGE_COMPUTE_RGB565_PIXEL_ROW_PTR(dst, y) + x_offset; + uint16_t *p1 = p0 + dst->w; + + for (int32_t x = 0; x < roi->w; x += VBAYER_X_STRIDE) { + v4x_rows_t rows = vdebayer_load_rows(src, rowptrs, x + roi->x, offsets); + v128_predicate_t pred = vdebayer_store_pred(roi->w, x); + + vrgb_pixels_store_rgb565(p0, x, vdebayer_gbrg(rows.r0, rows.r1, rows.r2), pred); + + if (y == y_end) { + continue; + } + + // PIXFORMAT_BAYER_GBRG shifted down by 1 becomes PIXFORMAT_BAYER_RGGB + vrgb_pixels_store_rgb565(p1, x, vdebayer_rggb(rows.r1, rows.r2, rows.r3), pred); + } + } +} + +static void vdebayer_grbg_to_rgb565(image_t *src, rectangle_t *roi, int32_t x_offset, image_t *dst) { + // Load pixels, but, each set of 4 pixels overlaps the previous by 2 pixels. + v128_t offsets = vidup_u32_unaligned(0, 2); + + for (int32_t y = 0, y_end = roi->h - 1; y < roi->h; y += VBAYER_Y_STRIDE) { + v4x_row_ptrs_t rowptrs = vdebayer_rowptrs_init(src, y + roi->y); + uint16_t *p0 = IMAGE_COMPUTE_RGB565_PIXEL_ROW_PTR(dst, y) + x_offset; + uint16_t *p1 = p0 + dst->w; + + for (int32_t x = 0; x < roi->w; x += VBAYER_X_STRIDE) { + v4x_rows_t rows = vdebayer_load_rows(src, rowptrs, x + roi->x, offsets); + v128_predicate_t pred = vdebayer_store_pred(roi->w, x); + + vrgb_pixels_store_rgb565(p0, x, vdebayer_grbg(rows.r0, rows.r1, rows.r2), pred); + + if (y == y_end) { + continue; + } + + // PIXFORMAT_BAYER_GRBG shifted down by 1 becomes PIXFORMAT_BAYER_BGGR + vrgb_pixels_store_rgb565(p1, x, vdebayer_bggr(rows.r1, rows.r2, rows.r3), pred); + } + } +} + +static void vdebayer_rggb_to_rgb565(image_t *src, rectangle_t *roi, int32_t x_offset, image_t *dst) { + // Load pixels, but, each set of 4 pixels overlaps the previous by 2 pixels. 
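+    // vrgb_pixels_store_rgb565() presumably packs each 8-bit R/G/B triple into 5:6:5 form,
+    // roughly ((r << 8) & 0xF800) | ((g << 3) & 0x07E0) | (b >> 3) per pixel, matching the
+    // scalar RGB565 packing this patch removes; the helper itself is defined elsewhere in
+    // the patch and is not shown in this hunk.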
+ v128_t offsets = vidup_u32_unaligned(0, 2); + + for (int32_t y = 0, y_end = roi->h - 1; y < roi->h; y += VBAYER_Y_STRIDE) { + v4x_row_ptrs_t rowptrs = vdebayer_rowptrs_init(src, y + roi->y); + uint16_t *p0 = IMAGE_COMPUTE_RGB565_PIXEL_ROW_PTR(dst, y) + x_offset; + uint16_t *p1 = p0 + dst->w; + + for (int32_t x = 0; x < roi->w; x += VBAYER_X_STRIDE) { + v4x_rows_t rows = vdebayer_load_rows(src, rowptrs, x + roi->x, offsets); + v128_predicate_t pred = vdebayer_store_pred(roi->w, x); + + vrgb_pixels_store_rgb565(p0, x, vdebayer_rggb(rows.r0, rows.r1, rows.r2), pred); + + if (y == y_end) { + continue; + } + + // PIXFORMAT_BAYER_RGGB shifted down by 1 becomes PIXFORMAT_BAYER_GBRG + vrgb_pixels_store_rgb565(p1, x, vdebayer_gbrg(rows.r1, rows.r2, rows.r3), pred); + } + } +} +#else +static void vdebayer_to_binary(image_t *src, rectangle_t *roi, int32_t x_offset, image_t *dst) { + // Load pixels, but, each set of 4 pixels overlaps the previous by 2 pixels. + v128_t offsets = vidup_u32_unaligned(0, 2); + + for (int32_t y = 0, y_end = roi->h - 1; y < roi->h; y += VBAYER_Y_STRIDE) { + v4x_row_ptrs_t rowptrs = vdebayer_rowptrs_init(src, y + roi->y); + uint32_t *p0 = IMAGE_COMPUTE_BINARY_PIXEL_ROW_PTR(dst, y); + uint32_t *p1 = IMAGE_COMPUTE_BINARY_PIXEL_ROW_PTR(dst, y + 1); + + for (int32_t x = 0; x < roi->w; x += VBAYER_X_STRIDE) { + v4x_rows_t rows = vdebayer_load_rows(src, rowptrs, x + roi->x, offsets); + v128_predicate_t pred = vdebayer_store_pred(roi->w, x); + + vrgb_pixels_store_binary(p0, x + x_offset, vdebayer_all_0(src, rows.r0, rows.r1, rows.r2), pred); + + if (y == y_end) { + continue; + } + + vrgb_pixels_store_binary(p1, x + x_offset, vdebayer_all_1(src, rows.r1, rows.r2, rows.r3), pred); + } + } +} + +static void vdebayer_to_grayscale(image_t *src, rectangle_t *roi, int32_t x_offset, image_t *dst) { + // Load pixels, but, each set of 4 pixels overlaps the previous by 2 pixels. + v128_t offsets = vidup_u32_unaligned(0, 2); + + for (int32_t y = 0, y_end = roi->h - 1; y < roi->h; y += VBAYER_Y_STRIDE) { + v4x_row_ptrs_t rowptrs = vdebayer_rowptrs_init(src, y + roi->y); + uint8_t *p0 = IMAGE_COMPUTE_GRAYSCALE_PIXEL_ROW_PTR(dst, y) + x_offset; + uint8_t *p1 = p0 + dst->w; + + for (int32_t x = 0; x < roi->w; x += VBAYER_X_STRIDE) { + v4x_rows_t rows = vdebayer_load_rows(src, rowptrs, x + roi->x, offsets); + v128_predicate_t pred = vdebayer_store_pred(roi->w, x); + + vrgb_pixels_store_grayscale(p0, x, vdebayer_all_0(src, rows.r0, rows.r1, rows.r2), pred); + + if (y == y_end) { + continue; + } + + vrgb_pixels_store_grayscale(p1, x, vdebayer_all_1(src, rows.r1, rows.r2, rows.r3), pred); + } + } +} + +static void vdebayer_to_rgb565(image_t *src, rectangle_t *roi, int32_t x_offset, image_t *dst) { + // Load pixels, but, each set of 4 pixels overlaps the previous by 2 pixels. 
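+    // In this non-optimized build, vdebayer_all_0()/vdebayer_all_1() take src and presumably
+    // dispatch on src->pixfmt inside the inner loop instead of using the per-pattern
+    // specializations above, trading some speed for a smaller flash footprint when
+    // IMLIB_ENABLE_DEBAYER_OPTIMIZATION is not defined.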
+ v128_t offsets = vidup_u32_unaligned(0, 2); + + for (int32_t y = 0, y_end = roi->h - 1; y < roi->h; y += VBAYER_Y_STRIDE) { + v4x_row_ptrs_t rowptrs = vdebayer_rowptrs_init(src, y + roi->y); + uint16_t *p0 = IMAGE_COMPUTE_RGB565_PIXEL_ROW_PTR(dst, y) + x_offset; + uint16_t *p1 = p0 + dst->w; + + for (int32_t x = 0; x < roi->w; x += VBAYER_X_STRIDE) { + v4x_rows_t rows = vdebayer_load_rows(src, rowptrs, x + roi->x, offsets); + v128_predicate_t pred = vdebayer_store_pred(roi->w, x); + + vrgb_pixels_store_rgb565(p0, x, vdebayer_all_0(src, rows.r0, rows.r1, rows.r2), pred); + + if (y == y_end) { + continue; + } + + vrgb_pixels_store_rgb565(p1, x, vdebayer_all_1(src, rows.r1, rows.r2, rows.r3), pred); + } + } +} +#endif // IMLIB_ENABLE_DEBAYER_OPTIMIZATION + +static void vdebayer(image_t *src, rectangle_t *roi, int32_t x_offset, image_t *dst) { + #if defined(IMLIB_ENABLE_DEBAYER_OPTIMIZATION) + switch (src->pixfmt) { + case PIXFORMAT_BAYER_BGGR: { + switch (dst->pixfmt) { + case PIXFORMAT_BINARY: { + vdebayer_bggr_to_binary(src, roi, x_offset, dst); + break; + } + case PIXFORMAT_GRAYSCALE: { + vdebayer_bggr_to_grayscale(src, roi, x_offset, dst); + break; + } + case PIXFORMAT_RGB565: { + vdebayer_bggr_to_rgb565(src, roi, x_offset, dst); + break; + } + default: { + __builtin_unreachable(); + } + } + break; + } + case PIXFORMAT_BAYER_GBRG: { + switch (dst->pixfmt) { + case PIXFORMAT_BINARY: { + vdebayer_gbrg_to_binary(src, roi, x_offset, dst); + break; + } + case PIXFORMAT_GRAYSCALE: { + vdebayer_gbrg_to_grayscale(src, roi, x_offset, dst); + break; + } + case PIXFORMAT_RGB565: { + vdebayer_gbrg_to_rgb565(src, roi, x_offset, dst); + break; + } + default: { + __builtin_unreachable(); + } + } + break; + } + case PIXFORMAT_BAYER_GRBG: { + switch (dst->pixfmt) { + case PIXFORMAT_BINARY: { + vdebayer_grbg_to_binary(src, roi, x_offset, dst); + break; + } + case PIXFORMAT_GRAYSCALE: { + vdebayer_grbg_to_grayscale(src, roi, x_offset, dst); + break; + } + case PIXFORMAT_RGB565: { + vdebayer_grbg_to_rgb565(src, roi, x_offset, dst); + break; + } + default: { + __builtin_unreachable(); + } + } + break; + } + case PIXFORMAT_BAYER_RGGB: { + switch (dst->pixfmt) { + case PIXFORMAT_BINARY: { + vdebayer_rggb_to_binary(src, roi, x_offset, dst); + break; + } + case PIXFORMAT_GRAYSCALE: { + vdebayer_rggb_to_grayscale(src, roi, x_offset, dst); + break; + } + case PIXFORMAT_RGB565: { + vdebayer_rggb_to_rgb565(src, roi, x_offset, dst); + break; + } + default: { + __builtin_unreachable(); + } + } + break; + } + default: { + __builtin_unreachable(); + } + } + #else + switch (dst->pixfmt) { + case PIXFORMAT_BINARY: { + vdebayer_to_binary(src, roi, x_offset, dst); + break; + } + case PIXFORMAT_GRAYSCALE: { + vdebayer_to_grayscale(src, roi, x_offset, dst); + break; + } + case PIXFORMAT_RGB565: { + vdebayer_to_rgb565(src, roi, x_offset, dst); + break; + } + default: { + __builtin_unreachable(); + } + } + #endif // IMLIB_ENABLE_DEBAYER_OPTIMIZATION +} + +// assumes dst->w == src->w +// assumes dst->h == 1 +void imlib_debayer_line(int x_start, int x_end, int y_row, void *dst_row_ptr, pixformat_t pixfmt, image_t *src) { + rectangle_t roi = { + .x = x_start, + .y = y_row, + .w = x_end - x_start, + .h = 1, + }; + image_t dst = { + .w = src->w, + .h = 1, + .pixfmt = pixfmt, + .data = dst_row_ptr, + }; + vdebayer(src, &roi, x_start, &dst); +} + +// assumes dst->w == src->w +// assumes dst->h == src->h +// src and dst may not overlap, but, faster than imlib_debayer_image_awb +void imlib_debayer_image(image_t *dst, image_t *src) { + 
OMV_PROFILE_START(); + rectangle_t roi = { + .x = 0, + .y = 0, + .w = src->w, + .h = src->h, + }; + vdebayer(src, &roi, 0, dst); + OMV_PROFILE_PRINT(); +} + +#if defined(IMLIB_ENABLE_DEBAYER_OPTIMIZATION) +static void vdebayer_bggr_to_ycbcr(image_t *src, rectangle_t *roi, int8_t *Y0, int8_t *CB, int8_t *CR) { + // Load pixels, but, each set of 4 pixels overlaps the previous by 2 pixels. + v128_t offsets = vidup_u32_unaligned(0, 2); + + for (int32_t y = 0, y_end = roi->h - 1; y < roi->h; y += VBAYER_Y_STRIDE) { + v4x_row_ptrs_t rowptrs = vdebayer_rowptrs_init(src, y + roi->y); + uint8_t *Y0p0 = ((uint8_t *) Y0) + (y * JPEG_MCU_W); + uint8_t *CBp0 = ((uint8_t *) CB) + (y * JPEG_MCU_W); + uint8_t *CRp0 = ((uint8_t *) CR) + (y * JPEG_MCU_W); + uint8_t *Y0p1 = Y0p0 + JPEG_MCU_W; + uint8_t *CBp1 = CBp0 + JPEG_MCU_W; + uint8_t *CRp1 = CRp0 + JPEG_MCU_W; + + for (int32_t x = 0; x < roi->w; x += VBAYER_X_STRIDE) { + v4x_rows_t rows = vdebayer_load_rows(src, rowptrs, x + roi->x, offsets); + v128_predicate_t pred = vdebayer_store_pred(roi->w, x); + + vrgb_pixels_t pixels0 = vdebayer_bggr(rows.r0, rows.r1, rows.r2); + vstr_u16_narrow_u8_pred(Y0p0 + x, vdebayer_to_y(pixels0), pred); + vstr_u16_narrow_u8_pred(CBp0 + x, vdebayer_to_cb(pixels0), pred); + vstr_u16_narrow_u8_pred(CRp0 + x, vdebayer_to_cr(pixels0), pred); + + if (y == y_end) { + continue; + } + + // PIXFORMAT_BAYER_BGGR shifted down by 1 becomes PIXFORMAT_BAYER_GRBG + vrgb_pixels_t pixels1 = vdebayer_grbg(rows.r1, rows.r2, rows.r3); + vstr_u16_narrow_u8_pred(Y0p1 + x, vdebayer_to_y(pixels1), pred); + vstr_u16_narrow_u8_pred(CBp1 + x, vdebayer_to_cb(pixels1), pred); + vstr_u16_narrow_u8_pred(CRp1 + x, vdebayer_to_cr(pixels1), pred); + } + } +} + +static void vdebayer_gbrg_to_ycbcr(image_t *src, rectangle_t *roi, int8_t *Y0, int8_t *CB, int8_t *CR) { + // Load pixels, but, each set of 4 pixels overlaps the previous by 2 pixels. + v128_t offsets = vidup_u32_unaligned(0, 2); + + for (int32_t y = 0, y_end = roi->h - 1; y < roi->h; y += VBAYER_Y_STRIDE) { + v4x_row_ptrs_t rowptrs = vdebayer_rowptrs_init(src, y + roi->y); + uint8_t *Y0p0 = ((uint8_t *) Y0) + (y * JPEG_MCU_W); + uint8_t *CBp0 = ((uint8_t *) CB) + (y * JPEG_MCU_W); + uint8_t *CRp0 = ((uint8_t *) CR) + (y * JPEG_MCU_W); + uint8_t *Y0p1 = Y0p0 + JPEG_MCU_W; + uint8_t *CBp1 = CBp0 + JPEG_MCU_W; + uint8_t *CRp1 = CRp0 + JPEG_MCU_W; + + for (int32_t x = 0; x < roi->w; x += VBAYER_X_STRIDE) { + v4x_rows_t rows = vdebayer_load_rows(src, rowptrs, x + roi->x, offsets); + v128_predicate_t pred = vdebayer_store_pred(roi->w, x); + + vrgb_pixels_t pixels0 = vdebayer_gbrg(rows.r0, rows.r1, rows.r2); + vstr_u16_narrow_u8_pred(Y0p0 + x, vdebayer_to_y(pixels0), pred); + vstr_u16_narrow_u8_pred(CBp0 + x, vdebayer_to_cb(pixels0), pred); + vstr_u16_narrow_u8_pred(CRp0 + x, vdebayer_to_cr(pixels0), pred); + + if (y == y_end) { + continue; + } + + // PIXFORMAT_BAYER_GBRG shifted down by 1 becomes PIXFORMAT_BAYER_RGGB + vrgb_pixels_t pixels1 = vdebayer_rggb(rows.r1, rows.r2, rows.r3); + vstr_u16_narrow_u8_pred(Y0p1 + x, vdebayer_to_y(pixels1), pred); + vstr_u16_narrow_u8_pred(CBp1 + x, vdebayer_to_cb(pixels1), pred); + vstr_u16_narrow_u8_pred(CRp1 + x, vdebayer_to_cr(pixels1), pred); + } + } +} + +static void vdebayer_grbg_to_ycbcr(image_t *src, rectangle_t *roi, int8_t *Y0, int8_t *CB, int8_t *CR) { + // Load pixels, but, each set of 4 pixels overlaps the previous by 2 pixels. 
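+    // The Y/CB/CR outputs are written as planar blocks that are JPEG_MCU_W bytes wide: each
+    // pass fills two rows of every plane, stepping the row pointers by JPEG_MCU_W, and
+    // vstr_u16_narrow_u8_pred() presumably narrows the 16-bit per-lane results of
+    // vdebayer_to_y/_cb/_cr() to bytes under the same tail predicate as the rest of the row.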
+ v128_t offsets = vidup_u32_unaligned(0, 2); + + for (int32_t y = 0, y_end = roi->h - 1; y < roi->h; y += VBAYER_Y_STRIDE) { + v4x_row_ptrs_t rowptrs = vdebayer_rowptrs_init(src, y + roi->y); + uint8_t *Y0p0 = ((uint8_t *) Y0) + (y * JPEG_MCU_W); + uint8_t *CBp0 = ((uint8_t *) CB) + (y * JPEG_MCU_W); + uint8_t *CRp0 = ((uint8_t *) CR) + (y * JPEG_MCU_W); + uint8_t *Y0p1 = Y0p0 + JPEG_MCU_W; + uint8_t *CBp1 = CBp0 + JPEG_MCU_W; + uint8_t *CRp1 = CRp0 + JPEG_MCU_W; + + for (int32_t x = 0; x < roi->w; x += VBAYER_X_STRIDE) { + v4x_rows_t rows = vdebayer_load_rows(src, rowptrs, x + roi->x, offsets); + v128_predicate_t pred = vdebayer_store_pred(roi->w, x); + + vrgb_pixels_t pixels0 = vdebayer_grbg(rows.r0, rows.r1, rows.r2); + vstr_u16_narrow_u8_pred(Y0p0 + x, vdebayer_to_y(pixels0), pred); + vstr_u16_narrow_u8_pred(CBp0 + x, vdebayer_to_cb(pixels0), pred); + vstr_u16_narrow_u8_pred(CRp0 + x, vdebayer_to_cr(pixels0), pred); + + if (y == y_end) { + continue; + } + + // PIXFORMAT_BAYER_GRBG shifted down by 1 becomes PIXFORMAT_BAYER_BGGR + vrgb_pixels_t pixels1 = vdebayer_bggr(rows.r1, rows.r2, rows.r3); + vstr_u16_narrow_u8_pred(Y0p1 + x, vdebayer_to_y(pixels1), pred); + vstr_u16_narrow_u8_pred(CBp1 + x, vdebayer_to_cb(pixels1), pred); + vstr_u16_narrow_u8_pred(CRp1 + x, vdebayer_to_cr(pixels1), pred); + } + } +} + +static void vdebayer_rggb_to_ycbcr(image_t *src, rectangle_t *roi, int8_t *Y0, int8_t *CB, int8_t *CR) { + // Load pixels, but, each set of 4 pixels overlaps the previous by 2 pixels. + v128_t offsets = vidup_u32_unaligned(0, 2); + + for (int32_t y = 0, y_end = roi->h - 1; y < roi->h; y += VBAYER_Y_STRIDE) { + v4x_row_ptrs_t rowptrs = vdebayer_rowptrs_init(src, y + roi->y); + uint8_t *Y0p0 = ((uint8_t *) Y0) + (y * JPEG_MCU_W); + uint8_t *CBp0 = ((uint8_t *) CB) + (y * JPEG_MCU_W); + uint8_t *CRp0 = ((uint8_t *) CR) + (y * JPEG_MCU_W); + uint8_t *Y0p1 = Y0p0 + JPEG_MCU_W; + uint8_t *CBp1 = CBp0 + JPEG_MCU_W; + uint8_t *CRp1 = CRp0 + JPEG_MCU_W; + + for (int32_t x = 0; x < roi->w; x += VBAYER_X_STRIDE) { + v4x_rows_t rows = vdebayer_load_rows(src, rowptrs, x + roi->x, offsets); + v128_predicate_t pred = vdebayer_store_pred(roi->w, x); + + vrgb_pixels_t pixels0 = vdebayer_rggb(rows.r0, rows.r1, rows.r2); + vstr_u16_narrow_u8_pred(Y0p0 + x, vdebayer_to_y(pixels0), pred); + vstr_u16_narrow_u8_pred(CBp0 + x, vdebayer_to_cb(pixels0), pred); + vstr_u16_narrow_u8_pred(CRp0 + x, vdebayer_to_cr(pixels0), pred); + + if (y == y_end) { + continue; + } + + // PIXFORMAT_BAYER_RGGB shifted down by 1 becomes PIXFORMAT_BAYER_GBRG + vrgb_pixels_t pixels1 = vdebayer_gbrg(rows.r1, rows.r2, rows.r3); + vstr_u16_narrow_u8_pred(Y0p1 + x, vdebayer_to_y(pixels1), pred); + vstr_u16_narrow_u8_pred(CBp1 + x, vdebayer_to_cb(pixels1), pred); + vstr_u16_narrow_u8_pred(CRp1 + x, vdebayer_to_cr(pixels1), pred); + } + } +} + +void imlib_debayer_ycbcr(image_t *src, rectangle_t *roi, int8_t *Y0, int8_t *CB, int8_t *CR) { + switch (src->pixfmt) { + case PIXFORMAT_BAYER_BGGR: { + vdebayer_bggr_to_ycbcr(src, roi, Y0, CB, CR); + break; + } + case PIXFORMAT_BAYER_GBRG: { + vdebayer_gbrg_to_ycbcr(src, roi, Y0, CB, CR); + break; + } + case PIXFORMAT_BAYER_GRBG: { + vdebayer_grbg_to_ycbcr(src, roi, Y0, CB, CR); + break; + } + case PIXFORMAT_BAYER_RGGB: { + vdebayer_rggb_to_ycbcr(src, roi, Y0, CB, CR); + break; + } + default: { + __builtin_unreachable(); + } + } +} +#else +void imlib_debayer_ycbcr(image_t *src, rectangle_t *roi, int8_t *Y0, int8_t *CB, int8_t *CR) { + // Load pixels, but, each set of 4 pixels overlaps the 
previous by 2 pixels. + v128_t offsets = vidup_u32_unaligned(0, 2); + + for (int32_t y = 0, y_end = roi->h - 1; y < roi->h; y += VBAYER_Y_STRIDE) { + v4x_row_ptrs_t rowptrs = vdebayer_rowptrs_init(src, y + roi->y); + uint8_t *Y0p0 = ((uint8_t *) Y0) + (y * JPEG_MCU_W); + uint8_t *CBp0 = ((uint8_t *) CB) + (y * JPEG_MCU_W); + uint8_t *CRp0 = ((uint8_t *) CR) + (y * JPEG_MCU_W); + uint8_t *Y0p1 = Y0p0 + JPEG_MCU_W; + uint8_t *CBp1 = CBp0 + JPEG_MCU_W; + uint8_t *CRp1 = CRp0 + JPEG_MCU_W; + + for (int32_t x = 0; x < roi->w; x += VBAYER_X_STRIDE) { + v4x_rows_t rows = vdebayer_load_rows(src, rowptrs, x + roi->x, offsets); + v128_predicate_t pred = vdebayer_store_pred(roi->w, x); + + vrgb_pixels_t pixels0 = vdebayer_all_0(src, rows.r0, rows.r1, rows.r2); + vstr_u16_narrow_u8_pred(Y0p0 + x, vdebayer_to_y(pixels0), pred); + vstr_u16_narrow_u8_pred(CBp0 + x, vdebayer_to_cb(pixels0), pred); + vstr_u16_narrow_u8_pred(CRp0 + x, vdebayer_to_cr(pixels0), pred); + + if (y == y_end) { + continue; + } + + // PIXFORMAT_BAYER_RGGB shifted down by 1 becomes PIXFORMAT_BAYER_GBRG + vrgb_pixels_t pixels1 = vdebayer_all_1(src, rows.r1, rows.r2, rows.r3); + vstr_u16_narrow_u8_pred(Y0p1 + x, vdebayer_to_y(pixels1), pred); + vstr_u16_narrow_u8_pred(CBp1 + x, vdebayer_to_cb(pixels1), pred); + vstr_u16_narrow_u8_pred(CRp1 + x, vdebayer_to_cr(pixels1), pred); + } + } +} +#endif // IMLIB_ENABLE_DEBAYER_OPTIMIZATION + +static void vdebayer_bggr_to_grayscale_awb(image_t *src, image_t *dst, image_t *buf, + uint32_t red_gain, uint32_t blue_gain) { + // Load pixels, but, each set of 4 pixels overlaps the previous by 2 pixels. + v128_t offsets = vidup_u32_unaligned(0, 2); + + for (int32_t y = 0, y_end = (src->h / VBAYER_Y_STRIDE) * VBAYER_Y_STRIDE; y < y_end; y += VBAYER_Y_STRIDE) { + v4x_row_ptrs_t rowptrs = vdebayer_rowptrs_init(src, y); + uint8_t *p0 = IMAGE_COMPUTE_GRAYSCALE_PIXEL_ROW_PTR(buf, (y % VBAYER_BUF_BROWS)); + uint8_t *p1 = IMAGE_COMPUTE_GRAYSCALE_PIXEL_ROW_PTR(buf, ((y + 1) % VBAYER_BUF_BROWS)); + + v4x_rows_t rows = vdebayer_load_rows_inner(rowptrs, 0, offsets, vdebayer_load_pred(src, 0)); + v128_predicate_t pred = vdebayer_store_pred(src->w, 0); + + vrgb_pixels_t pixels0 = vdebayer_bggr(rows.r0, rows.r1, rows.r2); + vrgb_pixels_store_grayscale(p0, 0, vdebayer_apply_rb_gain(pixels0, red_gain, blue_gain), pred); + + // PIXFORMAT_BAYER_BGGR shifted down by 1 becomes PIXFORMAT_BAYER_GRBG + vrgb_pixels_t pixels1 = vdebayer_grbg(rows.r1, rows.r2, rows.r3); + vrgb_pixels_store_grayscale(p1, 0, vdebayer_apply_rb_gain(pixels1, red_gain, blue_gain), pred); + + int32_t x = VBAYER_X_STRIDE; + for (int32_t x_end = src->w - VBAYER_X_STRIDE; x <= x_end; x += VBAYER_X_STRIDE) { + v4x_rows_t rows; + rows.r0 = vldr_u32_gather_unaligned(rowptrs.p0.u8 + x, offsets); + rows.r1 = vldr_u32_gather_unaligned(rowptrs.p1.u8 + x, offsets); + rows.r2 = vldr_u32_gather_unaligned(rowptrs.p2.u8 + x, offsets); + rows.r3 = vldr_u32_gather_unaligned(rowptrs.p3.u8 + x, offsets); + + vrgb_pixels_t pixels0 = vdebayer_bggr(rows.r0, rows.r1, rows.r2); + vrgb_pixels_store_grayscale(p0, x, vdebayer_apply_rb_gain(pixels0, red_gain, blue_gain), pred); + + // PIXFORMAT_BAYER_BGGR shifted down by 1 becomes PIXFORMAT_BAYER_GRBG + vrgb_pixels_t pixels1 = vdebayer_grbg(rows.r1, rows.r2, rows.r3); + vrgb_pixels_store_grayscale(p1, x, vdebayer_apply_rb_gain(pixels1, red_gain, blue_gain), pred); + } + + if (x < src->w) { + v4x_rows_t rows = vdebayer_load_rows_inner(rowptrs, x, offsets, vdebayer_load_pred(src, x)); + v128_predicate_t pred = 
vdebayer_store_pred(src->w, x); + + vrgb_pixels_t pixels0 = vdebayer_bggr(rows.r0, rows.r1, rows.r2); + vrgb_pixels_store_grayscale(p0, x, vdebayer_apply_rb_gain(pixels0, red_gain, blue_gain), pred); + + // PIXFORMAT_BAYER_BGGR shifted down by 1 becomes PIXFORMAT_BAYER_GRBG + vrgb_pixels_t pixels1 = vdebayer_grbg(rows.r1, rows.r2, rows.r3); + vrgb_pixels_store_grayscale(p1, x, vdebayer_apply_rb_gain(pixels1, red_gain, blue_gain), pred); + } + + vdebayer_grayscale_buf_copy(y, buf, dst); + } + + // Last odd row. + if (src->h % VBAYER_Y_STRIDE) { + int32_t y = src->h - 1; + v4x_row_ptrs_t rowptrs = vdebayer_rowptrs_init(src, y); + uint8_t *p0 = IMAGE_COMPUTE_GRAYSCALE_PIXEL_ROW_PTR(buf, (y % VBAYER_BUF_BROWS)); + + v4x_rows_t rows = vdebayer_load_rows_inner(rowptrs, 0, offsets, vdebayer_load_pred(src, 0)); + v128_predicate_t pred = vdebayer_store_pred(src->w, 0); + + vrgb_pixels_t pixels0 = vdebayer_bggr(rows.r0, rows.r1, rows.r2); + vrgb_pixels_store_grayscale(p0, 0, vdebayer_apply_rb_gain(pixels0, red_gain, blue_gain), pred); + + int32_t x = VBAYER_X_STRIDE; + for (int32_t x_end = src->w - VBAYER_X_STRIDE; x <= x_end; x += VBAYER_X_STRIDE) { + v4x_rows_t rows; + rows.r0 = vldr_u32_gather_unaligned(rowptrs.p0.u8 + x, offsets); + rows.r1 = vldr_u32_gather_unaligned(rowptrs.p1.u8 + x, offsets); + rows.r2 = vldr_u32_gather_unaligned(rowptrs.p2.u8 + x, offsets); + + vrgb_pixels_t pixels0 = vdebayer_bggr(rows.r0, rows.r1, rows.r2); + vrgb_pixels_store_grayscale(p0, x, vdebayer_apply_rb_gain(pixels0, red_gain, blue_gain), pred); + + } + + if (x < src->w) { + v4x_rows_t rows = vdebayer_load_rows_inner(rowptrs, x, offsets, vdebayer_load_pred(src, x)); + v128_predicate_t pred = vdebayer_store_pred(src->w, x); + + vrgb_pixels_t pixels0 = vdebayer_bggr(rows.r0, rows.r1, rows.r2); + vrgb_pixels_store_grayscale(p0, x, vdebayer_apply_rb_gain(pixels0, red_gain, blue_gain), pred); + + } + + vdebayer_grayscale_buf_copy(y, buf, dst); + } +} + +static void vdebayer_gbrg_to_grayscale_awb(image_t *src, image_t *dst, image_t *buf, + uint32_t red_gain, uint32_t blue_gain) { + // Load pixels, but, each set of 4 pixels overlaps the previous by 2 pixels. 
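+    // Same two-rows-per-pass structure as the plain converters above, but the output first
+    // lands in a small ring buffer: rows go into buf at (y % VBAYER_BUF_BROWS) and
+    // vdebayer_grayscale_buf_copy() flushes them to dst, presumably so the conversion can run
+    // in place over the Bayer source. The first and last column blocks use the predicated
+    // vdebayer_load_rows_inner() path, while full interior blocks use plain unpredicated
+    // gathers; vdebayer_apply_rb_gain() presumably scales R and B by red_gain/blue_gain.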
+ v128_t offsets = vidup_u32_unaligned(0, 2); + + for (int32_t y = 0, y_end = (src->h / VBAYER_Y_STRIDE) * VBAYER_Y_STRIDE; y < y_end; y += VBAYER_Y_STRIDE) { + v4x_row_ptrs_t rowptrs = vdebayer_rowptrs_init(src, y); + uint8_t *p0 = IMAGE_COMPUTE_GRAYSCALE_PIXEL_ROW_PTR(buf, (y % VBAYER_BUF_BROWS)); + uint8_t *p1 = IMAGE_COMPUTE_GRAYSCALE_PIXEL_ROW_PTR(buf, ((y + 1) % VBAYER_BUF_BROWS)); + + v4x_rows_t rows = vdebayer_load_rows_inner(rowptrs, 0, offsets, vdebayer_load_pred(src, 0)); + v128_predicate_t pred = vdebayer_store_pred(src->w, 0); + + vrgb_pixels_t pixels0 = vdebayer_gbrg(rows.r0, rows.r1, rows.r2); + vrgb_pixels_store_grayscale(p0, 0, vdebayer_apply_rb_gain(pixels0, red_gain, blue_gain), pred); + + // PIXFORMAT_BAYER_GBRG shifted down by 1 becomes PIXFORMAT_BAYER_RGGB + vrgb_pixels_t pixels1 = vdebayer_rggb(rows.r1, rows.r2, rows.r3); + vrgb_pixels_store_grayscale(p1, 0, vdebayer_apply_rb_gain(pixels1, red_gain, blue_gain), pred); + + int32_t x = VBAYER_X_STRIDE; + for (int32_t x_end = src->w - VBAYER_X_STRIDE; x <= x_end; x += VBAYER_X_STRIDE) { + v4x_rows_t rows; + rows.r0 = vldr_u32_gather_unaligned(rowptrs.p0.u8 + x, offsets); + rows.r1 = vldr_u32_gather_unaligned(rowptrs.p1.u8 + x, offsets); + rows.r2 = vldr_u32_gather_unaligned(rowptrs.p2.u8 + x, offsets); + rows.r3 = vldr_u32_gather_unaligned(rowptrs.p3.u8 + x, offsets); + + vrgb_pixels_t pixels0 = vdebayer_gbrg(rows.r0, rows.r1, rows.r2); + vrgb_pixels_store_grayscale(p0, x, vdebayer_apply_rb_gain(pixels0, red_gain, blue_gain), pred); + + // PIXFORMAT_BAYER_GBRG shifted down by 1 becomes PIXFORMAT_BAYER_RGGB + vrgb_pixels_t pixels1 = vdebayer_rggb(rows.r1, rows.r2, rows.r3); + vrgb_pixels_store_grayscale(p1, x, vdebayer_apply_rb_gain(pixels1, red_gain, blue_gain), pred); + } + + if (x < src->w) { + v4x_rows_t rows = vdebayer_load_rows_inner(rowptrs, x, offsets, vdebayer_load_pred(src, x)); + v128_predicate_t pred = vdebayer_store_pred(src->w, x); + + vrgb_pixels_t pixels0 = vdebayer_gbrg(rows.r0, rows.r1, rows.r2); + vrgb_pixels_store_grayscale(p0, x, vdebayer_apply_rb_gain(pixels0, red_gain, blue_gain), pred); + + // PIXFORMAT_BAYER_GBRG shifted down by 1 becomes PIXFORMAT_BAYER_RGGB + vrgb_pixels_t pixels1 = vdebayer_rggb(rows.r1, rows.r2, rows.r3); + vrgb_pixels_store_grayscale(p1, x, vdebayer_apply_rb_gain(pixels1, red_gain, blue_gain), pred); + } + + vdebayer_grayscale_buf_copy(y, buf, dst); + } + + // Last odd row. 
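+    // When src->h is odd the pairing above leaves one row over: it is demosaiced on its own
+    // below from rows r0..r2 only, and no pattern-shifted second output row is emitted.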
+ if (src->h % VBAYER_Y_STRIDE) { + int32_t y = src->h - 1; + v4x_row_ptrs_t rowptrs = vdebayer_rowptrs_init(src, y); + uint8_t *p0 = IMAGE_COMPUTE_GRAYSCALE_PIXEL_ROW_PTR(buf, (y % VBAYER_BUF_BROWS)); + + v4x_rows_t rows = vdebayer_load_rows_inner(rowptrs, 0, offsets, vdebayer_load_pred(src, 0)); + v128_predicate_t pred = vdebayer_store_pred(src->w, 0); + + vrgb_pixels_t pixels0 = vdebayer_gbrg(rows.r0, rows.r1, rows.r2); + vrgb_pixels_store_grayscale(p0, 0, vdebayer_apply_rb_gain(pixels0, red_gain, blue_gain), pred); + + int32_t x = VBAYER_X_STRIDE; + for (int32_t x_end = src->w - VBAYER_X_STRIDE; x <= x_end; x += VBAYER_X_STRIDE) { + v4x_rows_t rows; + rows.r0 = vldr_u32_gather_unaligned(rowptrs.p0.u8 + x, offsets); + rows.r1 = vldr_u32_gather_unaligned(rowptrs.p1.u8 + x, offsets); + rows.r2 = vldr_u32_gather_unaligned(rowptrs.p2.u8 + x, offsets); + + vrgb_pixels_t pixels0 = vdebayer_gbrg(rows.r0, rows.r1, rows.r2); + vrgb_pixels_store_grayscale(p0, x, vdebayer_apply_rb_gain(pixels0, red_gain, blue_gain), pred); + } + + if (x < src->w) { + v4x_rows_t rows = vdebayer_load_rows_inner(rowptrs, x, offsets, vdebayer_load_pred(src, x)); + v128_predicate_t pred = vdebayer_store_pred(src->w, x); + + vrgb_pixels_t pixels0 = vdebayer_gbrg(rows.r0, rows.r1, rows.r2); + vrgb_pixels_store_grayscale(p0, x, vdebayer_apply_rb_gain(pixels0, red_gain, blue_gain), pred); + } + + vdebayer_grayscale_buf_copy(y, buf, dst); + } +} + +static void vdebayer_grbg_to_grayscale_awb(image_t *src, image_t *dst, image_t *buf, + uint32_t red_gain, uint32_t blue_gain) { + // Load pixels, but, each set of 4 pixels overlaps the previous by 2 pixels. + v128_t offsets = vidup_u32_unaligned(0, 2); + + for (int32_t y = 0, y_end = (src->h / VBAYER_Y_STRIDE) * VBAYER_Y_STRIDE; y < y_end; y += VBAYER_Y_STRIDE) { + v4x_row_ptrs_t rowptrs = vdebayer_rowptrs_init(src, y); + uint8_t *p0 = IMAGE_COMPUTE_GRAYSCALE_PIXEL_ROW_PTR(buf, (y % VBAYER_BUF_BROWS)); + uint8_t *p1 = IMAGE_COMPUTE_GRAYSCALE_PIXEL_ROW_PTR(buf, ((y + 1) % VBAYER_BUF_BROWS)); + + v4x_rows_t rows = vdebayer_load_rows_inner(rowptrs, 0, offsets, vdebayer_load_pred(src, 0)); + v128_predicate_t pred = vdebayer_store_pred(src->w, 0); + + vrgb_pixels_t pixels0 = vdebayer_grbg(rows.r0, rows.r1, rows.r2); + vrgb_pixels_store_grayscale(p0, 0, vdebayer_apply_rb_gain(pixels0, red_gain, blue_gain), pred); + + // PIXFORMAT_BAYER_GRBG shifted down by 1 becomes PIXFORMAT_BAYER_BGGR + vrgb_pixels_t pixels1 = vdebayer_bggr(rows.r1, rows.r2, rows.r3); + vrgb_pixels_store_grayscale(p1, 0, vdebayer_apply_rb_gain(pixels1, red_gain, blue_gain), pred); + + int32_t x = VBAYER_X_STRIDE; + for (int32_t x_end = src->w - VBAYER_X_STRIDE; x <= x_end; x += VBAYER_X_STRIDE) { + v4x_rows_t rows; + rows.r0 = vldr_u32_gather_unaligned(rowptrs.p0.u8 + x, offsets); + rows.r1 = vldr_u32_gather_unaligned(rowptrs.p1.u8 + x, offsets); + rows.r2 = vldr_u32_gather_unaligned(rowptrs.p2.u8 + x, offsets); + rows.r3 = vldr_u32_gather_unaligned(rowptrs.p3.u8 + x, offsets); + + vrgb_pixels_t pixels0 = vdebayer_grbg(rows.r0, rows.r1, rows.r2); + vrgb_pixels_store_grayscale(p0, x, vdebayer_apply_rb_gain(pixels0, red_gain, blue_gain), pred); + + // PIXFORMAT_BAYER_GRBG shifted down by 1 becomes PIXFORMAT_BAYER_BGGR + vrgb_pixels_t pixels1 = vdebayer_bggr(rows.r1, rows.r2, rows.r3); + vrgb_pixels_store_grayscale(p1, x, vdebayer_apply_rb_gain(pixels1, red_gain, blue_gain), pred); + } + + if (x < src->w) { + v4x_rows_t rows = vdebayer_load_rows_inner(rowptrs, x, offsets, vdebayer_load_pred(src, x)); + v128_predicate_t 
pred = vdebayer_store_pred(src->w, x); + + vrgb_pixels_t pixels0 = vdebayer_grbg(rows.r0, rows.r1, rows.r2); + vrgb_pixels_store_grayscale(p0, x, vdebayer_apply_rb_gain(pixels0, red_gain, blue_gain), pred); + + // PIXFORMAT_BAYER_GRBG shifted down by 1 becomes PIXFORMAT_BAYER_BGGR + vrgb_pixels_t pixels1 = vdebayer_bggr(rows.r1, rows.r2, rows.r3); + vrgb_pixels_store_grayscale(p1, x, vdebayer_apply_rb_gain(pixels1, red_gain, blue_gain), pred); + } + + vdebayer_grayscale_buf_copy(y, buf, dst); + } + + // Last odd row. + if (src->h % VBAYER_Y_STRIDE) { + int32_t y = src->h - 1; + v4x_row_ptrs_t rowptrs = vdebayer_rowptrs_init(src, y); + uint8_t *p0 = IMAGE_COMPUTE_GRAYSCALE_PIXEL_ROW_PTR(buf, (y % VBAYER_BUF_BROWS)); + + v4x_rows_t rows = vdebayer_load_rows_inner(rowptrs, 0, offsets, vdebayer_load_pred(src, 0)); + v128_predicate_t pred = vdebayer_store_pred(src->w, 0); + + vrgb_pixels_t pixels0 = vdebayer_grbg(rows.r0, rows.r1, rows.r2); + vrgb_pixels_store_grayscale(p0, 0, vdebayer_apply_rb_gain(pixels0, red_gain, blue_gain), pred); + + int32_t x = VBAYER_X_STRIDE; + for (int32_t x_end = src->w - VBAYER_X_STRIDE; x <= x_end; x += VBAYER_X_STRIDE) { + v4x_rows_t rows; + rows.r0 = vldr_u32_gather_unaligned(rowptrs.p0.u8 + x, offsets); + rows.r1 = vldr_u32_gather_unaligned(rowptrs.p1.u8 + x, offsets); + rows.r2 = vldr_u32_gather_unaligned(rowptrs.p2.u8 + x, offsets); + + vrgb_pixels_t pixels0 = vdebayer_grbg(rows.r0, rows.r1, rows.r2); + vrgb_pixels_store_grayscale(p0, x, vdebayer_apply_rb_gain(pixels0, red_gain, blue_gain), pred); + } + + if (x < src->w) { + v4x_rows_t rows = vdebayer_load_rows_inner(rowptrs, x, offsets, vdebayer_load_pred(src, x)); + v128_predicate_t pred = vdebayer_store_pred(src->w, x); + + vrgb_pixels_t pixels0 = vdebayer_grbg(rows.r0, rows.r1, rows.r2); + vrgb_pixels_store_grayscale(p0, x, vdebayer_apply_rb_gain(pixels0, red_gain, blue_gain), pred); + } + + vdebayer_grayscale_buf_copy(y, buf, dst); + } +} + +static void vdebayer_rggb_to_grayscale_awb(image_t *src, image_t *dst, image_t *buf, + uint32_t red_gain, uint32_t blue_gain) { + // Load pixels, but, each set of 4 pixels overlaps the previous by 2 pixels. 
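+    // Note that the red/blue gains are applied to the RGB pixels before the grayscale store,
+    // so the white-balance correction is baked into the resulting luma values rather than
+    // being applied after the RGB-to-gray conversion.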
+ v128_t offsets = vidup_u32_unaligned(0, 2); + + for (int32_t y = 0, y_end = (src->h / VBAYER_Y_STRIDE) * VBAYER_Y_STRIDE; y < y_end; y += VBAYER_Y_STRIDE) { + v4x_row_ptrs_t rowptrs = vdebayer_rowptrs_init(src, y); + uint8_t *p0 = IMAGE_COMPUTE_GRAYSCALE_PIXEL_ROW_PTR(buf, (y % VBAYER_BUF_BROWS)); + uint8_t *p1 = IMAGE_COMPUTE_GRAYSCALE_PIXEL_ROW_PTR(buf, ((y + 1) % VBAYER_BUF_BROWS)); + + v4x_rows_t rows = vdebayer_load_rows_inner(rowptrs, 0, offsets, vdebayer_load_pred(src, 0)); + v128_predicate_t pred = vdebayer_store_pred(src->w, 0); + + vrgb_pixels_t pixels0 = vdebayer_rggb(rows.r0, rows.r1, rows.r2); + vrgb_pixels_store_grayscale(p0, 0, vdebayer_apply_rb_gain(pixels0, red_gain, blue_gain), pred); + + // PIXFORMAT_BAYER_RGGB shifted down by 1 becomes PIXFORMAT_BAYER_GBRG + vrgb_pixels_t pixels1 = vdebayer_gbrg(rows.r1, rows.r2, rows.r3); + vrgb_pixels_store_grayscale(p1, 0, vdebayer_apply_rb_gain(pixels1, red_gain, blue_gain), pred); + + int32_t x = VBAYER_X_STRIDE; + for (int32_t x_end = src->w - VBAYER_X_STRIDE; x <= x_end; x += VBAYER_X_STRIDE) { + v4x_rows_t rows; + rows.r0 = vldr_u32_gather_unaligned(rowptrs.p0.u8 + x, offsets); + rows.r1 = vldr_u32_gather_unaligned(rowptrs.p1.u8 + x, offsets); + rows.r2 = vldr_u32_gather_unaligned(rowptrs.p2.u8 + x, offsets); + rows.r3 = vldr_u32_gather_unaligned(rowptrs.p3.u8 + x, offsets); + + vrgb_pixels_t pixels0 = vdebayer_rggb(rows.r0, rows.r1, rows.r2); + vrgb_pixels_store_grayscale(p0, x, vdebayer_apply_rb_gain(pixels0, red_gain, blue_gain), pred); + + // PIXFORMAT_BAYER_RGGB shifted down by 1 becomes PIXFORMAT_BAYER_GBRG + vrgb_pixels_t pixels1 = vdebayer_gbrg(rows.r1, rows.r2, rows.r3); + vrgb_pixels_store_grayscale(p1, x, vdebayer_apply_rb_gain(pixels1, red_gain, blue_gain), pred); + } + + if (x < src->w) { + v4x_rows_t rows = vdebayer_load_rows_inner(rowptrs, x, offsets, vdebayer_load_pred(src, x)); + v128_predicate_t pred = vdebayer_store_pred(src->w, x); + + vrgb_pixels_t pixels0 = vdebayer_rggb(rows.r0, rows.r1, rows.r2); + vrgb_pixels_store_grayscale(p0, x, vdebayer_apply_rb_gain(pixels0, red_gain, blue_gain), pred); + + // PIXFORMAT_BAYER_RGGB shifted down by 1 becomes PIXFORMAT_BAYER_GBRG + vrgb_pixels_t pixels1 = vdebayer_gbrg(rows.r1, rows.r2, rows.r3); + vrgb_pixels_store_grayscale(p1, x, vdebayer_apply_rb_gain(pixels1, red_gain, blue_gain), pred); + } + + vdebayer_grayscale_buf_copy(y, buf, dst); + } + + // Last odd row. 
+ if (src->h % VBAYER_Y_STRIDE) { + int32_t y = src->h - 1; + v4x_row_ptrs_t rowptrs = vdebayer_rowptrs_init(src, y); + uint8_t *p0 = IMAGE_COMPUTE_GRAYSCALE_PIXEL_ROW_PTR(buf, (y % VBAYER_BUF_BROWS)); + + v4x_rows_t rows = vdebayer_load_rows_inner(rowptrs, 0, offsets, vdebayer_load_pred(src, 0)); + v128_predicate_t pred = vdebayer_store_pred(src->w, 0); + + vrgb_pixels_t pixels0 = vdebayer_rggb(rows.r0, rows.r1, rows.r2); + vrgb_pixels_store_grayscale(p0, 0, vdebayer_apply_rb_gain(pixels0, red_gain, blue_gain), pred); + + int32_t x = VBAYER_X_STRIDE; + for (int32_t x_end = src->w - VBAYER_X_STRIDE; x <= x_end; x += VBAYER_X_STRIDE) { + v4x_rows_t rows; + rows.r0 = vldr_u32_gather_unaligned(rowptrs.p0.u8 + x, offsets); + rows.r1 = vldr_u32_gather_unaligned(rowptrs.p1.u8 + x, offsets); + rows.r2 = vldr_u32_gather_unaligned(rowptrs.p2.u8 + x, offsets); + + vrgb_pixels_t pixels0 = vdebayer_rggb(rows.r0, rows.r1, rows.r2); + vrgb_pixels_store_grayscale(p0, x, vdebayer_apply_rb_gain(pixels0, red_gain, blue_gain), pred); + } + + if (x < src->w) { + v4x_rows_t rows = vdebayer_load_rows_inner(rowptrs, x, offsets, vdebayer_load_pred(src, x)); + v128_predicate_t pred = vdebayer_store_pred(src->w, x); + + vrgb_pixels_t pixels0 = vdebayer_rggb(rows.r0, rows.r1, rows.r2); + vrgb_pixels_store_grayscale(p0, x, vdebayer_apply_rb_gain(pixels0, red_gain, blue_gain), pred); + } + + vdebayer_grayscale_buf_copy(y, buf, dst); + } +} + +static void vdebayer_bggr_to_rgb565_awb(image_t *src, image_t *dst, image_t *buf, + uint32_t red_gain, uint32_t blue_gain) { + // Load pixels, but, each set of 4 pixels overlaps the previous by 2 pixels. + v128_t offsets = vidup_u32_unaligned(0, 2); + + for (int32_t y = 0, y_end = (src->h / VBAYER_Y_STRIDE) * VBAYER_Y_STRIDE; y < y_end; y += VBAYER_Y_STRIDE) { + v4x_row_ptrs_t rowptrs = vdebayer_rowptrs_init(src, y); + uint16_t *p0 = IMAGE_COMPUTE_RGB565_PIXEL_ROW_PTR(buf, (y % VBAYER_BUF_BROWS)); + uint16_t *p1 = IMAGE_COMPUTE_RGB565_PIXEL_ROW_PTR(buf, ((y + 1) % VBAYER_BUF_BROWS)); + + v4x_rows_t rows = vdebayer_load_rows_inner(rowptrs, 0, offsets, vdebayer_load_pred(src, 0)); + v128_predicate_t pred = vdebayer_store_pred(src->w, 0); + + vrgb_pixels_t pixels0 = vdebayer_bggr(rows.r0, rows.r1, rows.r2); + vrgb_pixels_store_rgb565(p0, 0, vdebayer_apply_rb_gain(pixels0, red_gain, blue_gain), pred); + + // PIXFORMAT_BAYER_BGGR shifted down by 1 becomes PIXFORMAT_BAYER_GRBG + vrgb_pixels_t pixels1 = vdebayer_grbg(rows.r1, rows.r2, rows.r3); + vrgb_pixels_store_rgb565(p1, 0, vdebayer_apply_rb_gain(pixels1, red_gain, blue_gain), pred); + + int32_t x = VBAYER_X_STRIDE; + for (int32_t x_end = src->w - VBAYER_X_STRIDE; x <= x_end; x += VBAYER_X_STRIDE) { + v4x_rows_t rows; + rows.r0 = vldr_u32_gather_unaligned(rowptrs.p0.u8 + x, offsets); + rows.r1 = vldr_u32_gather_unaligned(rowptrs.p1.u8 + x, offsets); + rows.r2 = vldr_u32_gather_unaligned(rowptrs.p2.u8 + x, offsets); + rows.r3 = vldr_u32_gather_unaligned(rowptrs.p3.u8 + x, offsets); + + vrgb_pixels_t pixels0 = vdebayer_bggr(rows.r0, rows.r1, rows.r2); + vstr_u16(p0 + x, vrgb_pixels_to_rgb565(vdebayer_apply_rb_gain(pixels0, red_gain, blue_gain))); + + // PIXFORMAT_BAYER_BGGR shifted down by 1 becomes PIXFORMAT_BAYER_GRBG + vrgb_pixels_t pixels1 = vdebayer_grbg(rows.r1, rows.r2, rows.r3); + vstr_u16(p1 + x, vrgb_pixels_to_rgb565(vdebayer_apply_rb_gain(pixels1, red_gain, blue_gain))); + } + + if (x < src->w) { + v4x_rows_t rows = vdebayer_load_rows_inner(rowptrs, x, offsets, vdebayer_load_pred(src, x)); + v128_predicate_t pred = 
vdebayer_store_pred(src->w, x); + + vrgb_pixels_t pixels0 = vdebayer_bggr(rows.r0, rows.r1, rows.r2); + vrgb_pixels_store_rgb565(p0, x, vdebayer_apply_rb_gain(pixels0, red_gain, blue_gain), pred); + + // PIXFORMAT_BAYER_BGGR shifted down by 1 becomes PIXFORMAT_BAYER_GRBG + vrgb_pixels_t pixels1 = vdebayer_grbg(rows.r1, rows.r2, rows.r3); + vrgb_pixels_store_rgb565(p1, x, vdebayer_apply_rb_gain(pixels1, red_gain, blue_gain), pred); + } + + vdebayer_rgb565_buf_copy(y, buf, dst); + } + + // Last odd row. + if (src->h % VBAYER_Y_STRIDE) { + int32_t y = src->h - 1; + v4x_row_ptrs_t rowptrs = vdebayer_rowptrs_init(src, y); + uint16_t *p0 = IMAGE_COMPUTE_RGB565_PIXEL_ROW_PTR(buf, (y % VBAYER_BUF_BROWS)); + + v4x_rows_t rows = vdebayer_load_rows_inner(rowptrs, 0, offsets, vdebayer_load_pred(src, 0)); + v128_predicate_t pred = vdebayer_store_pred(src->w, 0); + + vrgb_pixels_t pixels0 = vdebayer_bggr(rows.r0, rows.r1, rows.r2); + vrgb_pixels_store_rgb565(p0, 0, vdebayer_apply_rb_gain(pixels0, red_gain, blue_gain), pred); + + int32_t x = VBAYER_X_STRIDE; + for (int32_t x_end = src->w - VBAYER_X_STRIDE; x <= x_end; x += VBAYER_X_STRIDE) { + v4x_rows_t rows; + rows.r0 = vldr_u32_gather_unaligned(rowptrs.p0.u8 + x, offsets); + rows.r1 = vldr_u32_gather_unaligned(rowptrs.p1.u8 + x, offsets); + rows.r2 = vldr_u32_gather_unaligned(rowptrs.p2.u8 + x, offsets); + + vrgb_pixels_t pixels0 = vdebayer_bggr(rows.r0, rows.r1, rows.r2); + vstr_u16(p0 + x, vrgb_pixels_to_rgb565(vdebayer_apply_rb_gain(pixels0, red_gain, blue_gain))); + } + + if (x < src->w) { + v4x_rows_t rows = vdebayer_load_rows_inner(rowptrs, x, offsets, vdebayer_load_pred(src, x)); + v128_predicate_t pred = vdebayer_store_pred(src->w, x); + + vrgb_pixels_t pixels0 = vdebayer_bggr(rows.r0, rows.r1, rows.r2); + vrgb_pixels_store_rgb565(p0, x, vdebayer_apply_rb_gain(pixels0, red_gain, blue_gain), pred); + } + + vdebayer_rgb565_buf_copy(y, buf, dst); + } +} + +static void vdebayer_gbrg_to_rgb565_awb(image_t *src, image_t *dst, image_t *buf, + uint32_t red_gain, uint32_t blue_gain) { + // Load pixels, but, each set of 4 pixels overlaps the previous by 2 pixels. 
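+    // Unlike the grayscale AWB path, full interior blocks here store through vstr_u16() with
+    // vrgb_pixels_to_rgb565() directly and skip predication, leaving the predicated
+    // vrgb_pixels_store_rgb565() helper for the first and last partial blocks of each row.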
+ v128_t offsets = vidup_u32_unaligned(0, 2); + + for (int32_t y = 0, y_end = (src->h / VBAYER_Y_STRIDE) * VBAYER_Y_STRIDE; y < y_end; y += VBAYER_Y_STRIDE) { + v4x_row_ptrs_t rowptrs = vdebayer_rowptrs_init(src, y); + uint16_t *p0 = IMAGE_COMPUTE_RGB565_PIXEL_ROW_PTR(buf, (y % VBAYER_BUF_BROWS)); + uint16_t *p1 = IMAGE_COMPUTE_RGB565_PIXEL_ROW_PTR(buf, ((y + 1) % VBAYER_BUF_BROWS)); + + v4x_rows_t rows = vdebayer_load_rows_inner(rowptrs, 0, offsets, vdebayer_load_pred(src, 0)); + v128_predicate_t pred = vdebayer_store_pred(src->w, 0); + + vrgb_pixels_t pixels0 = vdebayer_gbrg(rows.r0, rows.r1, rows.r2); + vrgb_pixels_store_rgb565(p0, 0, vdebayer_apply_rb_gain(pixels0, red_gain, blue_gain), pred); + + // PIXFORMAT_BAYER_GBRG shifted down by 1 becomes PIXFORMAT_BAYER_RGGB + vrgb_pixels_t pixels1 = vdebayer_rggb(rows.r1, rows.r2, rows.r3); + vrgb_pixels_store_rgb565(p1, 0, vdebayer_apply_rb_gain(pixels1, red_gain, blue_gain), pred); + + int32_t x = VBAYER_X_STRIDE; + for (int32_t x_end = src->w - VBAYER_X_STRIDE; x <= x_end; x += VBAYER_X_STRIDE) { + v4x_rows_t rows; + rows.r0 = vldr_u32_gather_unaligned(rowptrs.p0.u8 + x, offsets); + rows.r1 = vldr_u32_gather_unaligned(rowptrs.p1.u8 + x, offsets); + rows.r2 = vldr_u32_gather_unaligned(rowptrs.p2.u8 + x, offsets); + rows.r3 = vldr_u32_gather_unaligned(rowptrs.p3.u8 + x, offsets); + + vrgb_pixels_t pixels0 = vdebayer_gbrg(rows.r0, rows.r1, rows.r2); + vstr_u16(p0 + x, vrgb_pixels_to_rgb565(vdebayer_apply_rb_gain(pixels0, red_gain, blue_gain))); + + // PIXFORMAT_BAYER_GBRG shifted down by 1 becomes PIXFORMAT_BAYER_RGGB + vrgb_pixels_t pixels1 = vdebayer_rggb(rows.r1, rows.r2, rows.r3); + vstr_u16(p1 + x, vrgb_pixels_to_rgb565(vdebayer_apply_rb_gain(pixels1, red_gain, blue_gain))); + } + + if (x < src->w) { + v4x_rows_t rows = vdebayer_load_rows_inner(rowptrs, x, offsets, vdebayer_load_pred(src, x)); + v128_predicate_t pred = vdebayer_store_pred(src->w, x); + + vrgb_pixels_t pixels0 = vdebayer_gbrg(rows.r0, rows.r1, rows.r2); + vrgb_pixels_store_rgb565(p0, x, vdebayer_apply_rb_gain(pixels0, red_gain, blue_gain), pred); + + // PIXFORMAT_BAYER_GBRG shifted down by 1 becomes PIXFORMAT_BAYER_RGGB + vrgb_pixels_t pixels1 = vdebayer_rggb(rows.r1, rows.r2, rows.r3); + vrgb_pixels_store_rgb565(p1, x, vdebayer_apply_rb_gain(pixels1, red_gain, blue_gain), pred); + } + + vdebayer_rgb565_buf_copy(y, buf, dst); + } + + // Last odd row. 
+ if (src->h % VBAYER_Y_STRIDE) { + int32_t y = src->h - 1; + v4x_row_ptrs_t rowptrs = vdebayer_rowptrs_init(src, y); + uint16_t *p0 = IMAGE_COMPUTE_RGB565_PIXEL_ROW_PTR(buf, (y % VBAYER_BUF_BROWS)); + + v4x_rows_t rows = vdebayer_load_rows_inner(rowptrs, 0, offsets, vdebayer_load_pred(src, 0)); + v128_predicate_t pred = vdebayer_store_pred(src->w, 0); + + vrgb_pixels_t pixels0 = vdebayer_gbrg(rows.r0, rows.r1, rows.r2); + vrgb_pixels_store_rgb565(p0, 0, vdebayer_apply_rb_gain(pixels0, red_gain, blue_gain), pred); + + int32_t x = VBAYER_X_STRIDE; + for (int32_t x_end = src->w - VBAYER_X_STRIDE; x <= x_end; x += VBAYER_X_STRIDE) { + v4x_rows_t rows; + rows.r0 = vldr_u32_gather_unaligned(rowptrs.p0.u8 + x, offsets); + rows.r1 = vldr_u32_gather_unaligned(rowptrs.p1.u8 + x, offsets); + rows.r2 = vldr_u32_gather_unaligned(rowptrs.p2.u8 + x, offsets); + + vrgb_pixels_t pixels0 = vdebayer_gbrg(rows.r0, rows.r1, rows.r2); + vstr_u16(p0 + x, vrgb_pixels_to_rgb565(vdebayer_apply_rb_gain(pixels0, red_gain, blue_gain))); + } + + if (x < src->w) { + v4x_rows_t rows = vdebayer_load_rows_inner(rowptrs, x, offsets, vdebayer_load_pred(src, x)); + v128_predicate_t pred = vdebayer_store_pred(src->w, x); + + vrgb_pixels_t pixels0 = vdebayer_gbrg(rows.r0, rows.r1, rows.r2); + vrgb_pixels_store_rgb565(p0, x, vdebayer_apply_rb_gain(pixels0, red_gain, blue_gain), pred); + } + + vdebayer_rgb565_buf_copy(y, buf, dst); + } +} + +static void vdebayer_grbg_to_rgb565_awb(image_t *src, image_t *dst, image_t *buf, + uint32_t red_gain, uint32_t blue_gain) { + // Load pixels, but, each set of 4 pixels overlaps the previous by 2 pixels. + v128_t offsets = vidup_u32_unaligned(0, 2); + + for (int32_t y = 0, y_end = (src->h / VBAYER_Y_STRIDE) * VBAYER_Y_STRIDE; y < y_end; y += VBAYER_Y_STRIDE) { + v4x_row_ptrs_t rowptrs = vdebayer_rowptrs_init(src, y); + uint16_t *p0 = IMAGE_COMPUTE_RGB565_PIXEL_ROW_PTR(buf, (y % VBAYER_BUF_BROWS)); + uint16_t *p1 = IMAGE_COMPUTE_RGB565_PIXEL_ROW_PTR(buf, ((y + 1) % VBAYER_BUF_BROWS)); + + v4x_rows_t rows = vdebayer_load_rows_inner(rowptrs, 0, offsets, vdebayer_load_pred(src, 0)); + v128_predicate_t pred = vdebayer_store_pred(src->w, 0); + + vrgb_pixels_t pixels0 = vdebayer_grbg(rows.r0, rows.r1, rows.r2); + vrgb_pixels_store_rgb565(p0, 0, vdebayer_apply_rb_gain(pixels0, red_gain, blue_gain), pred); + + // PIXFORMAT_BAYER_GRBG shifted down by 1 becomes PIXFORMAT_BAYER_BGGR + vrgb_pixels_t pixels1 = vdebayer_bggr(rows.r1, rows.r2, rows.r3); + vrgb_pixels_store_rgb565(p1, 0, vdebayer_apply_rb_gain(pixels1, red_gain, blue_gain), pred); + + int32_t x = VBAYER_X_STRIDE; + for (int32_t x_end = src->w - VBAYER_X_STRIDE; x <= x_end; x += VBAYER_X_STRIDE) { + v4x_rows_t rows; + rows.r0 = vldr_u32_gather_unaligned(rowptrs.p0.u8 + x, offsets); + rows.r1 = vldr_u32_gather_unaligned(rowptrs.p1.u8 + x, offsets); + rows.r2 = vldr_u32_gather_unaligned(rowptrs.p2.u8 + x, offsets); + rows.r3 = vldr_u32_gather_unaligned(rowptrs.p3.u8 + x, offsets); + + vrgb_pixels_t pixels0 = vdebayer_grbg(rows.r0, rows.r1, rows.r2); + vstr_u16(p0 + x, vrgb_pixels_to_rgb565(vdebayer_apply_rb_gain(pixels0, red_gain, blue_gain))); + + // PIXFORMAT_BAYER_GRBG shifted down by 1 becomes PIXFORMAT_BAYER_BGGR + vrgb_pixels_t pixels1 = vdebayer_bggr(rows.r1, rows.r2, rows.r3); + vstr_u16(p1 + x, vrgb_pixels_to_rgb565(vdebayer_apply_rb_gain(pixels1, red_gain, blue_gain))); + } + + if (x < src->w) { + v4x_rows_t rows = vdebayer_load_rows_inner(rowptrs, x, offsets, vdebayer_load_pred(src, x)); + v128_predicate_t pred = 
vdebayer_store_pred(src->w, x); + + vrgb_pixels_t pixels0 = vdebayer_grbg(rows.r0, rows.r1, rows.r2); + vrgb_pixels_store_rgb565(p0, x, vdebayer_apply_rb_gain(pixels0, red_gain, blue_gain), pred); + + // PIXFORMAT_BAYER_GRBG shifted down by 1 becomes PIXFORMAT_BAYER_BGGR + vrgb_pixels_t pixels1 = vdebayer_bggr(rows.r1, rows.r2, rows.r3); + vrgb_pixels_store_rgb565(p1, x, vdebayer_apply_rb_gain(pixels1, red_gain, blue_gain), pred); + } + + vdebayer_rgb565_buf_copy(y, buf, dst); + } + + // Last odd row. + if (src->h % VBAYER_Y_STRIDE) { + int32_t y = src->h - 1; + v4x_row_ptrs_t rowptrs = vdebayer_rowptrs_init(src, y); + uint16_t *p0 = IMAGE_COMPUTE_RGB565_PIXEL_ROW_PTR(buf, (y % VBAYER_BUF_BROWS)); + + v4x_rows_t rows = vdebayer_load_rows_inner(rowptrs, 0, offsets, vdebayer_load_pred(src, 0)); + v128_predicate_t pred = vdebayer_store_pred(src->w, 0); + + vrgb_pixels_t pixels0 = vdebayer_grbg(rows.r0, rows.r1, rows.r2); + vrgb_pixels_store_rgb565(p0, 0, vdebayer_apply_rb_gain(pixels0, red_gain, blue_gain), pred); + + int32_t x = VBAYER_X_STRIDE; + for (int32_t x_end = src->w - VBAYER_X_STRIDE; x <= x_end; x += VBAYER_X_STRIDE) { + v4x_rows_t rows; + rows.r0 = vldr_u32_gather_unaligned(rowptrs.p0.u8 + x, offsets); + rows.r1 = vldr_u32_gather_unaligned(rowptrs.p1.u8 + x, offsets); + rows.r2 = vldr_u32_gather_unaligned(rowptrs.p2.u8 + x, offsets); + + vrgb_pixels_t pixels0 = vdebayer_grbg(rows.r0, rows.r1, rows.r2); + vstr_u16(p0 + x, vrgb_pixels_to_rgb565(vdebayer_apply_rb_gain(pixels0, red_gain, blue_gain))); + } + + if (x < src->w) { + v4x_rows_t rows = vdebayer_load_rows_inner(rowptrs, x, offsets, vdebayer_load_pred(src, x)); + v128_predicate_t pred = vdebayer_store_pred(src->w, x); + + vrgb_pixels_t pixels0 = vdebayer_grbg(rows.r0, rows.r1, rows.r2); + vrgb_pixels_store_rgb565(p0, x, vdebayer_apply_rb_gain(pixels0, red_gain, blue_gain), pred); + } + + vdebayer_rgb565_buf_copy(y, buf, dst); + } +} + +static void vdebayer_rggb_to_rgb565_awb(image_t *src, image_t *dst, image_t *buf, + uint32_t red_gain, uint32_t blue_gain) { + // Load pixels, but, each set of 4 pixels overlaps the previous by 2 pixels. 
+ v128_t offsets = vidup_u32_unaligned(0, 2); + + for (int32_t y = 0, y_end = (src->h / VBAYER_Y_STRIDE) * VBAYER_Y_STRIDE; y < y_end; y += VBAYER_Y_STRIDE) { + v4x_row_ptrs_t rowptrs = vdebayer_rowptrs_init(src, y); + uint16_t *p0 = IMAGE_COMPUTE_RGB565_PIXEL_ROW_PTR(buf, (y % VBAYER_BUF_BROWS)); + uint16_t *p1 = IMAGE_COMPUTE_RGB565_PIXEL_ROW_PTR(buf, ((y + 1) % VBAYER_BUF_BROWS)); + + v4x_rows_t rows = vdebayer_load_rows_inner(rowptrs, 0, offsets, vdebayer_load_pred(src, 0)); + v128_predicate_t pred = vdebayer_store_pred(src->w, 0); + + vrgb_pixels_t pixels0 = vdebayer_rggb(rows.r0, rows.r1, rows.r2); + vrgb_pixels_store_rgb565(p0, 0, vdebayer_apply_rb_gain(pixels0, red_gain, blue_gain), pred); + + // PIXFORMAT_BAYER_RGGB shifted down by 1 becomes PIXFORMAT_BAYER_GBRG + vrgb_pixels_t pixels1 = vdebayer_gbrg(rows.r1, rows.r2, rows.r3); + vrgb_pixels_store_rgb565(p1, 0, vdebayer_apply_rb_gain(pixels1, red_gain, blue_gain), pred); + + int32_t x = VBAYER_X_STRIDE; + for (int32_t x_end = src->w - VBAYER_X_STRIDE; x <= x_end; x += VBAYER_X_STRIDE) { + v4x_rows_t rows; + rows.r0 = vldr_u32_gather_unaligned(rowptrs.p0.u8 + x, offsets); + rows.r1 = vldr_u32_gather_unaligned(rowptrs.p1.u8 + x, offsets); + rows.r2 = vldr_u32_gather_unaligned(rowptrs.p2.u8 + x, offsets); + rows.r3 = vldr_u32_gather_unaligned(rowptrs.p3.u8 + x, offsets); + + vrgb_pixels_t pixels0 = vdebayer_rggb(rows.r0, rows.r1, rows.r2); + vstr_u16(p0 + x, vrgb_pixels_to_rgb565(vdebayer_apply_rb_gain(pixels0, red_gain, blue_gain))); + + // PIXFORMAT_BAYER_RGGB shifted down by 1 becomes PIXFORMAT_BAYER_GBRG + vrgb_pixels_t pixels1 = vdebayer_gbrg(rows.r1, rows.r2, rows.r3); + vstr_u16(p1 + x, vrgb_pixels_to_rgb565(vdebayer_apply_rb_gain(pixels1, red_gain, blue_gain))); + } + + if (x < src->w) { + v4x_rows_t rows = vdebayer_load_rows_inner(rowptrs, x, offsets, vdebayer_load_pred(src, x)); + v128_predicate_t pred = vdebayer_store_pred(src->w, x); + + vrgb_pixels_t pixels0 = vdebayer_rggb(rows.r0, rows.r1, rows.r2); + vrgb_pixels_store_rgb565(p0, x, vdebayer_apply_rb_gain(pixels0, red_gain, blue_gain), pred); + + // PIXFORMAT_BAYER_RGGB shifted down by 1 becomes PIXFORMAT_BAYER_GBRG + vrgb_pixels_t pixels1 = vdebayer_gbrg(rows.r1, rows.r2, rows.r3); + vrgb_pixels_store_rgb565(p1, x, vdebayer_apply_rb_gain(pixels1, red_gain, blue_gain), pred); + } + + vdebayer_rgb565_buf_copy(y, buf, dst); + } + + // Last odd row. 
+ if (src->h % VBAYER_Y_STRIDE) { + int32_t y = src->h - 1; + v4x_row_ptrs_t rowptrs = vdebayer_rowptrs_init(src, y); + uint16_t *p0 = IMAGE_COMPUTE_RGB565_PIXEL_ROW_PTR(buf, (y % VBAYER_BUF_BROWS)); + + v4x_rows_t rows = vdebayer_load_rows_inner(rowptrs, 0, offsets, vdebayer_load_pred(src, 0)); + v128_predicate_t pred = vdebayer_store_pred(src->w, 0); + + vrgb_pixels_t pixels0 = vdebayer_rggb(rows.r0, rows.r1, rows.r2); + vrgb_pixels_store_rgb565(p0, 0, vdebayer_apply_rb_gain(pixels0, red_gain, blue_gain), pred); + + int32_t x = VBAYER_X_STRIDE; + for (int32_t x_end = src->w - VBAYER_X_STRIDE; x <= x_end; x += VBAYER_X_STRIDE) { + v4x_rows_t rows; + rows.r0 = vldr_u32_gather_unaligned(rowptrs.p0.u8 + x, offsets); + rows.r1 = vldr_u32_gather_unaligned(rowptrs.p1.u8 + x, offsets); + rows.r2 = vldr_u32_gather_unaligned(rowptrs.p2.u8 + x, offsets); + + vrgb_pixels_t pixels0 = vdebayer_rggb(rows.r0, rows.r1, rows.r2); + vstr_u16(p0 + x, vrgb_pixels_to_rgb565(vdebayer_apply_rb_gain(pixels0, red_gain, blue_gain))); + } + + if (x < src->w) { + v4x_rows_t rows = vdebayer_load_rows_inner(rowptrs, x, offsets, vdebayer_load_pred(src, x)); + v128_predicate_t pred = vdebayer_store_pred(src->w, x); + + vrgb_pixels_t pixels0 = vdebayer_rggb(rows.r0, rows.r1, rows.r2); + vrgb_pixels_store_rgb565(p0, x, vdebayer_apply_rb_gain(pixels0, red_gain, blue_gain), pred); + } + + vdebayer_rgb565_buf_copy(y, buf, dst); + } +} + +static void vdebayer_bggr_to_grayscale_awb_quarter(image_t *src, image_t *dst, uint32_t red_gain, uint32_t blue_gain) { + for (int32_t y = 0; y < src->h; y += VBAYER_Y_STRIDE) { + v2x_row_ptrs_t rowptrs = vdebayer_quarter_rowptrs_init(src, y); + uint8_t *p = IMAGE_COMPUTE_GRAYSCALE_PIXEL_ROW_PTR(dst, (y / 2)); + + for (int32_t x = 0; x < src->w; x += VBAYER_X_STRIDE_2X2) { + int32_t len = src->w - x; + v2x_rows_t bg, gr; + + if (len >= VBAYER_X_STRIDE_2X2) { + bg = vld2_u8(rowptrs.p0.u8 + x); + gr = vld2_u8(rowptrs.p1.u8 + x); + } else { + bg = vld2_u8_len(rowptrs.p0.u8 + x, len); + gr = vld2_u8_len(rowptrs.p1.u8 + x, len); + } + + vrgb_pixels_t packed = { + .r = gr.r1, + .g = vhadd_u8(gr.r0, bg.r1), + .b = bg.r0 + }; + + vdebayer_store_packed_grayscale(p + (x / 2), packed, red_gain, blue_gain, dst->w - (x / 2)); + } + } +} + +static void vdebayer_gbrg_to_grayscale_awb_quarter(image_t *src, image_t *dst, uint32_t red_gain, uint32_t blue_gain) { + for (int32_t y = 0; y < src->h; y += VBAYER_Y_STRIDE) { + v2x_row_ptrs_t rowptrs = vdebayer_quarter_rowptrs_init(src, y); + uint8_t *p = IMAGE_COMPUTE_GRAYSCALE_PIXEL_ROW_PTR(dst, (y / 2)); + + for (int32_t x = 0; x < src->w; x += VBAYER_X_STRIDE_2X2) { + int32_t len = src->w - x; + v2x_rows_t gb, rg; + + if (len >= VBAYER_X_STRIDE_2X2) { + gb = vld2_u8(rowptrs.p0.u8 + x); + rg = vld2_u8(rowptrs.p1.u8 + x); + } else { + gb = vld2_u8_len(rowptrs.p0.u8 + x, len); + rg = vld2_u8_len(rowptrs.p1.u8 + x, len); + } + + vrgb_pixels_t packed = { + .r = rg.r0, + .g = vhadd_u8(gb.r0, rg.r1), + .b = gb.r1 + }; + + vdebayer_store_packed_grayscale(p + (x / 2), packed, red_gain, blue_gain, dst->w - (x / 2)); + } + } +} + +static void vdebayer_grbg_to_grayscale_awb_quarter(image_t *src, image_t *dst, uint32_t red_gain, uint32_t blue_gain) { + for (int32_t y = 0; y < src->h; y += VBAYER_Y_STRIDE) { + v2x_row_ptrs_t rowptrs = vdebayer_quarter_rowptrs_init(src, y); + uint8_t *p = IMAGE_COMPUTE_GRAYSCALE_PIXEL_ROW_PTR(dst, (y / 2)); + + for (int32_t x = 0; x < src->w; x += VBAYER_X_STRIDE_2X2) { + int32_t len = src->w - x; + v2x_rows_t gr, bg; + + if (len >= 
VBAYER_X_STRIDE_2X2) { + gr = vld2_u8(rowptrs.p0.u8 + x); + bg = vld2_u8(rowptrs.p1.u8 + x); + } else { + gr = vld2_u8_len(rowptrs.p0.u8 + x, len); + bg = vld2_u8_len(rowptrs.p1.u8 + x, len); + } + + vrgb_pixels_t packed = { + .r = gr.r1, + .g = vhadd_u8(gr.r0, bg.r1), + .b = bg.r0 + }; + + vdebayer_store_packed_grayscale(p + (x / 2), packed, red_gain, blue_gain, dst->w - (x / 2)); + } + } +} + +static void vdebayer_rggb_to_grayscale_awb_quarter(image_t *src, image_t *dst, uint32_t red_gain, uint32_t blue_gain) { + for (int32_t y = 0; y < src->h; y += VBAYER_Y_STRIDE) { + v2x_row_ptrs_t rowptrs = vdebayer_quarter_rowptrs_init(src, y); + uint8_t *p = IMAGE_COMPUTE_GRAYSCALE_PIXEL_ROW_PTR(dst, (y / 2)); + + for (int32_t x = 0; x < src->w; x += VBAYER_X_STRIDE_2X2) { + int32_t len = src->w - x; + v2x_rows_t rg, gb; + + if (len >= VBAYER_X_STRIDE_2X2) { + rg = vld2_u8(rowptrs.p0.u8 + x); + gb = vld2_u8(rowptrs.p1.u8 + x); + } else { + rg = vld2_u8_len(rowptrs.p0.u8 + x, len); + gb = vld2_u8_len(rowptrs.p1.u8 + x, len); + } + + vrgb_pixels_t packed = { + .r = rg.r0, + .g = vhadd_u8(gb.r0, rg.r1), + .b = gb.r1 + }; + + vdebayer_store_packed_grayscale(p + (x / 2), packed, red_gain, blue_gain, dst->w - (x / 2)); + } + } +} + +static void vdebayer_bggr_to_rgb565_awb_quarter(image_t *src, image_t *dst, uint32_t red_gain, uint32_t blue_gain) { + for (int32_t y = 0; y < src->h; y += VBAYER_Y_STRIDE) { + v2x_row_ptrs_t rowptrs = vdebayer_quarter_rowptrs_init(src, y); + uint16_t *p = IMAGE_COMPUTE_RGB565_PIXEL_ROW_PTR(dst, (y / 2)); + + for (int32_t x = 0; x < src->w; x += VBAYER_X_STRIDE_2X2) { + int32_t len = src->w - x; + v2x_rows_t bg, gr; + + if (len >= VBAYER_X_STRIDE_2X2) { + bg = vld2_u8(rowptrs.p0.u8 + x); + gr = vld2_u8(rowptrs.p1.u8 + x); + } else { + bg = vld2_u8_len(rowptrs.p0.u8 + x, len); + gr = vld2_u8_len(rowptrs.p1.u8 + x, len); + } + + vrgb_pixels_t packed = { + .r = gr.r1, + .g = vhadd_u8(gr.r0, bg.r1), + .b = bg.r0 + }; + + vdebayer_store_packed_rgb565(p + (x / 2), packed, red_gain, blue_gain, dst->w - (x / 2)); + } + } +} + +static void vdebayer_gbrg_to_rgb565_awb_quarter(image_t *src, image_t *dst, uint32_t red_gain, uint32_t blue_gain) { + for (int32_t y = 0; y < src->h; y += VBAYER_Y_STRIDE) { + v2x_row_ptrs_t rowptrs = vdebayer_quarter_rowptrs_init(src, y); + uint16_t *p = IMAGE_COMPUTE_RGB565_PIXEL_ROW_PTR(dst, (y / 2)); + + for (int32_t x = 0; x < src->w; x += VBAYER_X_STRIDE_2X2) { + int32_t len = src->w - x; + v2x_rows_t gb, rg; + + if (len >= VBAYER_X_STRIDE_2X2) { + gb = vld2_u8(rowptrs.p0.u8 + x); + rg = vld2_u8(rowptrs.p1.u8 + x); + } else { + gb = vld2_u8_len(rowptrs.p0.u8 + x, len); + rg = vld2_u8_len(rowptrs.p1.u8 + x, len); + } + + vrgb_pixels_t packed = { + .r = rg.r0, + .g = vhadd_u8(gb.r0, rg.r1), + .b = gb.r1 + }; + + vdebayer_store_packed_rgb565(p + (x / 2), packed, red_gain, blue_gain, dst->w - (x / 2)); + } + } +} + +static void vdebayer_grbg_to_rgb565_awb_quarter(image_t *src, image_t *dst, uint32_t red_gain, uint32_t blue_gain) { + for (int32_t y = 0; y < src->h; y += VBAYER_Y_STRIDE) { + v2x_row_ptrs_t rowptrs = vdebayer_quarter_rowptrs_init(src, y); + uint16_t *p = IMAGE_COMPUTE_RGB565_PIXEL_ROW_PTR(dst, (y / 2)); + + for (int32_t x = 0; x < src->w; x += VBAYER_X_STRIDE_2X2) { + int32_t len = src->w - x; + v2x_rows_t gr, bg; + + if (len >= VBAYER_X_STRIDE_2X2) { + gr = vld2_u8(rowptrs.p0.u8 + x); + bg = vld2_u8(rowptrs.p1.u8 + x); + } else { + gr = vld2_u8_len(rowptrs.p0.u8 + x, len); + bg = vld2_u8_len(rowptrs.p1.u8 + x, len); + } + + vrgb_pixels_t 
packed = { + .r = gr.r1, + .g = vhadd_u8(gr.r0, bg.r1), + .b = bg.r0 + }; + + vdebayer_store_packed_rgb565(p + (x / 2), packed, red_gain, blue_gain, dst->w - (x / 2)); + } + } +} + +static void vdebayer_rggb_to_rgb565_awb_quarter(image_t *src, image_t *dst, uint32_t red_gain, uint32_t blue_gain) { + for (int32_t y = 0; y < src->h; y += VBAYER_Y_STRIDE) { + v2x_row_ptrs_t rowptrs = vdebayer_quarter_rowptrs_init(src, y); + uint16_t *p = IMAGE_COMPUTE_RGB565_PIXEL_ROW_PTR(dst, (y / 2)); + + for (int32_t x = 0; x < src->w; x += VBAYER_X_STRIDE_2X2) { + int32_t len = src->w - x; + v2x_rows_t rg, gb; + + if (len >= VBAYER_X_STRIDE_2X2) { + rg = vld2_u8(rowptrs.p0.u8 + x); + gb = vld2_u8(rowptrs.p1.u8 + x); + } else { + rg = vld2_u8_len(rowptrs.p0.u8 + x, len); + gb = vld2_u8_len(rowptrs.p1.u8 + x, len); + } + + vrgb_pixels_t packed = { + .r = rg.r0, + .g = vhadd_u8(gb.r0, rg.r1), + .b = gb.r1 + }; + + vdebayer_store_packed_rgb565(p + (x / 2), packed, red_gain, blue_gain, dst->w - (x / 2)); + } + } +} + +// assumes dst->w == src->w (or dst->w == src->w / 2) +// assumes dst->h == src->h (or dst->h == src->h / 2) +// src and dst may overlap, but, slower than imlib_debayer_image +// BINARY: Not supported +// GRAYSCALE: src->data == dst->data +// RGB565: src->data == dst->data + image_size(src) +// YUV422: Not supported +void imlib_debayer_image_awb(image_t *dst, image_t *src, bool fast, uint32_t r_out, uint32_t g_out, uint32_t b_out) { + OMV_PROFILE_START(); + + uint32_t red_gain = IM_DIV(g_out * 32, r_out); + red_gain = IM_MIN(red_gain, 128U); + + uint32_t blue_gain = IM_DIV(g_out * 32, b_out); + blue_gain = IM_MIN(blue_gain, 128U); + + if (fast) { + switch (src->pixfmt) { + case PIXFORMAT_BAYER_BGGR: { + switch (dst->pixfmt) { + case PIXFORMAT_GRAYSCALE: { + vdebayer_bggr_to_grayscale_awb_quarter(src, dst, red_gain, blue_gain); + break; + } + case PIXFORMAT_RGB565: { + vdebayer_bggr_to_rgb565_awb_quarter(src, dst, red_gain, blue_gain); + break; + } + default: { + __builtin_unreachable(); + } + } + break; + } + case PIXFORMAT_BAYER_GBRG: { + switch (dst->pixfmt) { + case PIXFORMAT_GRAYSCALE: { + vdebayer_gbrg_to_grayscale_awb_quarter(src, dst, red_gain, blue_gain); + break; + } + case PIXFORMAT_RGB565: { + vdebayer_gbrg_to_rgb565_awb_quarter(src, dst, red_gain, blue_gain); + break; + } + default: { + __builtin_unreachable(); + } + } + break; + } + case PIXFORMAT_BAYER_GRBG: { + switch (dst->pixfmt) { + case PIXFORMAT_GRAYSCALE: { + vdebayer_grbg_to_grayscale_awb_quarter(src, dst, red_gain, blue_gain); + break; + } + case PIXFORMAT_RGB565: { + vdebayer_grbg_to_rgb565_awb_quarter(src, dst, red_gain, blue_gain); + break; + } + default: { + __builtin_unreachable(); + } + } + break; + } + case PIXFORMAT_BAYER_RGGB: { + switch (dst->pixfmt) { + case PIXFORMAT_GRAYSCALE: { + vdebayer_rggb_to_grayscale_awb_quarter(src, dst, red_gain, blue_gain); + break; + } + case PIXFORMAT_RGB565: { + vdebayer_rggb_to_rgb565_awb_quarter(src, dst, red_gain, blue_gain); + break; + } + default: { + __builtin_unreachable(); + } + } + break; + } + default: { + __builtin_unreachable(); + } + } + } else { + switch (dst->pixfmt) { + case PIXFORMAT_GRAYSCALE: { + image_t buf = { + .w = dst->w, + .h = VBAYER_BUF_BROWS, + .pixfmt = PIXFORMAT_GRAYSCALE, + .data = fb_alloc(dst->w * VBAYER_BUF_BROWS * sizeof(uint8_t), FB_ALLOC_PREFER_SPEED), + }; + + switch (src->pixfmt) { + case PIXFORMAT_BAYER_BGGR: { + vdebayer_bggr_to_grayscale_awb(src, dst, &buf, red_gain, blue_gain); + break; + } + case PIXFORMAT_BAYER_GBRG: { + 
vdebayer_gbrg_to_grayscale_awb(src, dst, &buf, red_gain, blue_gain); + break; + } + case PIXFORMAT_BAYER_GRBG: { + vdebayer_grbg_to_grayscale_awb(src, dst, &buf, red_gain, blue_gain); + break; + } + case PIXFORMAT_BAYER_RGGB: { + vdebayer_rggb_to_grayscale_awb(src, dst, &buf, red_gain, blue_gain); + break; + } + default: { + __builtin_unreachable(); + } + } + + // Copy any remaining lines from the buffer image... + for (int32_t y = IM_MAX(dst->h - VBAYER_BUF_KSIZE, 0); y < dst->h; y++) { + vmemcpy_8(IMAGE_COMPUTE_GRAYSCALE_PIXEL_ROW_PTR(dst, y), + IMAGE_COMPUTE_GRAYSCALE_PIXEL_ROW_PTR(&buf, (y % VBAYER_BUF_BROWS)), + IMAGE_GRAYSCALE_LINE_LEN_BYTES(dst)); + } + + fb_free(); // buf.data + break; + } + case PIXFORMAT_RGB565: { + image_t buf = { + .w = dst->w, + .h = VBAYER_BUF_BROWS, + .pixfmt = PIXFORMAT_RGB565, + .data = fb_alloc(dst->w * VBAYER_BUF_BROWS * sizeof(uint16_t), FB_ALLOC_PREFER_SPEED), + }; + + switch (src->pixfmt) { + case PIXFORMAT_BAYER_BGGR: { + vdebayer_bggr_to_rgb565_awb(src, dst, &buf, red_gain, blue_gain); + break; + } + case PIXFORMAT_BAYER_GBRG: { + vdebayer_gbrg_to_rgb565_awb(src, dst, &buf, red_gain, blue_gain); + break; + } + case PIXFORMAT_BAYER_GRBG: { + vdebayer_grbg_to_rgb565_awb(src, dst, &buf, red_gain, blue_gain); + break; + } + case PIXFORMAT_BAYER_RGGB: { + vdebayer_rggb_to_rgb565_awb(src, dst, &buf, red_gain, blue_gain); + break; + } + default: { + __builtin_unreachable(); + } + } + + // Copy any remaining lines from the buffer image... + for (int32_t y = IM_MAX(dst->h - VBAYER_BUF_KSIZE, 0); y < dst->h; y++) { + vmemcpy_16(IMAGE_COMPUTE_RGB565_PIXEL_ROW_PTR(dst, y), + IMAGE_COMPUTE_RGB565_PIXEL_ROW_PTR(&buf, (y % VBAYER_BUF_BROWS)), + IMAGE_RGB565_LINE_LEN_BYTES(dst)); + } + + fb_free(); // buf.data + break; + } + default: { + __builtin_unreachable(); + } + } + } + + OMV_PROFILE_PRINT(); +} diff --git a/src/omv/imlib/imlib.h b/src/omv/imlib/imlib.h index ea9fe26ad..06bea41f1 100644 --- a/src/omv/imlib/imlib.h +++ b/src/omv/imlib/imlib.h @@ -1165,8 +1165,10 @@ void imlib_fill_image_from_float(image_t *img, int w, int h, float *data, float // Bayer Image Processing pixformat_t imlib_bayer_shift(pixformat_t pixfmt, int x, int y, bool transpose); +void imlib_debayer_ycbcr(image_t *src, rectangle_t *roi, int8_t *Y0, int8_t *CB, int8_t *CR); void imlib_debayer_line(int x_start, int x_end, int y_row, void *dst_row_ptr, pixformat_t pixfmt, image_t *src); void imlib_debayer_image(image_t *dst, image_t *src); +void imlib_debayer_image_awb(image_t *dst, image_t *src, bool fast, uint32_t r_out, uint32_t g_out, uint32_t b_out); // YUV Image Processing pixformat_t imlib_yuv_shift(pixformat_t pixfmt, int x); diff --git a/src/omv/imlib/jpege.c b/src/omv/imlib/jpege.c index 11ae10a73..564c9be01 100644 --- a/src/omv/imlib/jpege.c +++ b/src/omv/imlib/jpege.c @@ -295,665 +295,14 @@ void jpeg_get_mcu(image_t *src, int x_offset, int y_offset, int dx, int dy, memset(CR, 0, JPEG_444_GS_MCU_SIZE); } - int src_w = src->w, w_limit = src_w - 1, w_limit_m_1 = w_limit - 1; - int src_h = src->h, h_limit = src_h - 1, h_limit_m_1 = h_limit - 1; - - if (x_offset && y_offset && (x_offset < (src_w - JPEG_MCU_W)) && (y_offset < (src_h - JPEG_MCU_H))) { - for (int y = y_offset - 1, yy = y + JPEG_MCU_H - 1, index_e = 0, index_o = JPEG_MCU_W; y < yy; y += 2, - index_e += JPEG_MCU_W, - index_o += JPEG_MCU_W) { - uint8_t *rowptr_grgr_0 = src->data + (y * src_w); - uint8_t *rowptr_bgbg_1 = rowptr_grgr_0 + src_w; - uint8_t *rowptr_grgr_2 = rowptr_bgbg_1 + src_w; - uint8_t *rowptr_bgbg_3 = 
rowptr_grgr_2 + src_w; - - for (int x = x_offset - 1, xx = x + JPEG_MCU_W - 1; x < xx; x += 2, index_e += 2, index_o += 2) { - uint32_t row_grgr_0 = *((uint32_t *) (rowptr_grgr_0 + x)); - uint32_t row_bgbg_1 = *((uint32_t *) (rowptr_bgbg_1 + x)); - uint32_t row_grgr_2 = *((uint32_t *) (rowptr_grgr_2 + x)); - uint32_t row_bgbg_3 = *((uint32_t *) (rowptr_bgbg_3 + x)); - - int r_pixels_0, g_pixels_0, b_pixels_0; - - switch (src->pixfmt) { - case PIXFORMAT_BAYER_BGGR: { - #if defined(ARM_MATH_DSP) - int row_02 = __UHADD8(row_grgr_0, row_grgr_2); - int row_1g = __UHADD8(row_bgbg_1, __PKHTB(row_bgbg_1, row_bgbg_1, 16)); - - r_pixels_0 = __UXTB16(__UHADD8(row_02, __PKHTB(row_02, row_02, 16))); - g_pixels_0 = __UXTB16(__UHADD8(row_1g, __PKHTB(row_1g, row_02, 8))); - b_pixels_0 = __UXTB16_RORn(__UHADD8(row_bgbg_1, __PKHBT(row_bgbg_1, row_bgbg_1, 16)), 8); - #else - - int r0 = ((row_grgr_0 & 0xFF) + (row_grgr_2 & 0xFF)) >> 1; - int r2 = (((row_grgr_0 >> 16) & 0xFF) + ((row_grgr_2 >> 16) & 0xFF)) >> 1; - r_pixels_0 = (r2 << 16) | ((r0 + r2) >> 1); - - int g0 = (row_grgr_0 >> 8) & 0xFF; - int g1 = (((row_bgbg_1 >> 16) & 0xFF) + (row_bgbg_1 & 0xFF)) >> 1; - int g2 = (row_grgr_2 >> 8) & 0xFF; - g_pixels_0 = (row_bgbg_1 & 0xFF0000) | ((((g0 + g2) >> 1) + g1) >> 1); - - int b1 = (((row_bgbg_1 >> 24) & 0xFF) + ((row_bgbg_1 >> 8) & 0xFF)) >> 1; - b_pixels_0 = (b1 << 16) | ((row_bgbg_1 >> 8) & 0xFF); - - #endif - break; - } - case PIXFORMAT_BAYER_GBRG: { - #if defined(ARM_MATH_DSP) - int row_02 = __UHADD8(row_grgr_0, row_grgr_2); - int row_1g = __UHADD8(row_bgbg_1, __PKHBT(row_bgbg_1, row_bgbg_1, 16)); - - r_pixels_0 = __UXTB16_RORn(__UHADD8(row_02, __PKHBT(row_02, row_02, 16)), 8); - g_pixels_0 = __UXTB16_RORn(__UHADD8(row_1g, __PKHBT(row_1g, row_02, 8)), 8); - b_pixels_0 = __UXTB16(__UHADD8(row_bgbg_1, __PKHTB(row_bgbg_1, row_bgbg_1, 16))); - #else - - int r0 = (((row_grgr_0 >> 8) & 0xFF) + ((row_grgr_2 >> 8) & 0xFF)) >> 1; - int r2 = (((row_grgr_0 >> 24) & 0xFF) + ((row_grgr_2 >> 24) & 0xFF)) >> 1; - r_pixels_0 = r0 | (((r0 + r2) >> 1) << 16); - - int g0 = (row_grgr_0 >> 16) & 0xFF; - int g1 = (((row_bgbg_1 >> 24) & 0xFF) + ((row_bgbg_1 >> 8) & 0xFF)) >> 1; - int g2 = (row_grgr_2 >> 16) & 0xFF; - g_pixels_0 = ((row_bgbg_1 >> 8) & 0xFF) | (((((g0 + g2) >> 1) + g1) >> 1) << 16); - - int b1 = (((row_bgbg_1 >> 16) & 0xFF) + (row_bgbg_1 & 0xFF)) >> 1; - b_pixels_0 = b1 | (row_bgbg_1 & 0xFF0000); - - #endif - break; - } - case PIXFORMAT_BAYER_GRBG: { - #if defined(ARM_MATH_DSP) - int row_02 = __UHADD8(row_grgr_0, row_grgr_2); - int row_1g = __UHADD8(row_bgbg_1, __PKHBT(row_bgbg_1, row_bgbg_1, 16)); - - r_pixels_0 = __UXTB16(__UHADD8(row_bgbg_1, __PKHTB(row_bgbg_1, row_bgbg_1, 16))); - g_pixels_0 = __UXTB16_RORn(__UHADD8(row_1g, __PKHBT(row_1g, row_02, 8)), 8); - b_pixels_0 = __UXTB16_RORn(__UHADD8(row_02, __PKHBT(row_02, row_02, 16)), 8); - #else - - int r1 = (((row_bgbg_1 >> 16) & 0xFF) + (row_bgbg_1 & 0xFF)) >> 1; - r_pixels_0 = r1 | (row_bgbg_1 & 0xFF0000); - - int g0 = (row_grgr_0 >> 16) & 0xFF; - int g1 = (((row_bgbg_1 >> 24) & 0xFF) + ((row_bgbg_1 >> 8) & 0xFF)) >> 1; - int g2 = (row_grgr_2 >> 16) & 0xFF; - g_pixels_0 = ((row_bgbg_1 >> 8) & 0xFF) | (((((g0 + g2) >> 1) + g1) >> 1) << 16); - - int b0 = (((row_grgr_0 >> 8) & 0xFF) + ((row_grgr_2 >> 8) & 0xFF)) >> 1; - int b2 = (((row_grgr_0 >> 24) & 0xFF) + ((row_grgr_2 >> 24) & 0xFF)) >> 1; - b_pixels_0 = b0 | (((b0 + b2) >> 1) << 16); - - #endif - break; - } - case PIXFORMAT_BAYER_RGGB: { - #if defined(ARM_MATH_DSP) - int row_02 = __UHADD8(row_grgr_0, 
row_grgr_2); - int row_1g = __UHADD8(row_bgbg_1, __PKHTB(row_bgbg_1, row_bgbg_1, 16)); - - r_pixels_0 = __UXTB16_RORn(__UHADD8(row_bgbg_1, __PKHBT(row_bgbg_1, row_bgbg_1, 16)), 8); - g_pixels_0 = __UXTB16(__UHADD8(row_1g, __PKHTB(row_1g, row_02, 8))); - b_pixels_0 = __UXTB16(__UHADD8(row_02, __PKHTB(row_02, row_02, 16))); - #else - - int r1 = (((row_bgbg_1 >> 24) & 0xFF) + ((row_bgbg_1 >> 8) & 0xFF)) >> 1; - r_pixels_0 = (r1 << 16) | ((row_bgbg_1 >> 8) & 0xFF); - - int g0 = (row_grgr_0 >> 8) & 0xFF; - int g1 = (((row_bgbg_1 >> 16) & 0xFF) + (row_bgbg_1 & 0xFF)) >> 1; - int g2 = (row_grgr_2 >> 8) & 0xFF; - g_pixels_0 = (row_bgbg_1 & 0xFF0000) | ((((g0 + g2) >> 1) + g1) >> 1); - - int b0 = ((row_grgr_0 & 0xFF) + (row_grgr_2 & 0xFF)) >> 1; - int b2 = (((row_grgr_0 >> 16) & 0xFF) + ((row_grgr_2 >> 16) & 0xFF)) >> 1; - b_pixels_0 = (b2 << 16) | ((b0 + b2) >> 1); - - #endif - break; - } - default: { - r_pixels_0 = 0; - g_pixels_0 = 0; - b_pixels_0 = 0; - break; - } - } - - int y0 = ((r_pixels_0 * 38) + (g_pixels_0 * 75) + (b_pixels_0 * 15)) >> 7; - - #if (OMV_JPEG_CODEC_ENABLE == 0) - y0 ^= 0x800080; - #endif - - Y0[index_e] = y0, Y0[index_e + 1] = y0 >> 16; - - int u0 = __SSUB16(b_pixels_0 * 64, (r_pixels_0 * 21) + (g_pixels_0 * 43)) >> 7; - - #if (OMV_JPEG_CODEC_ENABLE == 1) - u0 ^= 0x800080; - #endif - - CB[index_e] = u0, CB[index_e + 1] = u0 >> 16; - - int v0 = __SSUB16(r_pixels_0 * 64, (g_pixels_0 * 54) + (b_pixels_0 * 10)) >> 7; - - #if (OMV_JPEG_CODEC_ENABLE == 1) - v0 ^= 0x800080; - #endif - - CR[index_e] = v0, CR[index_e + 1] = v0 >> 16; - - int r_pixels_1, g_pixels_1, b_pixels_1; - - switch (src->pixfmt) { - case PIXFORMAT_BAYER_BGGR: { - #if defined(ARM_MATH_DSP) - int row_13 = __UHADD8(row_bgbg_1, row_bgbg_3); - int row_2g = __UHADD8(row_grgr_2, __PKHBT(row_grgr_2, row_grgr_2, 16)); - - r_pixels_1 = __UXTB16(__UHADD8(row_grgr_2, __PKHTB(row_grgr_2, row_grgr_2, 16))); - g_pixels_1 = __UXTB16_RORn(__UHADD8(row_2g, __PKHBT(row_2g, row_13, 8)), 8); - b_pixels_1 = __UXTB16_RORn(__UHADD8(row_13, __PKHBT(row_13, row_13, 16)), 8); - #else - - int r2 = (((row_grgr_2 >> 16) & 0xFF) + (row_grgr_2 & 0xFF)) >> 1; - r_pixels_1 = (row_grgr_2 & 0xFF0000) | r2; - - int g1 = (row_bgbg_1 >> 16) & 0xFF; - int g2 = (((row_grgr_2 >> 24) & 0xFF) + ((row_grgr_2 >> 8) & 0xFF)) >> 1; - int g3 = (row_bgbg_3 >> 16) & 0xFF; - g_pixels_1 = (((((g1 + g3) >> 1) + g2) >> 1) << 16) | ((row_grgr_2 >> 8) & 0xFF); - - int b1 = (((row_bgbg_1 >> 8) & 0xFF) + ((row_bgbg_3 >> 8) & 0xFF)) >> 1; - int b3 = (((row_bgbg_1 >> 24) & 0xFF) + ((row_bgbg_3 >> 24) & 0xFF)) >> 1; - b_pixels_1 = (((b1 + b3) >> 1) << 16) | b1; - - #endif - break; - } - case PIXFORMAT_BAYER_GBRG: { - #if defined(ARM_MATH_DSP) - int row_13 = __UHADD8(row_bgbg_1, row_bgbg_3); - int row_2g = __UHADD8(row_grgr_2, __PKHTB(row_grgr_2, row_grgr_2, 16)); - - r_pixels_1 = __UXTB16_RORn(__UHADD8(row_grgr_2, __PKHBT(row_grgr_2, row_grgr_2, 16)), 8); - g_pixels_1 = __UXTB16(__UHADD8(row_2g, __PKHTB(row_2g, row_13, 8))); - b_pixels_1 = __UXTB16(__UHADD8(row_13, __PKHTB(row_13, row_13, 16))); - #else - - int r2 = (((row_grgr_2 >> 24) & 0xFF) + ((row_grgr_2 >> 8) & 0xFF)) >> 1; - r_pixels_1 = ((row_grgr_2 >> 8) & 0xFF) | (r2 << 16); - - int g1 = (row_bgbg_1 >> 8) & 0xFF; - int g2 = (((row_grgr_2 >> 16) & 0xFF) + (row_grgr_2 & 0xFF)) >> 1; - int g3 = (row_bgbg_3 >> 8) & 0xFF; - g_pixels_1 = ((((g1 + g3) >> 1) + g2) >> 1) | (row_grgr_2 & 0xFF0000); - - int b1 = ((row_bgbg_1 & 0xFF) + (row_bgbg_3 & 0xFF)) >> 1; - int b3 = (((row_bgbg_1 >> 16) & 0xFF) + ((row_bgbg_3 >> 16) 
& 0xFF)) >> 1; - b_pixels_1 = ((b1 + b3) >> 1) | (b3 << 16); - - #endif - break; - } - case PIXFORMAT_BAYER_GRBG: { - #if defined(ARM_MATH_DSP) - int row_13 = __UHADD8(row_bgbg_1, row_bgbg_3); - int row_2g = __UHADD8(row_grgr_2, __PKHTB(row_grgr_2, row_grgr_2, 16)); - - r_pixels_1 = __UXTB16(__UHADD8(row_13, __PKHTB(row_13, row_13, 16))); - g_pixels_1 = __UXTB16(__UHADD8(row_2g, __PKHTB(row_2g, row_13, 8))); - b_pixels_1 = __UXTB16_RORn(__UHADD8(row_grgr_2, __PKHBT(row_grgr_2, row_grgr_2, 16)), 8); - #else - - int r1 = ((row_bgbg_1 & 0xFF) + (row_bgbg_3 & 0xFF)) >> 1; - int r3 = (((row_bgbg_1 >> 16) & 0xFF) + ((row_bgbg_3 >> 16) & 0xFF)) >> 1; - r_pixels_1 = ((r1 + r3) >> 1) | (r3 << 16); - - int g1 = (row_bgbg_1 >> 8) & 0xFF; - int g2 = (((row_grgr_2 >> 16) & 0xFF) + (row_grgr_2 & 0xFF)) >> 1; - int g3 = (row_bgbg_3 >> 8) & 0xFF; - g_pixels_1 = ((((g1 + g3) >> 1) + g2) >> 1) | (row_grgr_2 & 0xFF0000); - - int b2 = (((row_grgr_2 >> 24) & 0xFF) + ((row_grgr_2 >> 8) & 0xFF)) >> 1; - b_pixels_1 = ((row_grgr_2 >> 8) & 0xFF) | (b2 << 16); - - #endif - break; - } - case PIXFORMAT_BAYER_RGGB: { - #if defined(ARM_MATH_DSP) - int row_13 = __UHADD8(row_bgbg_1, row_bgbg_3); - int row_2g = __UHADD8(row_grgr_2, __PKHBT(row_grgr_2, row_grgr_2, 16)); - - r_pixels_1 = __UXTB16_RORn(__UHADD8(row_13, __PKHBT(row_13, row_13, 16)), 8); - g_pixels_1 = __UXTB16_RORn(__UHADD8(row_2g, __PKHBT(row_2g, row_13, 8)), 8); - b_pixels_1 = __UXTB16(__UHADD8(row_grgr_2, __PKHTB(row_grgr_2, row_grgr_2, 16))); - #else - - int r1 = (((row_bgbg_1 >> 8) & 0xFF) + ((row_bgbg_3 >> 8) & 0xFF)) >> 1; - int r3 = (((row_bgbg_1 >> 24) & 0xFF) + ((row_bgbg_3 >> 24) & 0xFF)) >> 1; - r_pixels_1 = (((r1 + r3) >> 1) << 16) | r1; - - int g1 = (row_bgbg_1 >> 16) & 0xFF; - int g2 = (((row_grgr_2 >> 24) & 0xFF) + ((row_grgr_2 >> 8) & 0xFF)) >> 1; - int g3 = (row_bgbg_3 >> 16) & 0xFF; - g_pixels_1 = (((((g1 + g3) >> 1) + g2) >> 1) << 16) | ((row_grgr_2 >> 8) & 0xFF); - - int b2 = (((row_grgr_2 >> 16) & 0xFF) + (row_grgr_2 & 0xFF)) >> 1; - b_pixels_1 = (row_grgr_2 & 0xFF0000) | b2; - - #endif - break; - } - default: { - r_pixels_1 = 0; - g_pixels_1 = 0; - b_pixels_1 = 0; - break; - } - } - - int y1 = ((r_pixels_1 * 38) + (g_pixels_1 * 75) + (b_pixels_1 * 15)) >> 7; - - #if (OMV_JPEG_CODEC_ENABLE == 0) - y1 ^= 0x800080; - #endif - - Y0[index_o] = y1, Y0[index_o + 1] = y1 >> 16; - - int u1 = __SSUB16(b_pixels_1 * 64, (r_pixels_1 * 21) + (g_pixels_1 * 43)) >> 7; - - #if (OMV_JPEG_CODEC_ENABLE == 1) - u1 ^= 0x800080; - #endif - - CB[index_o] = u1, CB[index_o + 1] = u1 >> 16; - - int v1 = __SSUB16(r_pixels_1 * 64, (g_pixels_1 * 54) + (b_pixels_1 * 10)) >> 7; - - #if (OMV_JPEG_CODEC_ENABLE == 1) - v1 ^= 0x800080; - #endif - - CR[index_o] = v1, CR[index_o + 1] = v1 >> 16; - } - } - } else { - // If dy is odd this loop will produce 1 extra boundary row in the MCU. - // This is okay given the boundary checking code below. - for (int y = y_offset, yy = y + dy, index_e = 0, index_o = JPEG_MCU_W; y < yy; y += 2) { - uint8_t *rowptr_grgr_0, *rowptr_bgbg_1, *rowptr_grgr_2, *rowptr_bgbg_3; - - // keep row pointers in bounds - if (y == 0) { - rowptr_bgbg_1 = src->data; - rowptr_grgr_2 = rowptr_bgbg_1 + ((src_h >= 2) ? src_w : 0); - rowptr_bgbg_3 = rowptr_bgbg_1 + ((src_h >= 3) ? 
(src_w * 2) : 0); - rowptr_grgr_0 = rowptr_grgr_2; - } else if (y == h_limit_m_1) { - rowptr_grgr_0 = src->data + ((y - 1) * src_w); - rowptr_bgbg_1 = rowptr_grgr_0 + src_w; - rowptr_grgr_2 = rowptr_bgbg_1 + src_w; - rowptr_bgbg_3 = rowptr_bgbg_1; - } else if (y >= h_limit) { - rowptr_grgr_0 = src->data + ((y - 1) * src_w); - rowptr_bgbg_1 = rowptr_grgr_0 + src_w; - rowptr_grgr_2 = rowptr_grgr_0; - rowptr_bgbg_3 = rowptr_bgbg_1; - } else { - // get 4 neighboring rows - rowptr_grgr_0 = src->data + ((y - 1) * src_w); - rowptr_bgbg_1 = rowptr_grgr_0 + src_w; - rowptr_grgr_2 = rowptr_bgbg_1 + src_w; - rowptr_bgbg_3 = rowptr_grgr_2 + src_w; - } - - // If dx is odd this loop will produce 1 extra boundary column in the MCU. - // This is okay given the boundary checking code below. - for (int x = x_offset, xx = x + dx; x < xx; x += 2, index_e += 2, index_o += 2) { - uint32_t row_grgr_0, row_bgbg_1, row_grgr_2, row_bgbg_3; - - // keep pixels in bounds - if (x == 0) { - if (src_w >= 4) { - row_grgr_0 = *((uint32_t *) rowptr_grgr_0); - row_bgbg_1 = *((uint32_t *) rowptr_bgbg_1); - row_grgr_2 = *((uint32_t *) rowptr_grgr_2); - row_bgbg_3 = *((uint32_t *) rowptr_bgbg_3); - } else if (src_w >= 3) { - row_grgr_0 = *((uint16_t *) rowptr_grgr_0) | (*(rowptr_grgr_0 + 2) << 16); - row_bgbg_1 = *((uint16_t *) rowptr_bgbg_1) | (*(rowptr_bgbg_1 + 2) << 16); - row_grgr_2 = *((uint16_t *) rowptr_grgr_2) | (*(rowptr_grgr_2 + 2) << 16); - row_bgbg_3 = *((uint16_t *) rowptr_bgbg_3) | (*(rowptr_bgbg_3 + 2) << 16); - } else if (src_w >= 2) { - row_grgr_0 = *((uint16_t *) rowptr_grgr_0); - row_grgr_0 = (row_grgr_0 << 16) | row_grgr_0; - row_bgbg_1 = *((uint16_t *) rowptr_bgbg_1); - row_bgbg_1 = (row_bgbg_1 << 16) | row_bgbg_1; - row_grgr_2 = *((uint16_t *) rowptr_grgr_2); - row_grgr_2 = (row_grgr_2 << 16) | row_grgr_2; - row_bgbg_3 = *((uint16_t *) rowptr_bgbg_3); - row_bgbg_3 = (row_bgbg_3 << 16) | row_bgbg_3; - } else { - row_grgr_0 = *(rowptr_grgr_0) * 0x01010101; - row_bgbg_1 = *(rowptr_bgbg_1) * 0x01010101; - row_grgr_2 = *(rowptr_grgr_2) * 0x01010101; - row_bgbg_3 = *(rowptr_bgbg_3) * 0x01010101; - } - // The starting point needs to be offset by 1. The below patterns are actually - // rgrg, gbgb, rgrg, and gbgb. So, shift left and backfill the missing border pixel. 
- row_grgr_0 = (row_grgr_0 << 8) | __UXTB_RORn(row_grgr_0, 8); - row_bgbg_1 = (row_bgbg_1 << 8) | __UXTB_RORn(row_bgbg_1, 8); - row_grgr_2 = (row_grgr_2 << 8) | __UXTB_RORn(row_grgr_2, 8); - row_bgbg_3 = (row_bgbg_3 << 8) | __UXTB_RORn(row_bgbg_3, 8); - } else if (x == w_limit_m_1) { - row_grgr_0 = *((uint32_t *) (rowptr_grgr_0 + x - 2)); - row_grgr_0 = (row_grgr_0 >> 8) | ((row_grgr_0 << 8) & 0xff000000); - row_bgbg_1 = *((uint32_t *) (rowptr_bgbg_1 + x - 2)); - row_bgbg_1 = (row_bgbg_1 >> 8) | ((row_bgbg_1 << 8) & 0xff000000); - row_grgr_2 = *((uint32_t *) (rowptr_grgr_2 + x - 2)); - row_grgr_2 = (row_grgr_2 >> 8) | ((row_grgr_2 << 8) & 0xff000000); - row_bgbg_3 = *((uint32_t *) (rowptr_bgbg_3 + x - 2)); - row_bgbg_3 = (row_bgbg_3 >> 8) | ((row_bgbg_1 << 8) & 0xff000000); - } else if (x >= w_limit) { - row_grgr_0 = *((uint16_t *) (rowptr_grgr_0 + x - 1)); - row_grgr_0 = (row_grgr_0 << 16) | row_grgr_0; - row_bgbg_1 = *((uint16_t *) (rowptr_bgbg_1 + x - 1)); - row_bgbg_1 = (row_bgbg_1 << 16) | row_bgbg_1; - row_grgr_2 = *((uint16_t *) (rowptr_grgr_2 + x - 1)); - row_grgr_2 = (row_grgr_2 << 16) | row_grgr_2; - row_bgbg_3 = *((uint16_t *) (rowptr_bgbg_3 + x - 1)); - row_bgbg_3 = (row_bgbg_3 << 16) | row_bgbg_3; - } else { - // get 4 neighboring rows - row_grgr_0 = *((uint32_t *) (rowptr_grgr_0 + x - 1)); - row_bgbg_1 = *((uint32_t *) (rowptr_bgbg_1 + x - 1)); - row_grgr_2 = *((uint32_t *) (rowptr_grgr_2 + x - 1)); - row_bgbg_3 = *((uint32_t *) (rowptr_bgbg_3 + x - 1)); - } - - int r_pixels_0, g_pixels_0, b_pixels_0; - - switch (src->pixfmt) { - case PIXFORMAT_BAYER_BGGR: { - #if defined(ARM_MATH_DSP) - int row_02 = __UHADD8(row_grgr_0, row_grgr_2); - int row_1g = __UHADD8(row_bgbg_1, __PKHTB(row_bgbg_1, row_bgbg_1, 16)); - - r_pixels_0 = __UXTB16(__UHADD8(row_02, __PKHTB(row_02, row_02, 16))); - g_pixels_0 = __UXTB16(__UHADD8(row_1g, __PKHTB(row_1g, row_02, 8))); - b_pixels_0 = __UXTB16_RORn(__UHADD8(row_bgbg_1, __PKHBT(row_bgbg_1, row_bgbg_1, 16)), 8); - #else - - int r0 = ((row_grgr_0 & 0xFF) + (row_grgr_2 & 0xFF)) >> 1; - int r2 = (((row_grgr_0 >> 16) & 0xFF) + ((row_grgr_2 >> 16) & 0xFF)) >> 1; - r_pixels_0 = (r2 << 16) | ((r0 + r2) >> 1); - - int g0 = (row_grgr_0 >> 8) & 0xFF; - int g1 = (((row_bgbg_1 >> 16) & 0xFF) + (row_bgbg_1 & 0xFF)) >> 1; - int g2 = (row_grgr_2 >> 8) & 0xFF; - g_pixels_0 = (row_bgbg_1 & 0xFF0000) | ((((g0 + g2) >> 1) + g1) >> 1); - - int b1 = (((row_bgbg_1 >> 24) & 0xFF) + ((row_bgbg_1 >> 8) & 0xFF)) >> 1; - b_pixels_0 = (b1 << 16) | ((row_bgbg_1 >> 8) & 0xFF); - - #endif - break; - } - case PIXFORMAT_BAYER_GBRG: { - #if defined(ARM_MATH_DSP) - int row_02 = __UHADD8(row_grgr_0, row_grgr_2); - int row_1g = __UHADD8(row_bgbg_1, __PKHBT(row_bgbg_1, row_bgbg_1, 16)); - - r_pixels_0 = __UXTB16_RORn(__UHADD8(row_02, __PKHBT(row_02, row_02, 16)), 8); - g_pixels_0 = __UXTB16_RORn(__UHADD8(row_1g, __PKHBT(row_1g, row_02, 8)), 8); - b_pixels_0 = __UXTB16(__UHADD8(row_bgbg_1, __PKHTB(row_bgbg_1, row_bgbg_1, 16))); - #else - - int r0 = (((row_grgr_0 >> 8) & 0xFF) + ((row_grgr_2 >> 8) & 0xFF)) >> 1; - int r2 = (((row_grgr_0 >> 24) & 0xFF) + ((row_grgr_2 >> 24) & 0xFF)) >> 1; - r_pixels_0 = r0 | (((r0 + r2) >> 1) << 16); - - int g0 = (row_grgr_0 >> 16) & 0xFF; - int g1 = (((row_bgbg_1 >> 24) & 0xFF) + ((row_bgbg_1 >> 8) & 0xFF)) >> 1; - int g2 = (row_grgr_2 >> 16) & 0xFF; - g_pixels_0 = ((row_bgbg_1 >> 8) & 0xFF) | (((((g0 + g2) >> 1) + g1) >> 1) << 16); - - int b1 = (((row_bgbg_1 >> 16) & 0xFF) + (row_bgbg_1 & 0xFF)) >> 1; - b_pixels_0 = b1 | (row_bgbg_1 & 0xFF0000); - - 
#endif - break; - } - case PIXFORMAT_BAYER_GRBG: { - #if defined(ARM_MATH_DSP) - int row_02 = __UHADD8(row_grgr_0, row_grgr_2); - int row_1g = __UHADD8(row_bgbg_1, __PKHBT(row_bgbg_1, row_bgbg_1, 16)); - - r_pixels_0 = __UXTB16(__UHADD8(row_bgbg_1, __PKHTB(row_bgbg_1, row_bgbg_1, 16))); - g_pixels_0 = __UXTB16_RORn(__UHADD8(row_1g, __PKHBT(row_1g, row_02, 8)), 8); - b_pixels_0 = __UXTB16_RORn(__UHADD8(row_02, __PKHBT(row_02, row_02, 16)), 8); - #else - - int r1 = (((row_bgbg_1 >> 16) & 0xFF) + (row_bgbg_1 & 0xFF)) >> 1; - r_pixels_0 = r1 | (row_bgbg_1 & 0xFF0000); - - int g0 = (row_grgr_0 >> 16) & 0xFF; - int g1 = (((row_bgbg_1 >> 24) & 0xFF) + ((row_bgbg_1 >> 8) & 0xFF)) >> 1; - int g2 = (row_grgr_2 >> 16) & 0xFF; - g_pixels_0 = ((row_bgbg_1 >> 8) & 0xFF) | (((((g0 + g2) >> 1) + g1) >> 1) << 16); - - int b0 = (((row_grgr_0 >> 8) & 0xFF) + ((row_grgr_2 >> 8) & 0xFF)) >> 1; - int b2 = (((row_grgr_0 >> 24) & 0xFF) + ((row_grgr_2 >> 24) & 0xFF)) >> 1; - b_pixels_0 = b0 | (((b0 + b2) >> 1) << 16); - - #endif - break; - } - case PIXFORMAT_BAYER_RGGB: { - #if defined(ARM_MATH_DSP) - int row_02 = __UHADD8(row_grgr_0, row_grgr_2); - int row_1g = __UHADD8(row_bgbg_1, __PKHTB(row_bgbg_1, row_bgbg_1, 16)); - - r_pixels_0 = __UXTB16_RORn(__UHADD8(row_bgbg_1, __PKHBT(row_bgbg_1, row_bgbg_1, 16)), 8); - g_pixels_0 = __UXTB16(__UHADD8(row_1g, __PKHTB(row_1g, row_02, 8))); - b_pixels_0 = __UXTB16(__UHADD8(row_02, __PKHTB(row_02, row_02, 16))); - #else - - int r1 = (((row_bgbg_1 >> 24) & 0xFF) + ((row_bgbg_1 >> 8) & 0xFF)) >> 1; - r_pixels_0 = (r1 << 16) | ((row_bgbg_1 >> 8) & 0xFF); - - int g0 = (row_grgr_0 >> 8) & 0xFF; - int g1 = (((row_bgbg_1 >> 16) & 0xFF) + (row_bgbg_1 & 0xFF)) >> 1; - int g2 = (row_grgr_2 >> 8) & 0xFF; - g_pixels_0 = (row_bgbg_1 & 0xFF0000) | ((((g0 + g2) >> 1) + g1) >> 1); - - int b0 = ((row_grgr_0 & 0xFF) + (row_grgr_2 & 0xFF)) >> 1; - int b2 = (((row_grgr_0 >> 16) & 0xFF) + ((row_grgr_2 >> 16) & 0xFF)) >> 1; - b_pixels_0 = (b2 << 16) | ((b0 + b2) >> 1); - - #endif - break; - } - default: { - r_pixels_0 = 0; - g_pixels_0 = 0; - b_pixels_0 = 0; - break; - } - } - - int y0 = ((r_pixels_0 * 38) + (g_pixels_0 * 75) + (b_pixels_0 * 15)) >> 7; - - #if (OMV_JPEG_CODEC_ENABLE == 0) - y0 ^= 0x800080; - #endif - - Y0[index_e] = y0, Y0[index_e + 1] = y0 >> 16; - - int u0 = __SSUB16(b_pixels_0 * 64, (r_pixels_0 * 21) + (g_pixels_0 * 43)) >> 7; - - #if (OMV_JPEG_CODEC_ENABLE == 1) - u0 ^= 0x800080; - #endif - - CB[index_e] = u0, CB[index_e + 1] = u0 >> 16; - - int v0 = __SSUB16(r_pixels_0 * 64, (g_pixels_0 * 54) + (b_pixels_0 * 10)) >> 7; - - #if (OMV_JPEG_CODEC_ENABLE == 1) - v0 ^= 0x800080; - #endif - - CR[index_e] = v0, CR[index_e + 1] = v0 >> 16; - - int r_pixels_1, g_pixels_1, b_pixels_1; - - switch (src->pixfmt) { - case PIXFORMAT_BAYER_BGGR: { - #if defined(ARM_MATH_DSP) - int row_13 = __UHADD8(row_bgbg_1, row_bgbg_3); - int row_2g = __UHADD8(row_grgr_2, __PKHBT(row_grgr_2, row_grgr_2, 16)); - - r_pixels_1 = __UXTB16(__UHADD8(row_grgr_2, __PKHTB(row_grgr_2, row_grgr_2, 16))); - g_pixels_1 = __UXTB16_RORn(__UHADD8(row_2g, __PKHBT(row_2g, row_13, 8)), 8); - b_pixels_1 = __UXTB16_RORn(__UHADD8(row_13, __PKHBT(row_13, row_13, 16)), 8); - #else - - int r2 = (((row_grgr_2 >> 16) & 0xFF) + (row_grgr_2 & 0xFF)) >> 1; - r_pixels_1 = (row_grgr_2 & 0xFF0000) | r2; - - int g1 = (row_bgbg_1 >> 16) & 0xFF; - int g2 = (((row_grgr_2 >> 24) & 0xFF) + ((row_grgr_2 >> 8) & 0xFF)) >> 1; - int g3 = (row_bgbg_3 >> 16) & 0xFF; - g_pixels_1 = (((((g1 + g3) >> 1) + g2) >> 1) << 16) | ((row_grgr_2 >> 8) & 0xFF); 
- - int b1 = (((row_bgbg_1 >> 8) & 0xFF) + ((row_bgbg_3 >> 8) & 0xFF)) >> 1; - int b3 = (((row_bgbg_1 >> 24) & 0xFF) + ((row_bgbg_3 >> 24) & 0xFF)) >> 1; - b_pixels_1 = (((b1 + b3) >> 1) << 16) | b1; - - #endif - break; - } - case PIXFORMAT_BAYER_GBRG: { - #if defined(ARM_MATH_DSP) - int row_13 = __UHADD8(row_bgbg_1, row_bgbg_3); - int row_2g = __UHADD8(row_grgr_2, __PKHTB(row_grgr_2, row_grgr_2, 16)); - - r_pixels_1 = __UXTB16_RORn(__UHADD8(row_grgr_2, __PKHBT(row_grgr_2, row_grgr_2, 16)), 8); - g_pixels_1 = __UXTB16(__UHADD8(row_2g, __PKHTB(row_2g, row_13, 8))); - b_pixels_1 = __UXTB16(__UHADD8(row_13, __PKHTB(row_13, row_13, 16))); - #else - - int r2 = (((row_grgr_2 >> 24) & 0xFF) + ((row_grgr_2 >> 8) & 0xFF)) >> 1; - r_pixels_1 = ((row_grgr_2 >> 8) & 0xFF) | (r2 << 16); - - int g1 = (row_bgbg_1 >> 8) & 0xFF; - int g2 = (((row_grgr_2 >> 16) & 0xFF) + (row_grgr_2 & 0xFF)) >> 1; - int g3 = (row_bgbg_3 >> 8) & 0xFF; - g_pixels_1 = ((((g1 + g3) >> 1) + g2) >> 1) | (row_grgr_2 & 0xFF0000); - - int b1 = ((row_bgbg_1 & 0xFF) + (row_bgbg_3 & 0xFF)) >> 1; - int b3 = (((row_bgbg_1 >> 16) & 0xFF) + ((row_bgbg_3 >> 16) & 0xFF)) >> 1; - b_pixels_1 = ((b1 + b3) >> 1) | (b3 << 16); - - #endif - break; - } - case PIXFORMAT_BAYER_GRBG: { - #if defined(ARM_MATH_DSP) - int row_13 = __UHADD8(row_bgbg_1, row_bgbg_3); - int row_2g = __UHADD8(row_grgr_2, __PKHTB(row_grgr_2, row_grgr_2, 16)); - - r_pixels_1 = __UXTB16(__UHADD8(row_13, __PKHTB(row_13, row_13, 16))); - g_pixels_1 = __UXTB16(__UHADD8(row_2g, __PKHTB(row_2g, row_13, 8))); - b_pixels_1 = __UXTB16_RORn(__UHADD8(row_grgr_2, __PKHBT(row_grgr_2, row_grgr_2, 16)), 8); - #else - - int r1 = ((row_bgbg_1 & 0xFF) + (row_bgbg_3 & 0xFF)) >> 1; - int r3 = (((row_bgbg_1 >> 16) & 0xFF) + ((row_bgbg_3 >> 16) & 0xFF)) >> 1; - r_pixels_1 = ((r1 + r3) >> 1) | (r3 << 16); - - int g1 = (row_bgbg_1 >> 8) & 0xFF; - int g2 = (((row_grgr_2 >> 16) & 0xFF) + (row_grgr_2 & 0xFF)) >> 1; - int g3 = (row_bgbg_3 >> 8) & 0xFF; - g_pixels_1 = ((((g1 + g3) >> 1) + g2) >> 1) | (row_grgr_2 & 0xFF0000); - - int b2 = (((row_grgr_2 >> 24) & 0xFF) + ((row_grgr_2 >> 8) & 0xFF)) >> 1; - b_pixels_1 = ((row_grgr_2 >> 8) & 0xFF) | (b2 << 16); - - #endif - break; - } - case PIXFORMAT_BAYER_RGGB: { - #if defined(ARM_MATH_DSP) - int row_13 = __UHADD8(row_bgbg_1, row_bgbg_3); - int row_2g = __UHADD8(row_grgr_2, __PKHBT(row_grgr_2, row_grgr_2, 16)); - - r_pixels_1 = __UXTB16_RORn(__UHADD8(row_13, __PKHBT(row_13, row_13, 16)), 8); - g_pixels_1 = __UXTB16_RORn(__UHADD8(row_2g, __PKHBT(row_2g, row_13, 8)), 8); - b_pixels_1 = __UXTB16(__UHADD8(row_grgr_2, __PKHTB(row_grgr_2, row_grgr_2, 16))); - #else - - int r1 = (((row_bgbg_1 >> 8) & 0xFF) + ((row_bgbg_3 >> 8) & 0xFF)) >> 1; - int r3 = (((row_bgbg_1 >> 24) & 0xFF) + ((row_bgbg_3 >> 24) & 0xFF)) >> 1; - r_pixels_1 = (((r1 + r3) >> 1) << 16) | r1; - - int g1 = (row_bgbg_1 >> 16) & 0xFF; - int g2 = (((row_grgr_2 >> 24) & 0xFF) + ((row_grgr_2 >> 8) & 0xFF)) >> 1; - int g3 = (row_bgbg_3 >> 16) & 0xFF; - g_pixels_1 = (((((g1 + g3) >> 1) + g2) >> 1) << 16) | ((row_grgr_2 >> 8) & 0xFF); - - int b2 = (((row_grgr_2 >> 16) & 0xFF) + (row_grgr_2 & 0xFF)) >> 1; - b_pixels_1 = (row_grgr_2 & 0xFF0000) | b2; - - #endif - break; - } - default: { - r_pixels_1 = 0; - g_pixels_1 = 0; - b_pixels_1 = 0; - break; - } - } - - int y1 = ((r_pixels_1 * 38) + (g_pixels_1 * 75) + (b_pixels_1 * 15)) >> 7; - - #if (OMV_JPEG_CODEC_ENABLE == 0) - y1 ^= 0x800080; - #endif - - Y0[index_o] = y1, Y0[index_o + 1] = y1 >> 16; - - int u1 = __SSUB16(b_pixels_1 * 64, (r_pixels_1 * 21) + 
(g_pixels_1 * 43)) >> 7; - - #if (OMV_JPEG_CODEC_ENABLE == 1) - u1 ^= 0x800080; - #endif - - CB[index_o] = u1, CB[index_o + 1] = u1 >> 16; - - int v1 = __SSUB16(r_pixels_1 * 64, (g_pixels_1 * 54) + (b_pixels_1 * 10)) >> 7; - - #if (OMV_JPEG_CODEC_ENABLE == 1) - v1 ^= 0x800080; - #endif - - CR[index_o] = v1, CR[index_o + 1] = v1 >> 16; - } - - int inc = (JPEG_MCU_W * 2) - (((dx + 1) / 2) * 2); // Handle boundary column. - index_e += inc; - index_o += inc; - } - } + rectangle_t roi = { + .x = x_offset, + .y = y_offset, + .w = dx, + .h = dy + }; + + imlib_debayer_ycbcr(src, &roi, Y0, CB, CR); break; } } diff --git a/src/omv/imlib/simd.h b/src/omv/imlib/simd.h new file mode 100644 index 000000000..67b600b38 --- /dev/null +++ b/src/omv/imlib/simd.h @@ -0,0 +1,1781 @@ +/* + * This file is part of the OpenMV project. + * + * Copyright (c) 2013-2024 Ibrahim Abdelkader + * Copyright (c) 2013-2024 Kwabena W. Agyeman + * + * This work is licensed under the MIT license, see the file LICENSE for details. + * + * SIMD abstraction. + */ +#include +#include + +#if (__ARM_ARCH >= 8) +#define VECTOR_SIZE_BYTES 16 +#else +#define VECTOR_SIZE_BYTES 4 +#endif + +#define INT8_VECTOR_SIZE (VECTOR_SIZE_BYTES / 1U) +#define UINT8_VECTOR_SIZE (VECTOR_SIZE_BYTES / 1U) + +#define INT16_VECTOR_SIZE (VECTOR_SIZE_BYTES / 2U) +#define UINT16_VECTOR_SIZE (VECTOR_SIZE_BYTES / 2U) + +#define INT32_VECTOR_SIZE (VECTOR_SIZE_BYTES / 4U) +#define UINT32_VECTOR_SIZE (VECTOR_SIZE_BYTES / 4U) + +#if (VECTOR_SIZE_BYTES >= 8) +#define INT64_VECTOR_SIZE (VECTOR_SIZE_BYTES / 8U) +#define UINT64_VECTOR_SIZE (VECTOR_SIZE_BYTES / 8U) +#endif + +#if (__ARM_ARCH >= 8) +typedef int8x16_t v128_s8_t; +typedef uint8x16_t v128_u8_t; + +typedef int16x8_t v128_s16_t; +typedef uint16x8_t v128_u16_t; + +typedef int32x4_t v128_s32_t; +typedef uint32x4_t v128_u32_t; + +#if (VECTOR_SIZE_BYTES >= 8) +typedef int64x2_t v128_s64_t; +typedef uint64x2_t v128_u64_t; +#endif + +typedef mve_pred16_t v128_predicate_t; +#else +typedef int8_t v128_s8_t __attribute__ ((vector_size(VECTOR_SIZE_BYTES))); +typedef uint8_t v128_u8_t __attribute__ ((vector_size(VECTOR_SIZE_BYTES))); + +typedef int16_t v128_s16_t __attribute__ ((vector_size(VECTOR_SIZE_BYTES))); +typedef uint16_t v128_u16_t __attribute__ ((vector_size(VECTOR_SIZE_BYTES))); + +typedef int32_t v128_s32_t __attribute__ ((vector_size(VECTOR_SIZE_BYTES))); +typedef uint32_t v128_u32_t __attribute__ ((vector_size(VECTOR_SIZE_BYTES))); + +#if (VECTOR_SIZE_BYTES >= 8) +typedef int64_t v128_s64_t __attribute__ ((vector_size(VECTOR_SIZE_BYTES))); +typedef uint64_t v128_u64_t __attribute__ ((vector_size(VECTOR_SIZE_BYTES))); +#endif + +typedef uint32_t v128_predicate_t; +#endif + +typedef union { + v128_s8_t s8; + v128_u8_t u8; + v128_s16_t s16; + v128_u16_t u16; + v128_s32_t s32; + v128_u32_t u32; + #if (VECTOR_SIZE_BYTES >= 8) + v128_s64_t s64; + v128_u64_t u64; + #endif +} v128_t; + +// These structures are meant to be returned via inline functions so that the compiler can optimize +// them across function boundaries. DO NOT return these via reference as the compiler will NOT +// treat them as local variables anymore and will NOT optimize them across function boundaries. +// Note: Values in the structures are named on-purpose force constant indexing as they are meant to +// be optimized away by the compiler. 
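+//
+// For example (p being any uint8_t source pointer), prefer:
+//
+//   v2x_rows_t rows = vld2_u8(p);
+//   v128_t avg = vhadd_u8(rows.r0, rows.r1);
+//
+// over passing a v2x_rows_t * into the load helper, so that r0/r1 remain in vector registers.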
+ +typedef union vrow_ptr { + uint8_t *u8; + int8_t *s8; + uint16_t *u16; + int16_t *s16; + uint32_t *u32; + int32_t *s32; + #if (VECTOR_SIZE_BYTES >= 8) + uint64_t *u64; + int64_t *s64; + #endif +} vrow_ptr_t; + +typedef struct v2x_row_ptrs { + vrow_ptr_t p0, p1; +} v2x_row_ptrs_t; + +typedef struct v2x_rows { + v128_t r0, r1; +} v2x_rows_t; + +typedef struct v3x_row_ptrs { + vrow_ptr_t p0, p1, p2; +} v3x_row_ptrs_t; + +typedef struct v3x_rows { + v128_t r0, r1, r2; +} v3x_rows_t; + +typedef struct v4x_row_ptrs { + vrow_ptr_t p0, p1, p2, p3; +} v4x_row_ptrs_t; + +typedef struct v4x_rows { + v128_t r0, r1, r2, r3; +} v4x_rows_t; + +typedef struct vrgb_pixels { + v128_t r, g, b; +} vrgb_pixels_t; + +static inline v128_predicate_t vpredicate_8(uint32_t n) { + #if (__ARM_ARCH >= 8) + return vctp8q(n); + #else + return IM_MIN(n, UINT8_VECTOR_SIZE); + #endif +} + +static inline v128_predicate_t vpredicate_16(uint32_t n) { + #if (__ARM_ARCH >= 8) + return vctp16q(n); + #else + return IM_MIN(n, UINT16_VECTOR_SIZE); + #endif +} + +static inline v128_predicate_t vpredicate_32(uint32_t n) { + #if (__ARM_ARCH >= 8) + return vctp32q(n); + #else + return IM_MIN(n, UINT32_VECTOR_SIZE); + #endif +} + +#if (VECTOR_SIZE_BYTES >= 8) +static inline v128_predicate_t vpredicate_64(uint32_t n) { + #if (__ARM_ARCH >= 8) + return vctp64q(n); + #else + return IM_MIN(n, UINT64_VECTOR_SIZE); + #endif +} +#endif + +static inline uint32_t vpredicate_8_get_mask(v128_predicate_t pred) { + #if (__ARM_ARCH >= 8) + return pred; + #else + return (1 << pred) - 1; + #endif +} + +static inline uint32_t vpredicate_16_get_mask(v128_predicate_t pred) { + #if (__ARM_ARCH >= 8) + return pred; + #else + return (1 << (pred * 2)) - 1; + #endif +} + +static inline uint32_t vpredicate_32_get_mask(v128_predicate_t pred) { + #if (__ARM_ARCH >= 8) + return pred; + #else + return (1 << (pred * 4)) - 1; + #endif +} + +#if (VECTOR_SIZE_BYTES >= 8) +static inline uint32_t vpredicate_64_get_mask(v128_predicate_t pred) { + #if (__ARM_ARCH >= 8) + return pred; + #else + return (1 << (pred * 8)) - 1; + #endif +} +#endif + +static inline uint32_t vpredicate_8_get_n(v128_predicate_t pred) { + #if (__ARM_ARCH >= 8) + return 32 - __CLZ(pred); + #else + return pred; + #endif +} + +static inline uint32_t vpredicate_16_get_n(v128_predicate_t pred) { + #if (__ARM_ARCH >= 8) + return (32 - __CLZ(pred)) / 2; + #else + return pred; + #endif +} + +static inline uint32_t vpredicate_32_get_n(v128_predicate_t pred) { + #if (__ARM_ARCH >= 8) + return (32 - __CLZ(pred)) / 4; + #else + return pred; + #endif +} + +static inline uint32_t vpredicate_64_get_n(v128_predicate_t pred) { + #if (__ARM_ARCH >= 8) + return (32 - __CLZ(pred)) / 8; + #else + return pred; + #endif +} + +static inline bool vpredicate_8_all_lanes_active(v128_predicate_t pred) { + #if (__ARM_ARCH >= 8) + return pred == ((1 << VECTOR_SIZE_BYTES) - 1); + #else + return pred == (VECTOR_SIZE_BYTES / 1U); + #endif +} + +static inline bool vpredicate_16_all_lanes_active(v128_predicate_t pred) { + #if (__ARM_ARCH >= 8) + return pred == ((1 << VECTOR_SIZE_BYTES) - 1); + #else + return pred == (VECTOR_SIZE_BYTES / 2U); + #endif +} + +static inline bool vpredicate_32_all_lanes_active(v128_predicate_t pred) { + #if (__ARM_ARCH >= 8) + return pred == ((1 << VECTOR_SIZE_BYTES) - 1); + #else + return pred == (VECTOR_SIZE_BYTES / 4U); + #endif +} + +static inline bool vpredicate_64_all_lanes_active(v128_predicate_t pred) { + #if (__ARM_ARCH >= 8) + return pred == ((1 << VECTOR_SIZE_BYTES) - 1); + #else + return 
pred == (VECTOR_SIZE_BYTES / 8U); + #endif +} + +static inline v128_predicate_t vpredicate_8_add(v128_predicate_t pred, uint32_t x) { + #if (__ARM_ARCH >= 8) + return (pred << x) | ((1 << x) - 1); + #else + return pred + x; + #endif +} + +static inline v128_predicate_t vpredicate_16_add(v128_predicate_t pred, uint32_t x) { + #if (__ARM_ARCH >= 8) + return (pred << (x * 2)) | ((1 << (x * 2)) - 1); + #else + return pred + x; + #endif +} + +static inline v128_predicate_t vpredicate_32_add(v128_predicate_t pred, uint32_t x) { + #if (__ARM_ARCH >= 8) + return (pred << (x * 4)) | ((1 << (x * 4)) - 1); + #else + return pred + x; + #endif +} + +static inline v128_predicate_t vpredicate_64_add(v128_predicate_t pred, uint32_t x) { + #if (__ARM_ARCH >= 8) + return (pred << (x * 8)) | ((1 << (x * 8)) - 1); + #else + return pred + x; + #endif +} + +static inline v128_t vhadd_u8(v128_t v0, v128_t v1) { + #if (__ARM_ARCH >= 8) + return (v128_t) vhaddq(v0.u8, v1.u8); + #elif (__ARM_ARCH >= 7) + return (v128_t) { + .u32 = { __UHADD8(v0.u32[0], v1.u32[0]) } + }; + #else + return (v128_t) { + .u8 = (v0.u8 + v1.u8) >> 1 + }; + #endif +} + +static inline v128_t vhadd_s8(v128_t v0, v128_t v1) { + #if (__ARM_ARCH >= 8) + return (v128_t) vhaddq(v0.s8, v1.s8); + #elif (__ARM_ARCH >= 7) + return (v128_t) { + .s32 = { __SHADD8(v0.s32[0], v1.s32[0]) } + }; + #else + return (v128_t) { + .s8 = (v0.s8 + v1.s8) >> 1 + }; + #endif +} + +static inline v128_t vhadd_u16(v128_t v0, v128_t v1) { + #if (__ARM_ARCH >= 8) + return (v128_t) vhaddq(v0.u16, v1.u16); + #elif (__ARM_ARCH >= 7) + return (v128_t) { + .u32 = { __UHADD16(v0.u32[0], v1.u32[0]) } + }; + #else + return (v128_t) { + .u16 = (v0.u16 + v1.u16) >> 1 + }; + #endif +} + +static inline v128_t vhadd_s16(v128_t v0, v128_t v1) { + #if (__ARM_ARCH >= 8) + return (v128_t) vhaddq(v0.s16, v1.s16); + #elif (__ARM_ARCH >= 7) + return (v128_t) { + .s32 = { __SHADD16(v0.s32[0], v1.s32[0]) } + }; + #else + return (v128_t) { + .s16 = (v0.s16 + v1.s16) >> 1 + }; + #endif +} + +static inline v128_t vuxtb16(v128_t v0) { + #if (__ARM_ARCH >= 8) + return (v128_t) vmovlbq(v0.u8); + #elif (__ARM_ARCH >= 7) + return (v128_t) { + .u32 = { __UXTB16(v0.u32[0]) } + }; + #else + v128_t r; + r.u16[0] = v0.u8[0]; + r.u16[1] = v0.u8[2]; + return r; + #endif +} + +static inline v128_t vsxtb16(v128_t v0) { + #if (__ARM_ARCH >= 8) + return (v128_t) vmovlbq(v0.s8); + #elif (__ARM_ARCH >= 7) + return (v128_t) { + .u32 = { __SXTB16(v0.u32[0]) } + }; + #else + v128_t r; + r.s16[0] = v0.s8[0]; + r.s16[1] = v0.s8[2]; + return r; + #endif +} + +static inline v128_t vuxtb16_ror8(v128_t v0) { + #if (__ARM_ARCH >= 8) + return (v128_t) vmovltq(v0.u8); + #elif (__ARM_ARCH >= 7) + return (v128_t) { + .u32 = { __UXTB16_RORn(v0.u32[0], 8) } + }; + #else + v128_t r; + r.u16[0] = v0.u8[1]; + r.u16[1] = v0.u8[3]; + return r; + #endif +} + +static inline v128_t vsxtb16_ror8(v128_t v0) { + #if (__ARM_ARCH >= 8) + return (v128_t) vmovltq(v0.s8); + #elif (__ARM_ARCH >= 7) + return (v128_t) { + .u32 = { __UXTB16_RORn(v0.u32[0], 8) } + }; + #else + v128_t r; + r.s16[0] = v0.s8[1]; + r.s16[1] = v0.s8[3]; + return r; + #endif +} + +static inline v128_t vuxtb32(v128_t v0) { + #if (__ARM_ARCH >= 8) + return (v128_t) vmovlbq(v0.u16); + // #elif (__ARM_ARCH >= 7) + // return (v128_t) { + // .u32 = { __UXTH(v0.u32[0]) } + // }; + #else + v128_t r; + r.u32[0] = v0.u16[0]; + return r; + #endif +} + +static inline v128_t vsxtb32(v128_t v0) { + #if (__ARM_ARCH >= 8) + return (v128_t) vmovlbq(v0.s16); + // #elif (__ARM_ARCH >= 7) + 
// return (v128_t) { + // .u32 = { __SXTH(v0.u32[0]) } + // }; + #else + v128_t r; + r.s32[0] = v0.s16[0]; + return r; + #endif +} + +static inline v128_t vpkhbt(v128_t v0, v128_t v1) { + #if (__ARM_ARCH >= 8) + return (v128_t) vsliq_n_u32(v0.u32, v1.u32, 16); + #elif (__ARM_ARCH >= 7) + return (v128_t) { + .u32 = { __PKHBT(v0.u32[0], v1.u32[0], 16) } + }; + #else + v128_t r; + r.u16[0] = v0.u16[0]; + r.u16[1] = v1.u16[0]; + return r; + #endif +} + +static inline v128_t vpkhbt_ror8(v128_t v0, v128_t v1) { + #if (__ARM_ARCH >= 8) + return (v128_t) vshrntq_n_u32(v0.u16, v1.u32, 8); + #elif (__ARM_ARCH >= 7) + return (v128_t) { + .u32 = { __PKHBT(v0.u32[0], v1.u32[0], 8) } + }; + #else + v128_t r; + r.u16[0] = v0.u16[0]; + r.u16[1] = v1.u32[0] >> 8; + return r; + #endif +} + +static inline v128_t vpkhtb(v128_t v0, v128_t v1) { + #if (__ARM_ARCH >= 8) + return (v128_t) vsriq_n_u32(v0.u32, v1.u32, 16); + #elif (__ARM_ARCH >= 7) + return (v128_t) { + .u32 = { __PKHTB(v0.u32[0], v1.u32[0], 16) } + }; + #else + v128_t r; + r.u16[0] = v1.u16[1]; + r.u16[1] = v0.u16[1]; + return r; + #endif +} + +static inline v128_t vpkhtb_ror8(v128_t v0, v128_t v1) { + #if (__ARM_ARCH >= 8) + return (v128_t) vshrnbq_n_s32(v0.s16, v1.s32, 8); + #elif (__ARM_ARCH >= 7) + return (v128_t) { + .u32 = { __PKHTB(v0.u32[0], v1.u32[0], 8) } + }; + #else + v128_t r; + r.s16[0] = v1.s32[0] >> 8; + r.s16[1] = v0.s16[1]; + return r; + #endif +} + +static inline v128_t vmov_u16_narrow_u8_lo(v128_t v0, v128_t v1) { + #if (__ARM_ARCH >= 8) + return (v128_t) vmovnbq(v0.u8, v1.u16); + #elif (__ARM_ARCH >= 7) + uint32_t t0 = v1.u32[0]; + uint32_t t1 = __USUB8(0xFF00FF00, 0x00FF00FF); (void) t1; + return (v128_t) { + .u32 = { __SEL(v0.u32[0], t0) } + }; + #else + return (v128_t) { + .u32 = { (v0.u32[0] & 0xFF00FF00) | (v1.u32[0] & 0x00FF00FF) } + }; + #endif +} + +static inline v128_t vmov_u16_narrow_u8_hi(v128_t v0, v128_t v1) { + #if (__ARM_ARCH >= 8) + return (v128_t) vmovntq(v0.u8, v1.u16); + #elif (__ARM_ARCH >= 7) + uint32_t t0 = v1.u32[0] << 8; + uint32_t t1 = __USUB8(0x00FF00FF, 0xFF00FF00); (void) t1; + return (v128_t) { + .u32 = { __SEL(v0.u32[0], t0) } + }; + #else + return (v128_t) { + .u32 = { (v0.u32[0] & 0x00FF00FF) | ((v1.u32[0] << 8) & 0xFF00FF00) } + }; + #endif +} + +#if (__ARM_ARCH >= 8) +#define vusat_s16_narrow_u8_lo(v0, v1, shift) ((v128_t) vqshrunbq_n_s16(v0.u8, v1.s16, shift)) +#else +static inline v128_t vusat_s16_narrow_u8_lo(v128_t v0, v128_t v1, uint32_t shift) { + #if (__ARM_ARCH >= 7) + uint32_t t0 = __USAT16(v1.u32[0], 8 + shift) >> shift; + uint32_t t1 = __USUB8(0xFF00FF00, 0x00FF00FF); (void) t1; + return (v128_t) { + .u32 = { __SEL(v0.u32[0], t0) } + }; + #else // There's a software implementation of __USAT16 in the ARM CMSIS extension if needed + return (v128_t) { + .u32 = { (v0.u32[0] & 0xFF00FF00) | ((__USAT16(v1.u32[0], 8 + shift) >> shift) & 0x00FF00FF) } + }; + #endif +} +#endif + +#if (__ARM_ARCH >= 8) +#define vusat_s16_narrow_u8_hi(v0, v1, shift) ((v128_t) vqshruntq_n_s16(v0.u8, v1.s16, shift)) +#else +static inline v128_t vusat_s16_narrow_u8_hi(v128_t v0, v128_t v1, uint32_t shift) { + #if (__ARM_ARCH >= 7) + uint32_t t0 = __USAT16(v1.u32[0], 8 + shift) << (8 - shift); + uint32_t t1 = __USUB8(0x00FF00FF, 0xFF00FF00); (void) t1; + return (v128_t) { + .u32 = { __SEL(v0.u32[0], t0) } + }; + #else // There's a software implementation of __USAT16 in the ARM CMSIS extension if needed + return (v128_t) { + .u32 = { (v0.u32[0] & 0x00FF00FF) | ((__USAT16(v1.u32[0], 8 + shift) << (8 - shift)) & 
0xFF00FF00) } + }; + #endif +} +#endif + +#if (__ARM_ARCH >= 8) +#define vget_u8(v0, n) vgetq_lane_u8(v0.u8, n) +#else +static inline uint8_t vget_u8(v128_t v0, uint32_t n) { + return v0.u8[n]; +} +#endif + +#if (__ARM_ARCH >= 8) +#define vget_s8(v0, n) vgetq_lane_s8(v0.s8, n) +#else +static inline int8_t vget_s8(v128_t v0, uint32_t n) { + return v0.s8[n]; +} +#endif + +#if (__ARM_ARCH >= 8) +#define vget_u16(v0, n) vgetq_lane_u16(v0.u16, n) +#else +static inline uint16_t vget_u16(v128_t v0, uint32_t n) { + return v0.u16[n]; +} +#endif + +#if (__ARM_ARCH >= 8) +#define vget_s16(v0, n) vgetq_lane_s16(v0.s16, n) +#else +static inline int16_t vget_s16(v128_t v0, uint32_t n) { + return v0.s16[n]; +} +#endif + +#if (__ARM_ARCH >= 8) +#define vget_u32(v0, n) vgetq_lane_u32(v0.u32, n) +#else +static inline uint32_t vget_u32(v128_t v0, uint32_t n) { + return v0.u32[n]; +} +#endif + +#if (__ARM_ARCH >= 8) +#define vget_s32(v0, n) vgetq_lane_s32(v0.s32, n) +#else +static inline int32_t vget_s32(v128_t v0, uint32_t n) { + return v0.s32[n]; +} +#endif + +#if (VECTOR_SIZE_BYTES >= 8) +#if (__ARM_ARCH >= 8) +#define vget_u64(v0, n) vgetq_lane_u64(v0.u64, n) +#else +static inline uint64_t vget_u64(v128_t v0, uint32_t n) { + return v0.u64[n]; +} +#endif + +#if (__ARM_ARCH >= 8) +#define vget_s64(v0, n) vgetq_lane_s64(v0.s64, n) +#else +static inline int64_t vget_s64(v128_t v0, uint32_t n) { + return v0.s64[n]; +} +#endif +#endif + +#if (__ARM_ARCH >= 8) +#define vset_u8(v0, n, x) ((v128_t) vsetq_lane_u8(x, v0.u8, n)) +#else +static inline v128_t vset_u8(v128_t v0, uint32_t n, uint8_t x) { + v0.u8[n] = x; + return v0; +} +#endif + +#if (__ARM_ARCH >= 8) +#define vset_s8(v0, n, x) ((v128_t) vsetq_lane_s8(x, v0.s8, n)) +#else +static inline v128_t vset_s8(v128_t v0, uint32_t n, int8_t x) { + v0.s8[n] = x; + return v0; +} +#endif + +#if (__ARM_ARCH >= 8) +#define vset_u16(v0, n, x) ((v128_t) vsetq_lane_u16(x, v0.u16, n)) +#else +static inline v128_t vset_u16(v128_t v0, uint32_t n, uint16_t x) { + v0.u16[n] = x; + return v0; +} +#endif + +#if (__ARM_ARCH >= 8) +#define vset_s16(v0, n, x) ((v128_t) vsetq_lane_s16(x, v0.s16, n)) +#else +static inline v128_t vset_s16(v128_t v0, uint32_t n, int16_t x) { + v0.s16[n] = x; + return v0; +} +#endif + +#if (__ARM_ARCH >= 8) +#define vset_u32(v0, n, x) ((v128_t) vsetq_lane_u32(x, v0.u32, n)) +#else +static inline v128_t vset_u32(v128_t v0, uint32_t n, uint32_t x) { + v0.u32[n] = x; + return v0; +} +#endif + +#if (__ARM_ARCH >= 8) +#define vset_s32(v0, n, x) ((v128_t) vsetq_lane_s32(x, v0.s32, n)) +#else +static inline v128_t vset_s32(v128_t v0, uint32_t n, int32_t x) { + v0.s32[n] = x; + return v0; +} +#endif + +#if (VECTOR_SIZE_BYTES >= 8) +#if (__ARM_ARCH >= 8) +#define vset_u64(v0, n, x) ((v128_t) vsetq_lane_u64(x, v0.u64, n)) +#else +static inline v128_t vset_u64(v128_t v0, uint32_t n, uint64_t x) { + v0.u64[n] = x; + return v0; +} +#endif + +#if (__ARM_ARCH >= 8) +#define vset_s64(v0, n, x) ((v128_t) vsetq_lane_s64(x, v0.s64, n)) +#else +static inline v128_t vset_s64(v128_t v0, uint32_t n, int64_t x) { + v0.s64[n] = x; + return v0; +} +#endif +#endif + +// GCC does not vectorize assignment from a scalar to a vector. +#if (__ARM_ARCH >= 8) +#define vdup_u8(x) ((v128_t) vdupq_n_u8(x)) +#else +static inline v128_t vdup_u8(uint8_t x) { + return (v128_t) { + .u32 = { x * 0x01010101 } + }; +} +#endif + +// GCC does not vectorize assignment from a scalar to a vector. 
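+// Note: the scalar fallbacks splat by multiplication instead: an 8-bit value times 0x01010101
+// replicates it into every byte of the 32-bit lane, and a 16-bit value times 0x00010001
+// replicates it into both halfwords.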
+#if (__ARM_ARCH >= 8) +#define vdup_s8(x) ((v128_t) vdupq_n_s8(x)) +#else +static inline v128_t vdup_s8(int8_t x) { + return (v128_t) { + .s32 = { (x & 0xFF) * 0x01010101 } + }; +} +#endif + +// GCC does not vectorize assignment from a scalar to a vector. +#if (__ARM_ARCH >= 8) +#define vdup_u16(x) ((v128_t) vdupq_n_u16(x)) +#else +static inline v128_t vdup_u16(uint16_t x) { + return (v128_t) { + .u32 = { x * 0x00010001 } + }; +} +#endif + +// GCC does not vectorize assignment from a scalar to a vector. +#if (__ARM_ARCH >= 8) +#define vdup_s16(x) ((v128_t) vdupq_n_s16(x)) +#else +static inline v128_t vdup_s16(int16_t x) { + return (v128_t) { + .s32 = { (x & 0xFFFF) * 0x00010001 } + }; +} +#endif + +#if (__ARM_ARCH >= 8) +#define vdup_u32(x) ((v128_t) vdupq_n_u32(x)) +#else +static inline v128_t vdup_u32(uint32_t x) { + return (v128_t) { + .u32 = { x } + }; +} +#endif + +#if (__ARM_ARCH >= 8) +#define vdup_s32(x) ((v128_t) vdupq_n_s32(x)) +#else +static inline v128_t vdup_s32(int32_t x) { + return (v128_t) { + .s32 = { x } + }; +} +#endif + +#if (__ARM_ARCH >= 8) +#define vidup_u8(start, increment) ((v128_t) vidupq_n_u8(start, increment)) +#else +static inline v128_t vidup_u8(uint32_t start, uint32_t increment) { + v128_t r; + r.u8[0] = start; + r.u8[1] = start + increment; + r.u8[2] = start + (increment * 2); + r.u8[3] = start + (increment * 3); + return r; +} +#endif + +#if (__ARM_ARCH >= 8) +#define vidup_u16(start, increment) ((v128_t) vidupq_n_u16(start, increment)) +#else +static inline v128_t vidup_u16(uint32_t start, uint32_t increment) { + v128_t r; + r.u16[0] = start; + r.u16[1] = start + increment; + return r; +} +#endif + +#if (__ARM_ARCH >= 8) +#define vidup_u32(start, increment) ((v128_t) vidupq_n_u32(start, increment)) +#else +static inline v128_t vidup_u32(uint32_t start, uint32_t increment) { + return (v128_t) { + .u32 = { start } + }; +} +#endif + +#if (__ARM_ARCH >= 8) +#define vidup_u32_unaligned(start, increment) ({ \ + v128_t offsets = (v128_t) vidupq_n_u32(start, increment); \ + offsets.u32 = vsliq_n_u32(offsets.u32, offsets.u32, 8); \ + offsets.u32 = vsliq_n_u32(offsets.u32, offsets.u32, 16); \ + offsets.u8 = vaddq(offsets.u8, viwdupq_n_u8(0, 4, 1)); \ + offsets; \ + }) +#else +static inline v128_t vidup_u32_unaligned(uint32_t start, uint32_t increment) { + return (v128_t) { + .u32 = { start } + }; +} +#endif + +#if (__ARM_ARCH >= 8) +#define vshlc(v0, reg, n) ((v128_t) vshlcq(v0.u32, reg, n)) +#else +static inline v128_t vshlc(v128_t v0, uint32_t *reg, uint32_t n) { + v128_t r = (v128_t) { + .u32 = { (v0.u32[0] << n) | ((*reg) & ((1 << n) - 1)) } + }; + *reg = v0.u32[0] >> (32 - n); + return r; +} +#endif + +static inline v128_t vadd_u32(v128_t v0, v128_t v1) { + #if (__ARM_ARCH >= 8) + return (v128_t) vaddq(v0.u32, v1.u32); + #else + return (v128_t) { + .u32 = v0.u32 + v1.u32 + }; + #endif +} + +static inline v128_t vadd_s32(v128_t v0, v128_t v1) { + #if (__ARM_ARCH >= 8) + return (v128_t) vaddq(v0.s32, v1.s32); + #else + return (v128_t) { + .s32 = v0.s32 + v1.s32 + }; + #endif +} + +static inline v128_t vsub_u8(v128_t v0, v128_t v1) { + #if (__ARM_ARCH >= 8) + return (v128_t) vsubq(v0.u8, v1.u8); + #elif (__ARM_ARCH >= 7) + return (v128_t) { + .u32 = { __USUB8(v0.u32[0], v1.u32[0]) } + }; + #else + return (v128_t) { + .u8 = v0.u8 - v1.u8 + }; + #endif +} + +static inline v128_t vsub_s8(v128_t v0, v128_t v1) { + #if (__ARM_ARCH >= 8) + return (v128_t) vsubq(v0.s8, v1.s8); + #elif (__ARM_ARCH >= 7) + return (v128_t) { + .u32 = { __SSUB8(v0.u32[0], v1.u32[0]) } + }; 
+ #else + return (v128_t) { + .s8 = v0.s8 - v1.s8 + }; + #endif +} + +static inline v128_t vsub_u16(v128_t v0, v128_t v1) { + #if (__ARM_ARCH >= 8) + return (v128_t) vsubq(v0.u16, v1.u16); + #elif (__ARM_ARCH >= 7) + return (v128_t) { + .u32 = { __USUB16(v0.u32[0], v1.u32[0]) } + }; + #else + return (v128_t) { + .u16 = v0.u16 - v1.u16 + }; + #endif +} + +static inline v128_t vsub_s16(v128_t v0, v128_t v1) { + #if (__ARM_ARCH >= 8) + return (v128_t) vsubq(v0.s16, v1.s16); + #elif (__ARM_ARCH >= 7) + return (v128_t) { + .u32 = { __SSUB16(v0.u32[0], v1.u32[0]) } + }; + #else + return (v128_t) { + .s16 = v0.s16 - v1.s16 + }; + #endif +} + +#if (__ARM_ARCH >= 8) +#define vsli_u8(v0, v1, n) ((v128_t) vsliq_n_u8(v0.u8, v1.u8, n)) +#else +static inline v128_t vsli_u8(v128_t v0, v128_t v1, uint32_t n) { + uint8_t mask = (1 << n) - 1; + return (v128_t) { + .u8 = (v1.u8 << n) | (v0.u8 & mask) + }; +} +#endif + +#if (__ARM_ARCH >= 8) +#define vsli_u16(v0, v1, n) ((v128_t) vsliq_n_u16(v0.u16, v1.u16, n)) +#else +static inline v128_t vsli_u16(v128_t v0, v128_t v1, uint32_t n) { + uint16_t mask = (1 << n) - 1; + return (v128_t) { + .u16 = (v1.u16 << n) | (v0.u16 & mask) + }; +} +#endif + +#if (__ARM_ARCH >= 8) +#define vsli_u32(v0, v1, n) ((v128_t) vsliq_n_u32(v0.u32, v1.u32, n)) +#else +static inline v128_t vsli_u32(v128_t v0, v128_t v1, uint32_t n) { + uint32_t mask = (1 << n) - 1; + return (v128_t) { + .u32 = (v1.u32 << n) | (v0.u32 & mask) + }; +} +#endif + +#if (__ARM_ARCH >= 8) +#define vsri_u8(v0, v1, n) ((v128_t) vsriq_n_u8(v0.u8, v1.u8, n)) +#else +static inline v128_t vsri_u8(v128_t v0, v128_t v1, uint32_t n) { + uint8_t mask = ~((1 << (8 - n)) - 1); + return (v128_t) { + .u8 = (v1.u8 >> n) | (v0.u8 & mask) + }; +} +#endif + +#if (__ARM_ARCH >= 8) +#define vsri_u16(v0, v1, n) ((v128_t) vsriq_n_u16(v0.u16, v1.u16, n)) +#else +static inline v128_t vsri_u16(v128_t v0, v128_t v1, uint32_t n) { + uint16_t mask = ~((1 << (16 - n)) - 1); + return (v128_t) { + .u16 = (v1.u16 >> n) | (v0.u16 & mask) + }; +} +#endif + +#if (__ARM_ARCH >= 8) +#define vsri_u32(v0, v1, n) ((v128_t) vsriq_n_u32(v0.u32, v1.u32, n)) +#else +static inline v128_t vsri_u32(v128_t v0, v128_t v1, uint32_t n) { + uint32_t mask = ~((1 << (32 - n)) - 1); + return (v128_t) { + .u32 = (v1.u32 >> n) | (v0.u32 & mask) + }; +} +#endif + +#if (__ARM_ARCH >= 8) +#define vasr_s16(v0, n) ((v128_t) vshrq(v0.s16, n)) +#else +static inline v128_t vasr_s16(v128_t v0, uint32_t n) { + return (v128_t) { + .s16 = v0.s16 >> n + }; +} +#endif + +#if (__ARM_ARCH >= 8) +#define vasr_s32(v0, n) ((v128_t) vshrq(v0.s32, n)) +#else +static inline v128_t vasr_s32(v128_t v0, uint32_t n) { + return (v128_t) { + .s32 = v0.s32 >> n + }; +} +#endif + +#if (__ARM_ARCH >= 8) +#define vlsl_u16(v0, n) ((v128_t) vshlq_n(v0.u16, n)) +#else +static inline v128_t vlsl_u16(v128_t v0, uint32_t n) { + return (v128_t) { + .u16 = v0.u16 << n + }; +} +#endif + +#if (__ARM_ARCH >= 8) +#define vlsl_u32(v0, n) ((v128_t) vshlq_n(v0.u32, n)) +#else +static inline v128_t vlsl_u32(v128_t v0, uint32_t n) { + return (v128_t) { + .u32 = v0.u32 << n + }; +} +#endif + +#if (__ARM_ARCH >= 8) +#define vlsl_s16(v0, n) ((v128_t) vshlq_n(v0.s16, n)) +#else +static inline v128_t vlsl_s16(v128_t v0, uint32_t n) { + return (v128_t) { + .s16 = v0.s16 << n + }; +} +#endif + +#if (__ARM_ARCH >= 8) +#define vlsl_s32(v0, n) ((v128_t) vshlq_n(v0.s32, n)) +#else +static inline v128_t vlsl_s32(v128_t v0, uint32_t n) { + return (v128_t) { + .s32 = v0.s32 << n + }; +} +#endif + +#if (__ARM_ARCH >= 8) 
+#define vlsr_u16(v0, n) ((v128_t) vshrq(v0.u16, n)) +#else +static inline v128_t vlsr_u16(v128_t v0, uint32_t n) { + return (v128_t) { + .u16 = v0.u16 >> n + }; +} +#endif + +#if (__ARM_ARCH >= 8) +#define vlsr_u32(v0, n) ((v128_t) vshrq(v0.u32, n)) +#else +static inline v128_t vlsr_u32(v128_t v0, uint32_t n) { + return (v128_t) { + .u32 = v0.u32 >> n + }; +} +#endif + +static inline v128_t vand_u32(v128_t v0, v128_t v1) { + #if (__ARM_ARCH >= 8) + return (v128_t) vandq(v0.u32, v1.u32); + #else + return (v128_t) { + .u32 = v0.u32 & v1.u32 + }; + #endif +} + +static inline v128_t vshl_u8(v128_t v0, v128_t v1) { + #if (__ARM_ARCH >= 8) + return (v128_t) vshlq(v0.u8, v1.s8); + #else + return (v128_t) { + .u8 = v0.u8 << v1.u8 + }; + #endif +} + +static inline v128_t vshl_u16(v128_t v0, v128_t v1) { + #if (__ARM_ARCH >= 8) + return (v128_t) vshlq(v0.u16, v1.s16); + #else + return (v128_t) { + .u16 = v0.u16 << v1.u16 + }; + #endif +} + +static inline v128_t vand_s32(v128_t v0, v128_t v1) { + #if (__ARM_ARCH >= 8) + return (v128_t) vandq(v0.s32, v1.s32); + #else + return (v128_t) { + .s32 = v0.s32 & v1.s32 + }; + #endif +} + +static inline v128_t vorr_u32(v128_t v0, v128_t v1) { + #if (__ARM_ARCH >= 8) + return (v128_t) vorrq(v0.u32, v1.u32); + #else + return (v128_t) { + .u32 = v0.u32 | v1.u32 + }; + #endif +} + +static inline v128_t vorr_s32(v128_t v0, v128_t v1) { + #if (__ARM_ARCH >= 8) + return (v128_t) vorrq(v0.s32, v1.s32); + #else + return (v128_t) { + .s32 = v0.s32 | v1.s32 + }; + #endif +} + +static inline v128_t veor_u32(v128_t v0, v128_t v1) { + #if (__ARM_ARCH >= 8) + return (v128_t) veorq(v0.u32, v1.u32); + #else + return (v128_t) { + .u32 = v0.u32 ^ v1.u32 + }; + #endif +} + +static inline v128_t veor_s32(v128_t v0, v128_t v1) { + #if (__ARM_ARCH >= 8) + return (v128_t) veorq(v0.s32, v1.s32); + #else + return (v128_t) { + .s32 = v0.s32 ^ v1.s32 + }; + #endif +} + +static inline v128_t vmul_u32(v128_t v0, v128_t v1) { + #if (__ARM_ARCH >= 8) + return (v128_t) vmulq(v0.u32, v1.u32); + #else + return (v128_t) { + .u32 = v0.u32 * v1.u32 + }; + #endif +} + +static inline v128_t vmul_s32(v128_t v0, v128_t v1) { + #if (__ARM_ARCH >= 8) + return (v128_t) vmulq(v0.s32, v1.s32); + #else + return (v128_t) { + .s32 = v0.s32 * v1.s32 + }; + #endif +} + +static inline v128_t vmla_u32(v128_t v0, v128_t v1, v128_t v2) { + #if (__ARM_ARCH >= 8) + return (v128_t) vaddq(vmulq(v0.u32, v1.u32), v2.u32); + #else + return (v128_t) { + .u32 = (v0.u32 * v1.u32) + v2.u32 + }; + #endif +} + +static inline v128_t vmla_s32(v128_t v0, v128_t v1, v128_t v2) { + #if (__ARM_ARCH >= 8) + return (v128_t) vaddq(vmulq(v0.s32, v1.s32), v2.s32); + #else + return (v128_t) { + .s32 = (v0.s32 * v1.s32) + v2.s32 + }; + #endif +} + +static inline v128_t vmul_n_u16(v128_t v0, uint16_t x) { + #if (__ARM_ARCH >= 8) + return (v128_t) vmulq_n_u16(v0.u16, x); + #else + return (v128_t) { + .u16 = v0.u16 * x + }; + #endif +} + +static inline v128_t vmul_n_u32(v128_t v0, uint32_t x) { + #if (__ARM_ARCH >= 8) + return (v128_t) vmulq_n_u32(v0.u32, x); + #else + return (v128_t) { + .u32 = v0.u32 * x + }; + #endif +} + +static inline v128_t vmul_n_s16(v128_t v0, int16_t x) { + #if (__ARM_ARCH >= 8) + return (v128_t) vmulq_n_s16(v0.s16, x); + #else + return (v128_t) { + .s16 = v0.s16 * x + }; + #endif +} + +static inline v128_t vmul_n_s32(v128_t v0, int32_t x) { + #if (__ARM_ARCH >= 8) + return (v128_t) vmulq_n_s32(v0.s32, x); + #else + return (v128_t) { + .s32 = v0.s32 * x + }; + #endif +} + +static inline v128_t vmla_n_u16(v128_t v0, 
uint16_t x, v128_t v2) { + #if (__ARM_ARCH >= 8) + return (v128_t) vmlaq_n_u16(v2.u16, v0.u16, x); + #else + return (v128_t) { + .u16 = (v0.u16 * x) + v2.u16 + }; + #endif +} + +static inline v128_t vmla_n_u32(v128_t v0, uint32_t x, v128_t v2) { + #if (__ARM_ARCH >= 8) + return (v128_t) vmlaq_n_u32(v2.u32, v0.u32, x); + #else + return (v128_t) { + .u32 = (v0.u32 * x) + v2.u32 + }; + #endif +} + +static inline v128_t vmla_n_s16(v128_t v0, int16_t x, v128_t v2) { + #if (__ARM_ARCH >= 8) + return (v128_t) vmlaq_n_s16(v2.s16, v0.s16, x); + #else + return (v128_t) { + .s16 = (v0.s16 * x) + v2.s16 + }; + #endif +} + +static inline v128_t vmla_n_s32(v128_t v0, int32_t x, v128_t v2) { + #if (__ARM_ARCH >= 8) + return (v128_t) vmlaq_n_s32(v2.s32, v0.s32, x); + #else + return (v128_t) { + .s32 = (v0.s32 * x) + v2.s32 + }; + #endif +} + +static inline uint32_t vmladav_u16(v128_t v0, v128_t v1) { + #if (__ARM_ARCH >= 8) + return vmladavq_u16(v0.u16, v1.u16); + #else + return (v0.u16[0] * v1.u16[0]) + (v0.u16[1] * v1.u16[1]); + #endif +} + +static inline int32_t vmladav_s16(v128_t v0, v128_t v1) { + #if (__ARM_ARCH >= 8) + return vmladavq_s16(v0.s16, v1.s16); + #elif (__ARM_ARCH >= 7) + return __SMUAD(v0.u32[0], v1.u32[0]); + #else + return (v0.s16[0] * v1.s16[0]) + (v0.s16[1] * v1.s16[1]); + #endif +} + +static inline uint32_t vmladava_u16(v128_t v0, v128_t v1, uint32_t acc) { + #if (__ARM_ARCH >= 8) + return vmladavaq_u16(acc, v0.u16, v1.u16); + #else + return acc + (v0.u16[0] * v1.u16[0]) + (v0.u16[1] * v1.u16[1]); + #endif +} + +static inline int32_t vmladava_s16(v128_t v0, v128_t v1, int32_t acc) { + #if (__ARM_ARCH >= 8) + return vmladavaq_s16(acc, v0.s16, v1.s16); + #elif (__ARM_ARCH >= 7) + return __SMLAD(v0.u32[0], v1.u32[0], acc); + #else + return acc + (v0.s16[0] * v1.s16[0]) + (v0.s16[1] * v1.s16[1]); + #endif +} + +static inline v128_t vldr_u8(const uint8_t *p) { + #if (__ARM_ARCH >= 8) + return (v128_t) vldrbq_u8(p); + #else + return (v128_t) { + .u32 = { *((const uint32_t *) p) } + }; + #endif +} + +static inline v128_t vldr_u8_pred(const uint8_t *p, v128_predicate_t pred) { + #if (__ARM_ARCH >= 8) + return (v128_t) vldrbq_z_u8(p, pred); + #else + v128_t v0; + + if (pred > 3) { + v0.u32[0] = *((const uint32_t *) p); + } else if (pred > 2) { + v0.u32[0] = *((const uint16_t *) p); + v0.u8[2] = p[2]; + } else if (pred > 1) { + v0.u32[0] = *((const uint16_t *) p); + } else { + v0.u32[0] = p[0]; + } + + return v0; + #endif +} + +static inline void vstr_u8(uint8_t *p, v128_t v0) { + #if (__ARM_ARCH >= 8) + vstrbq(p, v0.u8); + #else + *((uint32_t *) p) = v0.u32[0]; + #endif +} + +static inline void vstr_u8_pred(uint8_t *p, v128_t v0, v128_predicate_t pred) { + #if (__ARM_ARCH >= 8) + vstrbq_p_u8(p, v0.u8, pred); + #else + if (pred > 3) { + *((uint32_t *) p) = v0.u32[0]; + } else if (pred > 2) { + *((uint16_t *) p) = v0.u16[0]; + p[2] = v0.u8[2]; + } else if (pred > 1) { + *((uint16_t *) p) = v0.u16[0]; + } else { + p[0] = v0.u8[0]; + } + #endif +} + +static inline v128_t vldr_u16(const uint16_t *p) { + #if (__ARM_ARCH >= 8) + return (v128_t) vldrhq_u16(p); + #else + return (v128_t) { + .u32 = { *((const uint32_t *) p) } + }; + #endif +} + +static inline v128_t vldr_u16_pred(const uint16_t *p, v128_predicate_t pred) { + #if (__ARM_ARCH >= 8) + return (v128_t) vldrhq_z_u16(p, pred); + #else + v128_t v0; + + if (pred > 1) { + v0.u32[0] = *((const uint32_t *) p); + } else { + v0.u32[0] = p[0]; + } + + return v0; + #endif +} + +static inline void vstr_u16(uint16_t *p, v128_t v0) { + #if (__ARM_ARCH 
>= 8) + vstrhq(p, v0.u16); + #else + *((uint32_t *) p) = v0.u32[0]; + #endif +} + +static inline void vstr_u16_pred(uint16_t *p, v128_t v0, v128_predicate_t pred) { + #if (__ARM_ARCH >= 8) + vstrhq_p_u16(p, v0.u16, pred); + #else + if (pred > 1) { + *((uint32_t *) p) = v0.u32[0]; + } else { + p[0] = v0.u16[0]; + } + #endif +} + +static inline v128_t vldr_u8_widen_u16_pred(uint8_t *p, v128_predicate_t pred) { + #if (__ARM_ARCH >= 8) + return (v128_t) vldrbq_z_u16(p, pred); + #else + v128_t v0; + + if (pred > 1) { + v0.u32[0] = *((uint16_t *) p); + v0.u8[2] = v0.u8[1]; + v0.u8[1] = 0; + } else { + v0.u32[0] = *p; + } + + return v0; + #endif +} + +static inline void vstr_u16_narrow_u8_pred(uint8_t *p, v128_t v0, v128_predicate_t pred) { + #if (__ARM_ARCH >= 8) + vstrbq_p_u16(p, v0.u16, pred); + #else + *p = v0.u32[0]; + if (pred > 1) { + *((uint8_t *) (p + 1)) = v0.u8[2]; + } + #endif +} + +static inline v128_t vldr_u32_gather_unaligned(const uint8_t *p, v128_t offsets) { + #if (__ARM_ARCH >= 8) + // vldrwq_gather_offset cannot handle unaligned loads. + return (v128_t) vldrbq_gather_offset(p, offsets.u8); + #else + return (v128_t) { + .u32 = { *((const uint32_t *) (p + offsets.u32[0])) } + }; + #endif +} + +static inline v4x_rows_t vldr_u32_gather_pred_x4_unaligned(v4x_row_ptrs_t rowptrs, + uint32_t x, + v128_t offsets, + v128_predicate_t pred) { + const uint8_t *p0 = rowptrs.p0.u8 + x; + const uint8_t *p1 = rowptrs.p1.u8 + x; + const uint8_t *p2 = rowptrs.p2.u8 + x; + const uint8_t *p3 = rowptrs.p3.u8 + x; + v4x_rows_t rows; + #if (__ARM_ARCH >= 8) + // TODO: Move into a predicate block. + // vldrwq_gather_offset_z_u32 cannot handle unaligned loads. + rows.r0 = (v128_t) vldrbq_gather_offset_z_u8(p0, offsets.u8, pred); + rows.r1 = (v128_t) vldrbq_gather_offset_z_u8(p1, offsets.u8, pred); + rows.r2 = (v128_t) vldrbq_gather_offset_z_u8(p2, offsets.u8, pred); + rows.r3 = (v128_t) vldrbq_gather_offset_z_u8(p3, offsets.u8, pred); + #else + if (pred > 3) { + rows.r0.u32[0] = *((const uint32_t *) (p0 + offsets.u32[0])); + rows.r1.u32[0] = *((const uint32_t *) (p1 + offsets.u32[0])); + rows.r2.u32[0] = *((const uint32_t *) (p2 + offsets.u32[0])); + rows.r3.u32[0] = *((const uint32_t *) (p3 + offsets.u32[0])); + } else if (pred > 2) { + rows.r0.u32[0] = *((const uint16_t *) (p0 + offsets.u32[0])); + rows.r1.u32[0] = *((const uint16_t *) (p1 + offsets.u32[0])); + rows.r2.u32[0] = *((const uint16_t *) (p2 + offsets.u32[0])); + rows.r3.u32[0] = *((const uint16_t *) (p3 + offsets.u32[0])); + rows.r0.u8[2] = p0[2 + offsets.u32[0]]; + rows.r1.u8[2] = p1[2 + offsets.u32[0]]; + rows.r2.u8[2] = p2[2 + offsets.u32[0]]; + rows.r3.u8[2] = p3[2 + offsets.u32[0]]; + } else if (pred > 1) { + rows.r0.u32[0] = *((const uint16_t *) (p0 + offsets.u32[0])); + rows.r1.u32[0] = *((const uint16_t *) (p1 + offsets.u32[0])); + rows.r2.u32[0] = *((const uint16_t *) (p2 + offsets.u32[0])); + rows.r3.u32[0] = *((const uint16_t *) (p3 + offsets.u32[0])); + } else { + rows.r0.u32[0] = p0[offsets.u32[0]]; + rows.r1.u32[0] = p1[offsets.u32[0]]; + rows.r2.u32[0] = p2[offsets.u32[0]]; + rows.r3.u32[0] = p3[offsets.u32[0]]; + } + #endif + return rows; +} + +static inline v2x_rows_t vld2_u8(const uint8_t *p) { + #if (__ARM_ARCH >= 8) + uint8x16x2_t r = vld2q(p); + v2x_rows_t rows; + rows.r0.u8 = r.val[0]; + rows.r1.u8 = r.val[1]; + return rows; + #else + v128_t r0; + r0.u8[0] = p[0]; + r0.u8[1] = p[2]; + r0.u8[2] = p[4]; + r0.u8[3] = p[6]; + v128_t r1; + r1.u8[0] = p[1]; + r1.u8[1] = p[3]; + r1.u8[2] = p[5]; + r1.u8[3] = p[7]; + return 
(v2x_rows_t) { + .r0 = r0, + .r1 = r1 + }; + #endif +} + +static inline v2x_rows_t vld2_u8_len(const uint8_t *p, uint32_t len) { + len = (len > (UINT8_VECTOR_SIZE * 2)) ? (UINT8_VECTOR_SIZE * 2) : len; + + v2x_rows_t rows; + + for (uint32_t i = 0; i < len; i++) { + if (i % 2) { + rows.r1.u8[i / 2] = p[i]; + } else { + rows.r0.u8[i / 2] = p[i]; + } + } + + return rows; +} + +static inline v2x_rows_t vld2_u16(const uint16_t *p) { + #if (__ARM_ARCH >= 8) + uint16x8x2_t r = vld2q(p); + v2x_rows_t rows; + rows.r0.u16 = r.val[0]; + rows.r1.u16 = r.val[1]; + return rows; + #else + v128_t r0; + r0.u16[0] = p[0]; + r0.u16[1] = p[2]; + v128_t r1; + r1.u16[0] = p[1]; + r1.u16[1] = p[3]; + return (v2x_rows_t) { + .r0 = r0, + .r1 = r1 + }; + #endif +} + +static inline void vst2_u8(uint8_t *p, v2x_rows_t v0) { + #if (__ARM_ARCH >= 8) + uint8x16x2_t rows; + rows.val[0] = v0.r0.u8; + rows.val[1] = v0.r1.u8; + vst2q(p, rows); + #else + p[0] = v0.r0.u8[0]; + p[1] = v0.r1.u8[0]; + p[2] = v0.r0.u8[1]; + p[3] = v0.r1.u8[1]; + p[4] = v0.r0.u8[2]; + p[5] = v0.r1.u8[2]; + p[6] = v0.r0.u8[3]; + p[7] = v0.r1.u8[3]; + #endif +} + +static inline void vst2_u16(uint16_t *p, v2x_rows_t v0) { + #if (__ARM_ARCH >= 8) + uint16x8x2_t rows; + rows.val[0] = v0.r0.u16; + rows.val[1] = v0.r1.u16; + vst2q(p, rows); + #else + p[0] = v0.r0.u16[0]; + p[1] = v0.r1.u16[0]; + p[2] = v0.r0.u16[1]; + p[3] = v0.r1.u16[1]; + #endif +} + +static inline void vst2_u16_len(uint16_t *p, v2x_rows_t v0, uint32_t len) { + len = (len > (UINT16_VECTOR_SIZE * 2)) ? (UINT16_VECTOR_SIZE * 2) : len; + + for (uint32_t i = 0; i < len; i++) { + if (i % 2) { + p[i] = v0.r1.u16[i / 2]; + } else { + p[i] = v0.r0.u16[i / 2]; + } + } +} + +// n is in bytes, but, known to be a multiple of 1 with 1-byte alignment. +static inline void vmemcpy_8(void *dest, void *src, size_t n) { + #if (__ARM_ARCH >= 8) + uint8_t *dest8 = (uint8_t *) dest; + uint8_t *src8 = (uint8_t *) src; + for (; ((int32_t) n) > 0; n -= UINT8_VECTOR_SIZE) { + mve_pred16_t p = vctp8q(n); + vstrbq_p_u8(dest8, vldrbq_z_u8(src8, p), p); + dest8 += UINT8_VECTOR_SIZE; + src8 += UINT8_VECTOR_SIZE; + } + #elif (__ARM_ARCH > 6) + // ARM Cortex-M4/M7 Processors can access memory using unaligned 32-bit reads/writes. + uint32_t *dest32 = (uint32_t *) dest; + uint32_t *src32 = (uint32_t *) src; + + for (; n > 4; n -= 4) { + *dest32++ = *src32++; + } + + uint8_t *dest8 = (uint8_t *) dest32; + uint8_t *src8 = (uint8_t *) src32; + + for (; n > 0; n -= 1) { + *dest8++ = *src8++; + } + #else + memcpy(dest, src, n); + #endif +} + +// n is in bytes, but, known to be a multiple of 2 with 2-byte alignment. +static inline void vmemcpy_16(void *dest, void *src, size_t n) { + #if (__ARM_ARCH >= 8) + n = n / 2; + uint16_t *dest16 = (uint16_t *) dest; + uint16_t *src16 = (uint16_t *) src; + for (; ((int32_t) n) > 0; n -= UINT16_VECTOR_SIZE) { + mve_pred16_t p = vctp16q(n); + vstrhq_p_u16(dest16, vldrhq_z_u16(src16, p), p); + dest16 += UINT16_VECTOR_SIZE; + src16 += UINT16_VECTOR_SIZE; + } + #elif (__ARM_ARCH > 6) + // ARM Cortex-M4/M7 Processors can access memory using unaligned 32-bit reads/writes. + uint32_t *dest32 = (uint32_t *) dest; + uint32_t *src32 = (uint32_t *) src; + + for (; n > 4; n -= 4) { + *dest32++ = *src32++; + } + + uint16_t *dest16 = (uint16_t *) dest32; + uint16_t *src16 = (uint16_t *) src32; + + for (; n > 0; n -= 2) { + *dest16++ = *src16++; + } + #else + memcpy(dest, src, n); + #endif +} + +// n is in bytes, but, known to be a multiple of 4 with 4-byte alignment. 
+static inline void vmemcpy_32(void *dest, void *src, size_t n) { + #if (__ARM_ARCH >= 8) + n = n / 4; + uint32_t *dest32 = (uint32_t *) dest; + uint32_t *src32 = (uint32_t *) src; + for (; ((int32_t) n) > 0; n -= UINT32_VECTOR_SIZE) { + mve_pred16_t p = vctp32q(n); + vstrwq_p_u32(dest32, vldrwq_z_u32(src32, p), p); + dest32 += UINT32_VECTOR_SIZE; + src32 += UINT32_VECTOR_SIZE; + } + #elif (__ARM_ARCH > 6) + uint32_t *dest32 = (uint32_t *) dest; + uint32_t *src32 = (uint32_t *) src; + + for (; n > 0; n -= 4) { + *dest32++ = *src32++; + } + #else + memcpy(dest, src, n); + #endif +} + +#if (VECTOR_SIZE_BYTES >= 8) +// n is in bytes, but, known to be a multiple of 8 with 8-byte alignment. +static inline void vmemcpy_64(void *dest, void *src, size_t n) { + // There are no 64-bit vector load and store instructions. + #if (__ARM_ARCH > 6) + uint64_t *dest64 = (uint64_t *) dest; + uint64_t *src64 = (uint64_t *) src; + + for (; n > 0; n -= 8) { + *dest64++ = *src64++; + } + #else + memcpy(dest, src, n); + #endif +} +#endif + +// In the case of vectors larger than 32-bits the pattern is repeated for every 32-bits. +// +// pixels.r = MSB [0, R1, 0, R0] LSB pixels where each pixel is 8-bits. +// pixels.g = MSB [0, G1, 0, G0] LSB pixels where each pixel is 8-bits. +// pixels.b = MSB [0, B1, 0, B0] LSB pixels where each pixel is 8-bits. +// +// Y == ((R * 38) + (G * 75) + (B * 15)) / 128 +// +// Returns 2x uint8_t Grayscale (MSB [garbage, G1, garbage, G0] LSB) pixels for every 32-bits. +static inline v128_t vrgb_pixels_to_grayscale(vrgb_pixels_t pixels) { + pixels.r = vmul_n_u32(pixels.r, 38); + pixels.r = vmla_n_u32(pixels.g, 75, pixels.r); + pixels.r = vmla_n_u32(pixels.b, 15, pixels.r); + return vlsr_u32(pixels.r, 7); +} + +// In the case of vectors larger than 32-bits the pattern is repeated for every 32-bits. +// +// pixels.r = MSB [0, R1, 0, R0] LSB pixels where each pixel is 8-bits. +// pixels.g = MSB [0, G1, 0, G0] LSB pixels where each pixel is 8-bits. +// pixels.b = MSB [0, B1, 0, B0] LSB pixels where each pixel is 8-bits. +// +// Returns 2x uint16_t RGB565 (MSB [RGB1, RGB0] LSB) pixels for every 32-bits. +static inline v128_t vrgb_pixels_to_rgb565(vrgb_pixels_t pixels) { + #if (__ARM_ARCH >= 8) + pixels.r = vlsr_u16(pixels.r, 3); + pixels.g = vlsr_u16(pixels.g, 2); + pixels.b = vlsr_u16(pixels.b, 3); + return vsli_u16(vsli_u16(pixels.b, pixels.g, 5), pixels.r, 11); + #else + pixels.r = vand_u32(vlsl_u32(pixels.r, 8), vdup_u16(0xf800)); + pixels.g = vand_u32(vlsl_u32(pixels.g, 3), vdup_u16(0x07e0)); + pixels.b = vand_u32(vlsr_u32(pixels.b, 3), vdup_u16(0x001f)); + return vorr_u32(pixels.r, vorr_u32(pixels.g, pixels.b)); + #endif +} + +// In the case of vectors larger than 32-bits the pattern is repeated for every 32-bits. +// +// pixels.r = MSB [0, R1, 0, R0] LSB pixels where each pixel is 8-bits. +// pixels.g = MSB [0, G1, 0, G0] LSB pixels where each pixel is 8-bits. +// pixels.b = MSB [0, B1, 0, B0] LSB pixels where each pixel is 8-bits. +// +// Stores 2x GRAYSCALE pixels for every 32-bits. +static inline void vrgb_pixels_store_grayscale(uint8_t *p, uint32_t x, vrgb_pixels_t pixels, v128_predicate_t pred) { + vstr_u16_narrow_u8_pred(p + x, vrgb_pixels_to_grayscale(pixels), pred); +} + +// In the case of vectors larger than 32-bits the pattern is repeated for every 32-bits. +// +// pixels.r = MSB [0, R1, 0, R0] LSB pixels where each pixel is 8-bits. +// pixels.g = MSB [0, G1, 0, G0] LSB pixels where each pixel is 8-bits. 
+// pixels.b = MSB [0, B1, 0, B0] LSB pixels where each pixel is 8-bits.
+//
+// Stores 2x RGB565 pixels for every 32-bits.
+static inline void vrgb_pixels_store_rgb565(uint16_t *p, uint32_t x, vrgb_pixels_t pixels, v128_predicate_t pred) {
+    vstr_u16_pred(p + x, vrgb_pixels_to_rgb565(pixels), pred);
+}
+
+// In the case of vectors larger than 32-bits the pattern is repeated for every 32-bits.
+//
+// pixels.r = MSB [0, R1, 0, R0] LSB pixels where each pixel is 8-bits.
+// pixels.g = MSB [0, G1, 0, G0] LSB pixels where each pixel is 8-bits.
+// pixels.b = MSB [0, B1, 0, B0] LSB pixels where each pixel is 8-bits.
+//
+// Stores 2x binary pixels for every 32-bits.
+static inline void vrgb_pixels_store_binary(uint32_t *p, uint32_t x, vrgb_pixels_t pixels, v128_predicate_t pred) {
+    v128_t binary = vand_u32(vlsr_u32(vrgb_pixels_to_grayscale(pixels), 7), vdup_u16(1));
+
+    // Turn the binary pixels that are in each 16-bit lane into a binary number which effectively
+    // concatenates them all together.
+    //
+    // E.g. [bN*(1<<N) + ... + b1*(1<<1) + b0*(1<<0)] -> bN...b1b0
+    //
+    // The signed version (vmladav_s16) is used on purpose since its ARMv7 fallback maps to __SMUAD.
+    uint32_t bits = vmladav_s16(binary, vshl_u16(vdup_u16(1), vidup_u16(0, 1)));
+
+    uint32_t index = x >> 5;
+    uint32_t offset = x & 0x1f;
+    uint32_t remaining = 32 - offset;
+    uint32_t count = vpredicate_16_get_n(pred);
+    uint32_t min = (remaining < count) ? remaining : count;
+    uint32_t mask = (1 << min) - 1;
+
+    uint32_t v = p[index];
+    v = (v & ~(mask << offset)) | ((bits & mask) << offset);
+    p[index] = v;
+
+    if (count > min) {
+        mask = (1 << (count - min)) - 1;
+
+        v = p[index + 1];
+        v = (v & ~mask) | (bits & mask);
+        p[index + 1] = v;
+    }
+}

From 13b68a3ee8dcd17917b478eeff37069529bc6139 Mon Sep 17 00:00:00 2001
From: "Kwabena W. Agyeman" 
Date: Fri, 23 Aug 2024 18:00:32 -0700
Subject: [PATCH 2/2] sensors/ov7725.c: Fix sensor bayer pattern.

---
 src/omv/sensors/ov7725.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/omv/sensors/ov7725.c b/src/omv/sensors/ov7725.c
index 826b4a217..2c510c493 100644
--- a/src/omv/sensors/ov7725.c
+++ b/src/omv/sensors/ov7725.c
@@ -684,7 +684,7 @@ int ov7725_init(sensor_t *sensor) {
     sensor->rgb_swap = 1;
     sensor->blc_size = 8;
     sensor->yuv_format = SUBFORMAT_ID_YVU422;
-    sensor->cfa_format = SUBFORMAT_ID_GBRG;
+    sensor->cfa_format = SUBFORMAT_ID_BGGR;

     return 0;
 }
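
For reference, below is a minimal, host-compilable scalar sketch (not part of either patch) of what the simd.h output helpers compute per pixel: the luma coefficients come from the comment on vrgb_pixels_to_grayscale(), the RGB565 packing mirrors vrgb_pixels_to_rgb565(), and the bit packing mirrors the vmladav step in vrgb_pixels_store_binary(). The function names used here (scalar_rgb_to_y, scalar_rgb_to_rgb565, scalar_pack_binary) are illustrative only and do not exist in the OpenMV codebase.

#include <stdint.h>
#include <stdio.h>

// Y == ((R * 38) + (G * 75) + (B * 15)) / 128, matching vrgb_pixels_to_grayscale().
static uint8_t scalar_rgb_to_y(uint8_t r, uint8_t g, uint8_t b) {
    return (uint8_t) (((r * 38) + (g * 75) + (b * 15)) >> 7);
}

// RGB565 packing (5-bit red, 6-bit green, 5-bit blue), matching vrgb_pixels_to_rgb565().
static uint16_t scalar_rgb_to_rgb565(uint8_t r, uint8_t g, uint8_t b) {
    return (uint16_t) (((r >> 3) << 11) | ((g >> 2) << 5) | (b >> 3));
}

// Each grayscale pixel contributes its MSB (y >> 7), and lane i is weighted by (1 << i),
// so the per-lane bits concatenate into one word, as in vrgb_pixels_store_binary().
static uint32_t scalar_pack_binary(const uint8_t *y, uint32_t n) {
    uint32_t bits = 0;
    for (uint32_t i = 0; i < n; i++) {
        bits |= (uint32_t) (y[i] >> 7) << i;
    }
    return bits;
}

int main(void) {
    uint8_t r = 0xFF, g = 0x80, b = 0x20;
    printf("Y=%u RGB565=0x%04X\n",
           (unsigned) scalar_rgb_to_y(r, g, b),
           (unsigned) scalar_rgb_to_rgb565(r, g, b));

    uint8_t y[4] = { 10, 200, 130, 90 };
    // Only lanes 1 and 2 are >= 128, so the packed result is 0b0110 == 0x6.
    printf("binary=0x%X\n", (unsigned) scalar_pack_binary(y, 4));
    return 0;
}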