imlib/jpege: Improve jpeg YUV422/420 UV sub-sampling speed.

Kwabena W. Agyeman 2024-02-16 16:51:12 -08:00
parent 29202a2566
commit 59fea5bc52


@@ -1711,11 +1711,59 @@ bool jpeg_compress(image_t *src, image_t *dst, int quality, bool realloc) {
}
// horizontal subsampling of U & V
#if defined(ARM_MATH_DSP)
uint32_t *UDUp0 = (uint32_t *) UDU;
uint32_t *VDUp0 = (uint32_t *) VDU;
uint32_t *UDUp1 = (uint32_t *) (UDU + JPEG_444_GS_MCU_SIZE);
uint32_t *VDUp1 = (uint32_t *) (VDU + JPEG_444_GS_MCU_SIZE);
#else
int8_t *UDUp0 = UDU;
int8_t *VDUp0 = VDU;
int8_t *UDUp1 = UDUp0 + JPEG_444_GS_MCU_SIZE;
int8_t *VDUp1 = VDUp0 + JPEG_444_GS_MCU_SIZE;
#endif
for (int j = 0; j < JPEG_444_GS_MCU_SIZE; j += JPEG_MCU_W) {
#if defined(ARM_MATH_DSP)
uint32_t UDUp0_3210 = *UDUp0++;
uint32_t UDUp0_avg_32_10 = __SHADD8(UDUp0_3210, __UXTB16_RORn(UDUp0_3210, 8));
UDU_avg[j] = UDUp0_avg_32_10;
UDU_avg[j + 1] = UDUp0_avg_32_10 >> 16;
uint32_t UDUp0_7654 = *UDUp0++;
uint32_t UDUp0_avg_76_54 = __SHADD8(UDUp0_7654, __UXTB16_RORn(UDUp0_7654, 8));
UDU_avg[j + 2] = UDUp0_avg_76_54;
UDU_avg[j + 3] = UDUp0_avg_76_54 >> 16;
uint32_t UDUp1_3210 = *UDUp1++;
uint32_t UDUp1_avg_32_10 = __SHADD8(UDUp1_3210, __UXTB16_RORn(UDUp1_3210, 8));
UDU_avg[j + 4] = UDUp1_avg_32_10;
UDU_avg[j + 5] = UDUp1_avg_32_10 >> 16;
uint32_t UDUp1_7654 = *UDUp1++;
uint32_t UDUp1_avg_76_54 = __SHADD8(UDUp1_7654, __UXTB16_RORn(UDUp1_7654, 8));
UDU_avg[j + 6] = UDUp1_avg_76_54;
UDU_avg[j + 7] = UDUp1_avg_76_54 >> 16;
uint32_t VDUp0_3210 = *VDUp0++;
uint32_t VDUp0_avg_32_10 = __SHADD8(VDUp0_3210, __UXTB16_RORn(VDUp0_3210, 8));
VDU_avg[j] = VDUp0_avg_32_10;
VDU_avg[j + 1] = VDUp0_avg_32_10 >> 16;
uint32_t VDUp0_7654 = *VDUp0++;
uint32_t VDUp0_avg_76_54 = __SHADD8(VDUp0_7654, __UXTB16_RORn(VDUp0_7654, 8));
VDU_avg[j + 2] = VDUp0_avg_76_54;
VDU_avg[j + 3] = VDUp0_avg_76_54 >> 16;
uint32_t VDUp1_3210 = *VDUp1++;
uint32_t VDUp1_avg_32_10 = __SHADD8(VDUp1_3210, __UXTB16_RORn(VDUp1_3210, 8));
VDU_avg[j + 4] = VDUp1_avg_32_10;
VDU_avg[j + 5] = VDUp1_avg_32_10 >> 16;
uint32_t VDUp1_7654 = *VDUp1++;
uint32_t VDUp1_avg_76_54 = __SHADD8(VDUp1_7654, __UXTB16_RORn(VDUp1_7654, 8));
VDU_avg[j + 6] = VDUp1_avg_76_54;
VDU_avg[j + 7] = VDUp1_avg_76_54 >> 16;
#else
for (int i = 0; i < JPEG_MCU_W; i += 2) {
UDU_avg[j + (i / 2)] = (UDUp0[i] + UDUp0[i + 1]) / 2;
VDU_avg[j + (i / 2)] = (VDUp0[i] + VDUp0[i + 1]) / 2;
@@ -1726,6 +1774,7 @@ bool jpeg_compress(image_t *src, image_t *dst, int quality, bool realloc) {
VDUp0 += JPEG_MCU_W;
UDUp1 += JPEG_MCU_W;
VDUp1 += JPEG_MCU_W;
#endif
}
DCU = jpeg_processDU(&jpeg_buf, UDU_avg, fdtbl_UV, DCU, UVDC_HT, UVAC_HT);
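Review note: the DSP path above works because __UXTB16_RORn(x, 8) lines up bytes 1 and 3 of a word with bytes 0 and 2, so a single __SHADD8 produces the signed halving average of each horizontal chroma pair. The standalone sketch below is only an illustration of that lane arithmetic; the model_* helpers are hypothetical stand-ins for the intrinsics and assume a little-endian host, as on Cortex-M. It checks that byte lanes 0 and 2 of the packed result match the scalar (a + b) >> 1; the portable fallback's / 2 only differs for negative odd sums, which round toward zero instead of down.

#include <stdint.h>
#include <string.h>
#include <stdio.h>

// Illustrative model of __UXTB16_RORn(x, 8): rotate right by 8, then keep
// bytes 0 and 2 of the rotated word zero-extended into the two halfwords,
// i.e. bytes 1 and 3 of the original word land in the low byte lanes.
static uint32_t model_uxtb16_ror8(uint32_t x) {
    uint32_t r = (x >> 8) | (x << 24);
    return (r & 0x000000FFu) | (r & 0x00FF0000u);
}

// Illustrative model of __SHADD8: per-byte signed halving add.
static uint32_t model_shadd8(uint32_t a, uint32_t b) {
    uint32_t out = 0;
    for (int i = 0; i < 4; i++) {
        int8_t ai = (int8_t) (a >> (i * 8));
        int8_t bi = (int8_t) (b >> (i * 8));
        int8_t s = (int8_t) (((int32_t) ai + bi) >> 1);
        out |= ((uint32_t) (uint8_t) s) << (i * 8);
    }
    return out;
}

int main(void) {
    // Four consecutive signed chroma samples, packed little-endian as in UDU/VDU.
    int8_t s[4] = { -90, 37, 120, -5 };
    uint32_t packed;
    memcpy(&packed, s, sizeof(packed));
    uint32_t avg = model_shadd8(packed, model_uxtb16_ror8(packed));
    // Byte 0 holds (s[0] + s[1]) >> 1 and byte 2 holds (s[2] + s[3]) >> 1,
    // matching what the DSP path stores into UDU_avg[j] and UDU_avg[j + 1].
    printf("packed: %d %d\n", (int8_t) avg, (int8_t) (avg >> 16));
    printf("scalar: %d %d\n", (s[0] + s[1]) >> 1, (s[2] + s[3]) >> 1);
    return 0;
}

Each loop iteration in the new code therefore consumes two 32-bit chroma words per pointer instead of eight byte loads, which is where the speedup comes from.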
@@ -1778,6 +1827,10 @@ bool jpeg_compress(image_t *src, image_t *dst, int quality, bool realloc) {
y_offset -= (JPEG_MCU_H * 2);
// horizontal and vertical subsampling of U & V
#if defined(ARM_MATH_DSP)
uint32_t *UDUp = (uint32_t *) UDU;
uint32_t *VDUp = (uint32_t *) VDU;
#else
int8_t *UDUp0 = UDU;
int8_t *VDUp0 = VDU;
int8_t *UDUp1 = UDUp0 + JPEG_444_GS_MCU_SIZE;
@@ -1786,8 +1839,52 @@ bool jpeg_compress(image_t *src, image_t *dst, int quality, bool realloc) {
int8_t *VDUp2 = VDUp1 + JPEG_444_GS_MCU_SIZE;
int8_t *UDUp3 = UDUp2 + JPEG_444_GS_MCU_SIZE;
int8_t *VDUp3 = VDUp2 + JPEG_444_GS_MCU_SIZE;
#endif
for (int j = 0, k = JPEG_444_GS_MCU_SIZE / 2; k < JPEG_444_GS_MCU_SIZE;
j += JPEG_MCU_W, k += JPEG_MCU_W) {
#if defined(ARM_MATH_DSP)
for (int i = 0; i < 4; i++) {
int index = ((i & 2) ? k : j) + ((i & 1) * 4);
uint32_t UDU_r0_3210 = UDUp[i * 16];
uint32_t UDU_r0_avg_32_10 = __SHADD8(UDU_r0_3210, __UXTB16_RORn(UDU_r0_3210, 8));
uint32_t UDU_r0_7654 = UDUp[(i * 16) + 1];
uint32_t UDU_r0_avg_76_54 = __SHADD8(UDU_r0_7654, __UXTB16_RORn(UDU_r0_7654, 8));
uint32_t UDU_r1_3210 = UDUp[(i * 16) + 2];
uint32_t UDU_r1_avg_32_10 = __SHADD8(UDU_r1_3210, __UXTB16_RORn(UDU_r1_3210, 8));
uint32_t UDU_r1_7654 = UDUp[(i * 16) + 3];
uint32_t UDU_r1_avg_76_54 = __SHADD8(UDU_r1_7654, __UXTB16_RORn(UDU_r1_7654, 8));
uint32_t UDU_r0_r1_avg_32_10 = __SHADD8(UDU_r0_avg_32_10, UDU_r1_avg_32_10);
UDU_avg[index] = UDU_r0_r1_avg_32_10;
UDU_avg[index + 1] = UDU_r0_r1_avg_32_10 >> 16;
uint32_t UDU_r0_r1_avg_76_54 = __SHADD8(UDU_r0_avg_76_54, UDU_r1_avg_76_54);
UDU_avg[index + 2] = UDU_r0_r1_avg_76_54;
UDU_avg[index + 3] = UDU_r0_r1_avg_76_54 >> 16;
uint32_t VDU_r0_3210 = VDUp[i * 16];
uint32_t VDU_r0_avg_32_10 = __SHADD8(VDU_r0_3210, __UXTB16_RORn(VDU_r0_3210, 8));
uint32_t VDU_r0_7654 = VDUp[(i * 16) + 1];
uint32_t VDU_r0_avg_76_54 = __SHADD8(VDU_r0_7654, __UXTB16_RORn(VDU_r0_7654, 8));
uint32_t VDU_r1_3210 = VDUp[(i * 16) + 2];
uint32_t VDU_r1_avg_32_10 = __SHADD8(VDU_r1_3210, __UXTB16_RORn(VDU_r1_3210, 8));
uint32_t VDU_r1_7654 = VDUp[(i * 16) + 3];
uint32_t VDU_r1_avg_76_54 = __SHADD8(VDU_r1_7654, __UXTB16_RORn(VDU_r1_7654, 8));
uint32_t VDU_r0_r1_avg_32_10 = __SHADD8(VDU_r0_avg_32_10, VDU_r1_avg_32_10);
VDU_avg[index] = VDU_r0_r1_avg_32_10;
VDU_avg[index + 1] = VDU_r0_r1_avg_32_10 >> 16;
uint32_t VDU_r0_r1_avg_76_54 = __SHADD8(VDU_r0_avg_76_54, VDU_r1_avg_76_54);
VDU_avg[index + 2] = VDU_r0_r1_avg_76_54;
VDU_avg[index + 3] = VDU_r0_r1_avg_76_54 >> 16;
}
UDUp += 4;
VDUp += 4;
#else
for (int i = 0; i < JPEG_MCU_W; i += 2) {
UDU_avg[j + (i / 2)] =
(UDUp0[i] + UDUp0[i + 1] + UDUp0[i + JPEG_MCU_W] + UDUp0[i + 1 + JPEG_MCU_W]) / 4;
@@ -1814,6 +1911,7 @@ bool jpeg_compress(image_t *src, image_t *dst, int quality, bool realloc) {
VDUp2 += JPEG_MCU_W * 2;
UDUp3 += JPEG_MCU_W * 2;
VDUp3 += JPEG_MCU_W * 2;
#endif
}
DCU = jpeg_processDU(&jpeg_buf, UDU_avg, fdtbl_UV, DCU, UVDC_HT, UVAC_HT);
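For the 4:2:0 case the averaging happens in two halving stages: each word is first averaged horizontally, then the row-0 and row-1 results are combined with a second __SHADD8. Because both stages round down, the packed result can differ from the portable (a + b + c + d) / 4 path by at most one LSB. The small standalone check below (hypothetical helper names, not part of the driver) illustrates that bound with plain scalar arithmetic:

#include <stdint.h>
#include <stdio.h>

// Two-stage halving average, as the DSP path does per byte lane:
// first within each row (__SHADD8), then across the two rows (__SHADD8).
static int8_t avg_two_stage(int8_t a, int8_t b, int8_t c, int8_t d) {
    int8_t row0 = (int8_t) (((int32_t) a + b) >> 1);
    int8_t row1 = (int8_t) (((int32_t) c + d) >> 1);
    return (int8_t) (((int32_t) row0 + row1) >> 1);
}

// Single-stage average used by the portable fallback path.
static int8_t avg_direct(int8_t a, int8_t b, int8_t c, int8_t d) {
    return (int8_t) (((int32_t) a + b + c + d) / 4);
}

int main(void) {
    // Spot check over a few representative signed chroma values.
    int8_t v[] = { -128, -37, -1, 0, 1, 42, 127 };
    int n = sizeof(v) / sizeof(v[0]);
    int max_diff = 0;
    for (int a = 0; a < n; a++)
        for (int b = 0; b < n; b++)
            for (int c = 0; c < n; c++)
                for (int d = 0; d < n; d++) {
                    int diff = avg_two_stage(v[a], v[b], v[c], v[d]) -
                               avg_direct(v[a], v[b], v[c], v[d]);
                    if (diff < 0) diff = -diff;
                    if (diff > max_diff) max_diff = diff;
                }
    printf("max |two-stage - direct| = %d\n", max_diff);  // stays within 1 LSB
    return 0;
}

The difference is at most one quantizer-input step per 2x2 chroma block, which is well below what the UV quantization tables discard, so the faster path should be visually indistinguishable from the portable one.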