From e154c9b8a6b1d88e3f84bcc85c347ce7b25c1fe6 Mon Sep 17 00:00:00 2001
From: Larry Bank
Date: Wed, 1 Apr 2020 03:46:45 +0200
Subject: [PATCH] optimized sensor copy to allow full res capture

---
 src/omv/ov5640.c |  9 +++++----
 src/omv/sensor.c | 34 ++++++++++++++++++++++------------
 2 files changed, 27 insertions(+), 16 deletions(-)

diff --git a/src/omv/ov5640.c b/src/omv/ov5640.c
index 656351b49..3d7d9f4cc 100644
--- a/src/omv/ov5640.c
+++ b/src/omv/ov5640.c
@@ -426,7 +426,7 @@ static int set_pixformat(sensor_t *sensor, pixformat_t pixformat)
             break;
         case PIXFORMAT_GRAYSCALE:
         case PIXFORMAT_YUV422:
-            ret |= cambus_writeb2(sensor->slv_addr, FORMAT_CONTROL, 0x30);
+            ret |= cambus_writeb2(sensor->slv_addr, FORMAT_CONTROL, 0x10);
             ret |= cambus_writeb2(sensor->slv_addr, FORMAT_CONTROL_MUX, 0x00);
             pll = (resolution[sensor->framesize][0] > 2048) ? 0x50 : 0x64; // 32 MHz vs 40 MHz
             break;
@@ -547,7 +547,7 @@ static int set_framesize(sensor_t *sensor, framesize_t framesize)
 
     // Step 5: Compute total frame time.
 
-    uint16_t sensor_hts = (sensor_w * ((sensor->pixformat == PIXFORMAT_JPEG) ? 1 : 2)) + HSYNC_TIME;
+    uint16_t sensor_hts = (sensor_w * ((sensor->pixformat == PIXFORMAT_GRAYSCALE || sensor->pixformat == PIXFORMAT_JPEG) ? 1 : 2)) + HSYNC_TIME;
     uint16_t sensor_vts = sensor_h + VYSNC_TIME;
 
     uint16_t sensor_x_inc = (((sensor_div * 2) - 1) << 4) | (1 << 0); // odd[7:4]/even[3:0] pixel inc on the bayer pattern
@@ -612,7 +612,8 @@ static int set_framesize(sensor_t *sensor, framesize_t framesize)
             break;
         case PIXFORMAT_GRAYSCALE:
         case PIXFORMAT_YUV422:
-            pll = (w > 2048) ? 0x50 : 0x64; // 32 MHz vs 40 MHz
+//            pll = (w > 2048) ? 0x50 : 0x64; // 32 MHz vs 40 MHz
+            pll = 0x64;
             break;
         case PIXFORMAT_BAYER:
             pll = (w > 2048) ? 0x64 : 0x50; // 40 MHz vs 32 MHz (jpeg can go faster at higher reses)
@@ -936,7 +937,7 @@ static int set_lens_correction(sensor_t *sensor, int enable, int radi, int coef)
 int ov5640_init(sensor_t *sensor)
 {
     // Initialize sensor structure.
-    sensor->gs_bpp = 2;
+    sensor->gs_bpp = 1;
     sensor->reset = reset;
     sensor->sleep = sleep;
     sensor->read_reg = read_reg;
diff --git a/src/omv/sensor.c b/src/omv/sensor.c
index ef0088d47..5f338a810 100644
--- a/src/omv/sensor.c
+++ b/src/omv/sensor.c
@@ -904,23 +904,35 @@ void DCMI_DMAConvCpltUser(uint32_t addr)
         case PIXFORMAT_BAYER:
             dst += (line - MAIN_FB()->y) * MAIN_FB()->w;
             src += MAIN_FB()->x;
-            for (int i = MAIN_FB()->w; i; i--) {
-                *dst++ = *src++;
-            }
+            memcpy(dst, src, MAIN_FB()->w);
             break;
         case PIXFORMAT_GRAYSCALE:
             dst += (line - MAIN_FB()->y) * MAIN_FB()->w;
+            src += MAIN_FB()->x;
             if (sensor.gs_bpp == 1) {
-                src += MAIN_FB()->x;
                 // 1BPP GRAYSCALE.
-                for (int i = MAIN_FB()->w; i; i--) {
-                    *dst++ = *src++;
-                }
+                memcpy(dst, src, MAIN_FB()->w);
             } else {
+                uint32_t tmp1, tmp2, pix, *s, *d;
                 src16 += MAIN_FB()->x;
+                s = (uint32_t *)src16;
+                d = (uint32_t *)dst;
                 // Extract Y channel from YUV.
-                for (int i = MAIN_FB()->w; i; i--) {
-                    *dst++ = *src16++;
+                if (((uint32_t)dst & 3) == 0 && ((uint32_t)src16 & 3) == 0) {
+                    for (int i = MAIN_FB()->w; i>=4; i-=4) {
+                        // destination mem is cached; coalesce the writes to improve throughput
+                        tmp1 = *s++;
+                        tmp2 = *s++;
+                        pix = tmp1 & 0xff; // merge 4 pixels to not saturate CPU write buffer
+                        pix |= ((tmp1 >> 8) & 0xff00);
+                        pix |= ((tmp2 & 0xff) << 16);
+                        pix |= ((tmp2 & 0xff0000) << 8);
+                        *d++ = pix;
+                    }
+                } else {
+                    for (int i = MAIN_FB()->w; i; i--) {
+                        *dst++ = (uint8_t)*src16++; // low byte is Y channel
+                    }
                 }
             }
             break;
@@ -928,9 +940,7 @@ void DCMI_DMAConvCpltUser(uint32_t addr)
         case PIXFORMAT_RGB565:
            dst16 += (line - MAIN_FB()->y) * MAIN_FB()->w;
             src16 += MAIN_FB()->x;
-            for (int i = MAIN_FB()->w; i; i--) {
-                *dst16++ = *src16++;
-            }
+            memcpy(dst16, src16, MAIN_FB()->w * sizeof(uint16_t));
             break;
         case PIXFORMAT_JPEG:
         default:
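
For reference, a minimal standalone sketch (not part of the patch) of the Y-channel write-coalescing technique used in the PIXFORMAT_GRAYSCALE branch above. The function and variable names (copy_y_channel, width) are illustrative, not from the OpenMV tree; the sketch assumes little-endian 16-bit YUV422 samples with Y in the low byte, as the patch's fallback loop comment indicates. Unlike the patched loop, it also adds a scalar tail so line widths that are not a multiple of 4 are fully copied.

#include <stdint.h>
#include <stdio.h>

// Illustrative sketch: extract the Y channel from a line of little-endian
// YUV422 samples (Y in the low byte) into an 8-bit grayscale buffer,
// coalescing four output pixels into one 32-bit store when both pointers
// are word-aligned, and falling back to a byte-wise copy otherwise.
static void copy_y_channel(uint8_t *dst, const uint16_t *src16, int width)
{
    if ((((uintptr_t)dst | (uintptr_t)src16) & 3) == 0) {
        const uint32_t *s = (const uint32_t *)src16; // two YUV422 samples per read
        uint32_t *d = (uint32_t *)dst;               // four gray pixels per write
        int i = width;
        for (; i >= 4; i -= 4) {
            uint32_t tmp1 = *s++;                    // bytes: Y0 U0 Y1 V0
            uint32_t tmp2 = *s++;                    // bytes: Y2 U1 Y3 V1
            uint32_t pix = tmp1 & 0xff;              // Y0 -> byte 0
            pix |= (tmp1 >> 8) & 0xff00;             // Y1 -> byte 1
            pix |= (tmp2 & 0xff) << 16;              // Y2 -> byte 2
            pix |= (tmp2 & 0xff0000) << 8;           // Y3 -> byte 3
            *d++ = pix;                              // one coalesced 32-bit store
        }
        dst = (uint8_t *)d;                          // scalar tail for width % 4 pixels
        src16 = (const uint16_t *)s;
        for (; i > 0; i--) {
            *dst++ = (uint8_t)*src16++;
        }
    } else {
        for (int i = width; i > 0; i--) {            // unaligned fallback
            *dst++ = (uint8_t)*src16++;
        }
    }
}

int main(void)
{
    // Six YUV422 pixels (width not a multiple of 4) with Y = 10..60.
    uint16_t line[6] = {0x800a, 0x8014, 0x801e, 0x8028, 0x8032, 0x803c};
    uint8_t gray[6];
    copy_y_channel(gray, line, 6);
    for (int i = 0; i < 6; i++) {
        printf("%d ", gray[i]);                      // prints: 10 20 30 40 50 60
    }
    printf("\n");
    return 0;
}

Per the comments in the patch itself, the point of the packing is to issue one 32-bit store for every four grayscale pixels instead of four byte stores, so the CPU write buffer is not saturated while copying full-resolution lines out of the DMA buffer.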