Merge pull request #2228 from kwagyeman/kwabena/fix_missing_invalidates

imlib/draw: Add missing speculative read invalidates.
commit f922b76ae3
Ibrahim Abdelkader authored on 2024-07-19 18:15:34 +02:00, committed by GitHub
4 changed files with 69 additions and 9 deletions
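
Why the extra invalidates are needed: on Cortex-M7 class cores the data cache can speculatively allocate lines for any cacheable address, so a destination buffer that was invalidated before a DMA2D transfer may be partially re-cached while the transfer is still running. The fix in this commit is to invalidate the destination region a second time after the transfer completes, before the CPU reads it. Below is a minimal sketch of that pattern, assuming an STM32H7-style build whose device header provides the CMSIS cache-maintenance calls; dma_blend_row() and dma_wait_done() are hypothetical stand-ins for the HAL DMA2D start/poll calls, not OpenMV code.

#include <stdint.h>
#include <stddef.h>
#include "stm32h7xx.h"  // assumption: pulls in SCB_CleanDCache_by_Addr()/SCB_InvalidateDCache_by_Addr()

void dma_blend_row(uint8_t *src, uint16_t *dst, size_t pixels);  // hypothetical async DMA start
void dma_wait_done(void);                                        // hypothetical completion wait

static void blend_row_with_dma(uint8_t *src, uint16_t *dst, size_t pixels) {
    // Write back any dirty source lines so the DMA engine reads up-to-date data.
    SCB_CleanDCache_by_Addr((uint32_t *) src, pixels * sizeof(uint8_t));
    // Drop the destination lines so stale dirty data cannot be evicted over the DMA output.
    SCB_InvalidateDCache_by_Addr((uint32_t *) dst, pixels * sizeof(uint16_t));
    dma_blend_row(src, dst, pixels);
    dma_wait_done();
    // The invalidate above is not sufficient: a speculative fetch may have re-filled
    // destination lines mid-transfer, so invalidate once more before the CPU reads dst.
    SCB_InvalidateDCache_by_Addr((uint32_t *) dst, pixels * sizeof(uint16_t));
}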

@@ -801,7 +801,7 @@ void imlib_draw_row_setup(imlib_draw_row_data_t *data) {
 cfg.pCLUT = clut;
 cfg.CLUTColorMode = DMA2D_CCM_ARGB8888;
 cfg.Size = 255;
-#if defined(MCU_SERIES_F7) || defined(MCU_SERIES_H7)
+#if __DCACHE_PRESENT
 SCB_CleanDCache_by_Addr(clut, 256 * sizeof(uint32_t));
 #endif
 HAL_DMA2D_CLUTLoad(&data->dma2d, cfg, 1);
@@ -828,6 +828,10 @@ void imlib_draw_row_setup(imlib_draw_row_data_t *data) {
 data->dma2d.LayerCfg[1].ChromaSubSampling = DMA2D_NO_CSS;
 #endif
 HAL_DMA2D_ConfigLayer(&data->dma2d, 1);
+#if __DCACHE_PRESENT
+// SCB_InvalidateDCache_by_Addr does nothing if dsize is 0.
+data->dma2d_invalidate_dsize = 0;
+#endif
 } else {
 data->row_buffer[1] = data->row_buffer[0];
 }
@@ -866,6 +870,10 @@ void imlib_draw_row_teardown(imlib_draw_row_data_t *data) {
 if (data->dma2d_initialized) {
 if (!data->callback) {
 HAL_DMA2D_PollForTransfer(&data->dma2d, 1000);
+#if __DCACHE_PRESENT
+// Ensures any cached reads to dst16 are dropped.
+SCB_InvalidateDCache_by_Addr(data->dma2d_invalidate_addr, data->dma2d_invalidate_dsize);
+#endif
 }
 HAL_DMA2D_DeInit(&data->dma2d);
 if (data->src_img_pixfmt == PIXFORMAT_GRAYSCALE) {
@@ -2379,15 +2387,24 @@ void imlib_draw_row(int x_start, int x_end, int y_row, imlib_draw_row_data_t *da
 if (data->dma2d_enabled) {
 if (!data->callback) {
 HAL_DMA2D_PollForTransfer(&data->dma2d, 1000);
+#if __DCACHE_PRESENT
+// Ensures any cached reads to dst16 are dropped.
+SCB_InvalidateDCache_by_Addr(data->dma2d_invalidate_addr,
+                             data->dma2d_invalidate_dsize);
+#endif
 }
-#if defined(MCU_SERIES_F7) || defined(MCU_SERIES_H7)
+#if __DCACHE_PRESENT
 // Memory referenced by src8 between (x_end - x_start) may or may not be
 // cache algined. However, after being flushed it shouldn't change again
 // so DMA2D can safety read the line of pixels.
 SCB_CleanDCache_by_Addr((uint32_t *) src8, (x_end - x_start) * sizeof(uint8_t));
 // DMA2D will overwrite this area. dst16 (x_end - x_start) must be cache
 // aligned or the line of pixels will be corrutped.
-SCB_InvalidateDCache_by_Addr((uint32_t *) dst16, (x_end - x_start) * sizeof(uint16_t));
+// Ensures any cached writes to dst16 are dropped.
+data->dma2d_invalidate_addr = (uint32_t *) dst16;
+data->dma2d_invalidate_dsize = (x_end - x_start) * sizeof(uint16_t);
+SCB_InvalidateDCache_by_Addr(data->dma2d_invalidate_addr,
+                             data->dma2d_invalidate_dsize);
 #endif
 HAL_DMA2D_BlendingStart(&data->dma2d,
 (uint32_t) src8,
@@ -2397,6 +2414,11 @@ void imlib_draw_row(int x_start, int x_end, int y_row, imlib_draw_row_data_t *da
 1);
 if (data->callback) {
 HAL_DMA2D_PollForTransfer(&data->dma2d, 1000);
+#if __DCACHE_PRESENT
+// Ensures any cached reads to dst16 are dropped.
+SCB_InvalidateDCache_by_Addr(data->dma2d_invalidate_addr,
+                             data->dma2d_invalidate_dsize);
+#endif
 }
 } else if (data->smuad_alpha_palette) {
 #else
@@ -2550,15 +2572,24 @@ void imlib_draw_row(int x_start, int x_end, int y_row, imlib_draw_row_data_t *da
 if (data->dma2d_enabled) {
 if (!data->callback) {
 HAL_DMA2D_PollForTransfer(&data->dma2d, 1000);
+#if __DCACHE_PRESENT
+// Ensures any cached reads to dst16 are dropped.
+SCB_InvalidateDCache_by_Addr(data->dma2d_invalidate_addr,
+                             data->dma2d_invalidate_dsize);
+#endif
 }
-#if defined(MCU_SERIES_F7) || defined(MCU_SERIES_H7)
+#if __DCACHE_PRESENT
 // Memory referenced by src16 between (x_end - x_start) may or may not be
 // cache algined. However, after being flushed it shouldn't change again
 // so DMA2D can safety read the line of pixels.
 SCB_CleanDCache_by_Addr((uint32_t *) src16, (x_end - x_start) * sizeof(uint16_t));
 // DMA2D will overwrite this area. dst16 (x_end - x_start) must be cache
 // aligned or the line of pixels will be corrutped.
-SCB_InvalidateDCache_by_Addr((uint32_t *) dst16, (x_end - x_start) * sizeof(uint16_t));
+// Ensures any cached writes to dst16 are dropped.
+data->dma2d_invalidate_addr = (uint32_t *) dst16;
+data->dma2d_invalidate_dsize = (x_end - x_start) * sizeof(uint16_t);
+SCB_InvalidateDCache_by_Addr(data->dma2d_invalidate_addr,
+                             data->dma2d_invalidate_dsize);
 #endif
 HAL_DMA2D_BlendingStart(&data->dma2d,
 (uint32_t) src16,
@@ -2568,6 +2599,11 @@ void imlib_draw_row(int x_start, int x_end, int y_row, imlib_draw_row_data_t *da
 1);
 if (data->callback) {
 HAL_DMA2D_PollForTransfer(&data->dma2d, 1000);
+#if __DCACHE_PRESENT
+// Ensures any cached reads to dst16 are dropped.
+SCB_InvalidateDCache_by_Addr(data->dma2d_invalidate_addr,
+                             data->dma2d_invalidate_dsize);
+#endif
 }
 } else if (!data->black_background) {
 #else
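
The new dma2d_invalidate_addr/dma2d_invalidate_dsize fields (declared in the imlib_draw_row_data_t hunk below) carry the pending invalidate between calls: when DMA2D runs asynchronously (no callback), the destination row is recorded and the invalidate is issued right after the next HAL_DMA2D_PollForTransfer(), which happens either at the top of the next imlib_draw_row() or in imlib_draw_row_teardown(). A rough sketch of that lifecycle under the same assumptions as the sketch above; draw_state_t and the dma2d_poll()/dma2d_start_blend() helpers are illustrative stand-ins, not the driver's real names.

void dma2d_poll(void);                                            // hypothetical: wraps HAL_DMA2D_PollForTransfer()
void dma2d_start_blend(uint8_t *src, uint16_t *dst, int pixels);  // hypothetical: wraps HAL_DMA2D_BlendingStart()

typedef struct {
    void *dma2d_invalidate_addr;     // last destination row handed to DMA2D
    int32_t dma2d_invalidate_dsize;  // 0 means "nothing pending"
} draw_state_t;                      // mirrors the two new imlib_draw_row_data_t fields

static void draw_setup(draw_state_t *s) {
    // SCB_InvalidateDCache_by_Addr() does nothing when dsize is 0, so the first
    // poll below is a harmless no-op before any transfer has been recorded.
    s->dma2d_invalidate_dsize = 0;
}

static void draw_row(draw_state_t *s, uint8_t *src, uint16_t *dst, int pixels) {
    dma2d_poll();  // wait for the previous asynchronous blend
    // Drop any lines speculatively refetched while that blend was writing its row.
    SCB_InvalidateDCache_by_Addr(s->dma2d_invalidate_addr, s->dma2d_invalidate_dsize);

    SCB_CleanDCache_by_Addr((uint32_t *) src, pixels * sizeof(uint8_t));  // DMA2D reads src
    s->dma2d_invalidate_addr = (uint32_t *) dst;                          // remember what it writes
    s->dma2d_invalidate_dsize = pixels * sizeof(uint16_t);
    SCB_InvalidateDCache_by_Addr(s->dma2d_invalidate_addr, s->dma2d_invalidate_dsize);
    dma2d_start_blend(src, dst, pixels);
}

static void draw_teardown(draw_state_t *s) {
    dma2d_poll();
    SCB_InvalidateDCache_by_Addr(s->dma2d_invalidate_addr, s->dma2d_invalidate_dsize);
}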

@@ -1143,6 +1143,10 @@ typedef struct imlib_draw_row_data {
 bool dma2d_enabled; // private
 bool dma2d_initialized; // private
 DMA2D_HandleTypeDef dma2d; // private
+#if __DCACHE_PRESENT
+void *dma2d_invalidate_addr; // private
+int32_t dma2d_invalidate_dsize; // private
+#endif
 #endif
 long smuad_alpha; // private
 uint32_t *smuad_alpha_palette; // private

@@ -729,7 +729,7 @@ static void spi_tv_display(image_t *src_img, int dst_x_start, int dst_y_start, f
 #ifdef __DCACHE_PRESENT
 // Flush data for DMA
-SCB_CleanDCache();
+SCB_CleanDCache_by_Addr((uint32_t *) dst_img.data, image_size(&dst_img));
 #endif
 // Update head which means a new image is ready.
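
Replacing the whole-cache SCB_CleanDCache() with an address-range clean writes back only the frame buffer the SPI DMA is about to read, instead of every dirty line in the data cache. Roughly the following, where fb, fb_size, and spi_dma_send() are hypothetical stand-ins for the dst_img buffer and the display driver's transmit call.

void spi_dma_send(const uint8_t *buf, size_t len);  // hypothetical DMA-driven SPI transmit

static void push_frame(uint8_t *fb, size_t fb_size) {
    // Write back only the frame buffer the DMA will read, not the entire data cache.
    SCB_CleanDCache_by_Addr((uint32_t *) fb, fb_size);
    spi_dma_send(fb, fb_size);
}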

@@ -81,6 +81,11 @@ static void jpeg_compress_get_data(JPEG_HandleTypeDef *hjpeg, uint32_t NbDecoded
 }
 static void jpeg_compress_data_ready(JPEG_HandleTypeDef *hjpeg, uint8_t *pDataOut, uint32_t OutDataLength) {
+if ((!(((uint32_t) pDataOut) % __SCB_DCACHE_LINE_SIZE)) && (OutDataLength == JPEG_OUTPUT_CHUNK_SIZE)) {
+// Ensure any cached reads are dropped.
+SCB_InvalidateDCache_by_Addr((uint32_t *) pDataOut, JPEG_OUTPUT_CHUNK_SIZE);
+}
 // We have received this much data.
 JPEG_state.out_data_len += OutDataLength;
@@ -96,7 +101,7 @@ static void jpeg_compress_data_ready(JPEG_HandleTypeDef *hjpeg, uint8_t *pDataOu
 // image in randomly aligned chunks. We only want to invalidate the cache of the output
 // buffer for the initial DMA chunks. So, this code below will do that and then only
 // invalidate aligned regions when the processor is moving the final parts of the image.
-if (!(((uint32_t) new_pDataOut) % __SCB_DCACHE_LINE_SIZE)) {
+if ((!(((uint32_t) new_pDataOut) % __SCB_DCACHE_LINE_SIZE)) && (OutDataLength == JPEG_OUTPUT_CHUNK_SIZE)) {
 SCB_InvalidateDCache_by_Addr((uint32_t *) new_pDataOut, JPEG_OUTPUT_CHUNK_SIZE);
 }
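
Both hunks above gate the invalidate on the output pointer being cache-line aligned and the chunk being a full JPEG_OUTPUT_CHUNK_SIZE. That matters because SCB_InvalidateDCache_by_Addr() operates on whole cache lines: invalidating a partial or unaligned chunk would also discard valid data that happens to share those lines. A small sketch of the guard, assuming the CMSIS device header as in the earlier sketches; the helper name and parameters are illustrative, only __SCB_DCACHE_LINE_SIZE and the SCB_* call are real.

static void drop_dma_chunk_from_cache(uint8_t *ptr, uint32_t len, uint32_t chunk_size) {
    // Only whole, line-aligned chunks can be invalidated without touching cache
    // lines that also hold live CPU data.
    if (((((uint32_t) ptr) % __SCB_DCACHE_LINE_SIZE) == 0) && (len == chunk_size)) {
        SCB_InvalidateDCache_by_Addr((uint32_t *) ptr, chunk_size);
    }
    // Partial or unaligned tails are left to the CPU path that the surrounding code
    // already handles separately.
}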
@@ -506,7 +511,7 @@ void jpeg_decompress(image_t *dst, image_t *src) {
 HAL_DMA2D_Init(&DMA2D_Handle);
 HAL_DMA2D_ConfigLayer(&DMA2D_Handle, 1);
-// Invalidate the dst image for DMA2D.
+// Ensure any cached writes are dropped.
 SCB_InvalidateDCache_by_Addr((uint32_t *) dst->data, image_size(dst));
 }
 } else if (JPEG_state.jpeg_descr.Conf.ColorSpace == JPEG_CMYK_COLORSPACE) {
@@ -565,6 +570,9 @@ void jpeg_decompress(image_t *dst, image_t *src) {
 HAL_JPEG_Resume(&JPEG_state.jpeg_descr, JPEG_PAUSE_RESUME_OUTPUT);
 }
+// Ensure any cached reads are dropped.
+SCB_InvalidateDCache_by_Addr((uint32_t *) this_mcu_row_buffer_ptr, dst_w_mcus_bytes);
 if (JPEG_state.jpeg_descr.Conf.ColorSpace == JPEG_GRAYSCALE_COLORSPACE) {
 for (int x_offset = 0; x_offset < src->w; x_offset += JPEG_MCU_W) {
 uint8_t *Y0 = this_mcu_row_buffer_ptr + (x_offset * JPEG_MCU_H);
@@ -709,9 +717,21 @@ void jpeg_decompress(image_t *dst, image_t *src) {
 }
 case PIXFORMAT_RGB565: {
 uint16_t *rp = IMAGE_COMPUTE_RGB565_PIXEL_ROW_PTR(dst, y_offset);
-SCB_InvalidateDCache_by_Addr((uint32_t *) rp, dst->w * dy * sizeof(uint16_t));
 HAL_DMA2D_Start(&DMA2D_Handle, (uint32_t) this_mcu_row_buffer_ptr, (uint32_t) rp, dst->w, dy);
+// Invalidate any cached reads for the previous line that was just written.
+if ((y_offset - mcu_h) >= 0) {
+uint16_t *previous_rp = IMAGE_COMPUTE_RGB565_PIXEL_ROW_PTR(dst, (y_offset - mcu_h));
+SCB_InvalidateDCache_by_Addr((uint32_t *) previous_rp, dst->w * mcu_h * sizeof(uint16_t));
+}
 HAL_DMA2D_PollForTransfer(&DMA2D_Handle, JPEG_CODEC_TIMEOUT);
+// For the last row invalidate any cached reads for the line that was just written.
+if ((y_offset + mcu_h) >= src->h) {
+SCB_InvalidateDCache_by_Addr((uint32_t *) rp, dst->w * mcu_h * sizeof(uint16_t));
+}
 break;
 }
 }
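
In this last hunk the pre-transfer invalidate of the destination row is dropped in favor of invalidating each row only after its DMA2D transfer is known to be complete: while row y is being written, the already-finished row y - mcu_h is invalidated, and the final row is invalidated after the last poll. A rough sketch of that pipelined pattern; row_ptr(), dma2d_copy_row(), and dma2d_poll() are hypothetical stand-ins for the IMAGE_COMPUTE_RGB565_PIXEL_ROW_PTR() macro, HAL_DMA2D_Start(), and HAL_DMA2D_PollForTransfer().

uint16_t *row_ptr(uint16_t *base, int y);                        // hypothetical row-address helper
void dma2d_copy_row(uint8_t *src, uint16_t *dst, int w, int h);  // hypothetical async DMA2D start
void dma2d_poll(void);                                           // hypothetical completion wait

static void copy_mcu_rows(uint16_t *dst, int dst_w, int dst_h, int mcu_h, uint8_t *mcu_row_buf) {
    for (int y = 0; y < dst_h; y += mcu_h) {
        uint16_t *rp = row_ptr(dst, y);
        dma2d_copy_row(mcu_row_buf, rp, dst_w, mcu_h);  // starts writing row y asynchronously

        if ((y - mcu_h) >= 0) {
            // Row y - mcu_h completed on the previous iteration's poll; its lines may
            // have been speculatively refetched since, so drop them while DMA2D runs.
            SCB_InvalidateDCache_by_Addr((uint32_t *) row_ptr(dst, y - mcu_h),
                                         dst_w * mcu_h * sizeof(uint16_t));
        }

        dma2d_poll();  // row y is now fully written by DMA2D

        if ((y + mcu_h) >= dst_h) {
            // No later iteration will revisit the last row, so invalidate it here.
            SCB_InvalidateDCache_by_Addr((uint32_t *) rp, dst_w * mcu_h * sizeof(uint16_t));
        }
    }
}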