mirror of https://github.com/openmv/openmv.git (synced 2025-11-04 14:49:50 +08:00)

Commit: 9dd90ae55f
Parent: 53e7b196ab
Add support for double buffer, triple buffering, and video fifo
@@ -36,11 +36,6 @@ static char *pointer_overlay = &_fballoc_overlay_end;
// Use fb_alloc_free_till_mark_permanent() instead.
#define FB_PERMANENT_FLAG 0x2

static char *fb_alloc_min_address()
{
    return (char *) (framebuffer_get_buffer() + framebuffer_get_frame_size());
}

char *fb_alloc_stack_pointer()
{
    return pointer;
@@ -63,7 +58,7 @@ void fb_alloc_init0()

uint32_t fb_avail()
{
    uint32_t temp = pointer - fb_alloc_min_address() - sizeof(uint32_t);
    uint32_t temp = pointer - framebuffer_get_buffers_end() - sizeof(uint32_t);
    return (temp < sizeof(uint32_t)) ? 0 : temp;
}

@@ -72,7 +67,7 @@ void fb_alloc_mark()
    char *new_pointer = pointer - sizeof(uint32_t);

    // Check if allocation overwrites the framebuffer pixels
    if (new_pointer < fb_alloc_min_address()) {
    if (new_pointer < framebuffer_get_buffers_end()) {
        nlr_raise_for_fb_alloc_mark(mp_obj_new_exception_msg(&mp_type_MemoryError,
            MP_ERROR_TEXT("Out of fast Frame Buffer Stack Memory!"
            " Please reduce the resolution of the image you are running this algorithm on to bypass this issue!")));
@@ -149,7 +144,7 @@ void *fb_alloc(uint32_t size, int hints)
    char *new_pointer = result - sizeof(uint32_t);

    // Check if allocation overwrites the framebuffer pixels
    if (new_pointer < fb_alloc_min_address()) {
    if (new_pointer < framebuffer_get_buffers_end()) {
        fb_alloc_fail();
    }

@@ -195,7 +190,7 @@ void *fb_alloc0(uint32_t size, int hints)

void *fb_alloc_all(uint32_t *size, int hints)
{
    uint32_t temp = pointer - fb_alloc_min_address() - sizeof(uint32_t);
    uint32_t temp = pointer - framebuffer_get_buffers_end() - sizeof(uint32_t);

    if (temp < sizeof(uint32_t)) {
        *size = 0;
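For orientation only (this note and the sketch below are not part of the diff): after the change above, fb_alloc treats everything between framebuffer_get_buffers_end() and its downward-growing stack pointer as free space. A minimal standalone model, with sketch_ names as hypothetical stand-ins:

#include <stdint.h>
#include <stdio.h>

// Hypothetical layout, low addresses first (all addresses are made up):
//   framebuffer->data ............ vbuffer 0, vbuffer 1, ... (frame buffers)
//   framebuffer_get_buffers_end()  end of the frame buffers; fb_alloc floor
//   ...free space reported by fb_avail()...
//   fb_alloc stack pointer ....... grows downward as fb_alloc() is called
static char sketch_region[1024];
static char *sketch_buffers_end = sketch_region + 256;  // stand-in for framebuffer_get_buffers_end()
static char *sketch_stack_ptr = sketch_region + 1024;   // stand-in for fb_alloc_stack_pointer()

// Mirrors the shape of the rewritten fb_avail() above.
static uint32_t sketch_fb_avail(void)
{
    uint32_t temp = (uint32_t) (sketch_stack_ptr - sketch_buffers_end) - sizeof(uint32_t);
    return (temp < sizeof(uint32_t)) ? 0 : temp;
}

int main(void)
{
    printf("free frame buffer stack bytes: %u\n", (unsigned) sketch_fb_avail());
    return 0;
}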
@@ -11,6 +11,7 @@
#ifndef __SENSOR_H__
#define __SENSOR_H__
#include <stdarg.h>
#include "cambus.h"
#include "imlib.h"

#define OV2640_SLV_ADDR (0x60)
@@ -309,6 +310,9 @@ int sensor_set_auto_rotation(bool enable);
// Get transpose mode state.
bool sensor_get_auto_rotation();

// Set the number of virtual frame buffers.
int sensor_set_framebuffers(int count);

// Set special digital effects (SDE).
int sensor_set_special_effect(sde_t sde);

@@ -329,4 +333,5 @@ const uint16_t *sensor_get_color_palette();

// Default snapshot function.
int sensor_snapshot(sensor_t *sensor, image_t *image, uint32_t flags);

#endif /* __SENSOR_H__ */
@@ -13,6 +13,8 @@
#include "framebuffer.h"
#include "omv_boardconfig.h"

#define FB_ALIGN_SIZE_ROUND_DOWN(x) (((x) / FRAMEBUFFER_ALIGNMENT) * FRAMEBUFFER_ALIGNMENT)
#define FB_ALIGN_SIZE_ROUND_UP(x) FB_ALIGN_SIZE_ROUND_DOWN(((x) + FRAMEBUFFER_ALIGNMENT - 1))
#define CONSERVATIVE_JPEG_BUF_SIZE (OMV_JPEG_BUF_SIZE-64)

extern char _fb_base;
@@ -79,9 +81,6 @@ void framebuffer_init0()
    memset(MAIN_FB(), 0, sizeof(*MAIN_FB()));
    memset(JPEG_FB(), 0, sizeof(*JPEG_FB()));

    // Skip the first frame.
    MAIN_FB()->bpp = -1;

    // Enable streaming.
    MAIN_FB()->streaming_enabled = true; // controlled by the OpenMV Cam.

@@ -90,6 +89,9 @@ void framebuffer_init0()

    // Set fb_enabled
    JPEG_FB()->enabled = fb_enabled; // controlled by the IDE.

    // Setup buffering.
    framebuffer_set_buffers(1);
}

void framebuffer_initialize_image(image_t *img)
@@ -97,7 +99,7 @@ void framebuffer_initialize_image(image_t *img)
    img->w = framebuffer->w;
    img->h = framebuffer->h;
    img->bpp = framebuffer->bpp;
    img->data = framebuffer->pixels;
    img->data = framebuffer_get_buffer(framebuffer->head)->data;
}

static void initialize_jpeg_buf_from_image(image_t *img)
@@ -220,26 +222,51 @@ int32_t framebuffer_get_depth()
    return framebuffer->bpp;
}

uint32_t framebuffer_get_frame_size()
// Returns the number of bytes the frame buffer could be at the current moment in time.
static uint32_t framebuffer_raw_buffer_size()
{
    image_t img;
    framebuffer_initialize_image(&img);
    return image_size(&img);
    uint32_t size = (uint32_t) (fb_alloc_stack_pointer() - ((char *) framebuffer->data));
    // We don't want to give all of the frame buffer RAM to the frame buffer. So, we will limit
    // the maximum amount of RAM we return.
    return IM_MIN(size, OMV_RAW_BUF_SIZE);
}

uint32_t framebuffer_get_buffer_size()
{
    uint32_t size = (uint32_t) (fb_alloc_stack_pointer() - ((char *) framebuffer->pixels));
    // We don't want to give all of the frame buffer RAM to the frame buffer. So, we will limit the
    // maximum amount of RAM we return.
    size = IM_MIN(size, OMV_RAW_BUF_SIZE);
    // Needs to be a multiple of 32 for DMA transfers...
    return (size / 32) * 32;
    uint32_t size;

    if (framebuffer->n_buffers == 1) {
        // With only 1 vbuffer it's fine to allow the frame buffer size to change given fb_alloc().
        size = framebuffer_raw_buffer_size();
    } else {
        // Whatever the raw size was when the number of buffers was set is locked in...
        size = framebuffer->raw_buffer_size;
    }

    // Remove the size of the state header plus alignment padding.
    size -= sizeof(vbuffer_t);

    // Do we have an estimate on the frame size with multiple buffers? If so, we can reduce the
    // RAM each buffer takes up, giving some space back to fb_alloc().
    if ((framebuffer->n_buffers != 1) && framebuffer->u && framebuffer->v) {
        // Typically a framebuffer will not need more than u*v*2 bytes.
        uint32_t size_guess = framebuffer->u * framebuffer->v * 2;
        // Add in extra bytes to prevent round down from shrinking buffer too small.
        size_guess += FRAMEBUFFER_ALIGNMENT - 1;
        // Limit the frame buffer size.
        size = IM_MIN(size, size_guess);
    }

    // Needs to be a multiple of FRAMEBUFFER_ALIGNMENT for DMA transfers...
    return FB_ALIGN_SIZE_ROUND_DOWN(size);
}

uint8_t *framebuffer_get_buffer()
// Each raw frame buffer is split into two parts. The vbuffer_t struct followed by
// padding and then the pixel array starting at the next 32-byte offset.
vbuffer_t *framebuffer_get_buffer(int32_t index)
{
    return framebuffer->pixels;
    uint32_t offset = (sizeof(vbuffer_t) + framebuffer_get_buffer_size()) * index;
    return (vbuffer_t *) (framebuffer->data + offset);
}
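A minimal standalone sketch (not from the commit) of the per-index layout math used by framebuffer_get_buffer() above; the sizes below are made-up stand-ins for sizeof(vbuffer_t) and framebuffer_get_buffer_size().

#include <stdint.h>
#include <stdio.h>

#define SKETCH_HEADER_SIZE 32u    // stand-in for sizeof(vbuffer_t)
#define SKETCH_BUFFER_SIZE 4096u  // stand-in for framebuffer_get_buffer_size()

// Each slot is one vbuffer_t header followed by its aligned pixel array,
// so slot i starts at (header + pixels) * i bytes past framebuffer->data.
static uint32_t sketch_vbuffer_offset(uint32_t index)
{
    return (SKETCH_HEADER_SIZE + SKETCH_BUFFER_SIZE) * index;
}

int main(void)
{
    for (uint32_t i = 0; i < 3; i++) {
        printf("vbuffer %u starts at offset %u\n",
               (unsigned) i, (unsigned) sketch_vbuffer_offset(i));
    }
    return 0;
}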
void framebuffer_set(int32_t w, int32_t h, int32_t bpp)
@@ -248,3 +275,195 @@ void framebuffer_set(int32_t w, int32_t h, int32_t bpp)
    framebuffer->h = h;
    framebuffer->bpp = bpp;
}

void framebuffer_flush_buffers()
{
    // Move the tail pointer to the head which empties the virtual fifo while keeping the same
    // position of the current frame for the rest of the code.
    framebuffer->tail = framebuffer->head;
    framebuffer->check_head = true;
    framebuffer->sampled_head = 0;
}

void framebuffer_reset_buffers()
{
    for (int32_t i = 0; i < framebuffer->n_buffers; i++) {
        memset(framebuffer_get_buffer(i), 0, sizeof(vbuffer_t));
    }

    framebuffer_flush_buffers();
}

int framebuffer_set_buffers(int32_t n_buffers)
{
    uint32_t total_size = framebuffer_raw_buffer_size();
    uint32_t size = total_size / n_buffers;

    // Error out if frame buffers are smaller than this...
    if (size < (sizeof(vbuffer_t) + FRAMEBUFFER_ALIGNMENT)) {
        return -1;
    }

    // Invalidate frame.
    framebuffer->bpp = -1;

    // Cache the maximum size we can allocate for the frame buffer when vbuffers are greater than 1.
    framebuffer->raw_buffer_size = size;
    framebuffer->n_buffers = n_buffers;
    framebuffer->head = 0;

    framebuffer_reset_buffers();

    return 0;
}

// Returns the real size in bytes used by the frame buffer(s).
static uint32_t framebuffer_total_buffer_size()
{
    if (framebuffer->n_buffers == 1) {
        // Allow fb_alloc to use frame buffer space up until the image size.
        image_t img;
        framebuffer_initialize_image(&img);
        return sizeof(vbuffer_t) + FB_ALIGN_SIZE_ROUND_UP(image_size(&img));
    } else {
        // fb_alloc may only use up to the size of all the virtual buffers...
        return (sizeof(vbuffer_t) + framebuffer_get_buffer_size()) * framebuffer->n_buffers;
    }
}

void framebuffer_auto_adjust_buffers()
{
    // Keep same buffer count in video fifo mode but resize buffer sizes.
    if (framebuffer->n_buffers > 3) {
        framebuffer_set_buffers(framebuffer->n_buffers);
        return;
    }

    for (int i = 3; i > 0; i--) {
        framebuffer_set_buffers(i);

        // Automatically find a buffer count whose buffers use no more than half of the frame buffer RAM.
        if (fb_avail() >= framebuffer_total_buffer_size()) {
            return;
        }
    }
}
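For illustration only (not in the commit), the descending search above can be exercised in isolation. The helpers below are hypothetical stand-ins for fb_avail() and framebuffer_total_buffer_size() after framebuffer_set_buffers(n), using made-up sizes:

#include <stdint.h>
#include <stdio.h>

#define SKETCH_RAM        (400u * 1024u)  // made-up frame buffer RAM
#define SKETCH_FRAME_SIZE (150u * 1024u)  // made-up estimated frame size

static uint32_t sketch_total(int n)  { return (uint32_t) n * SKETCH_FRAME_SIZE; }

static uint32_t sketch_avail(int n)
{
    uint32_t t = sketch_total(n);
    return (t >= SKETCH_RAM) ? 0 : (SKETCH_RAM - t);
}

int main(void)
{
    int chosen = 1;
    for (int i = 3; i > 0; i--) {
        // Same acceptance test as the loop in framebuffer_auto_adjust_buffers().
        if (sketch_avail(i) >= sketch_total(i)) {
            chosen = i;
            break;
        }
    }
    printf("auto-adjust would pick %d buffer(s)\n", chosen);
    return 0;
}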
void framebuffer_free_current_buffer()
{
    // Invalidate frame.
    framebuffer->bpp = -1;

    // Allow frame to be updated in single buffer mode...
    if (framebuffer->n_buffers == 1) {
        framebuffer_get_buffer(framebuffer->head)->waiting_for_data = true;
    }
}

vbuffer_t *framebuffer_get_head(framebuffer_flags_t flags)
{
    int32_t new_head = (framebuffer->head + 1) % framebuffer->n_buffers;

    // Single Buffer Mode.
    if (framebuffer->n_buffers == 1) {
        if (framebuffer_get_buffer(framebuffer->head)->waiting_for_data) {
            return NULL;
        }
    // Double Buffer Mode.
    } else if (framebuffer->n_buffers == 2) {
        if (framebuffer->head == framebuffer->tail) {
            return NULL;
        }
    // Triple Buffer Mode.
    } else if (framebuffer->n_buffers == 3) {
        int32_t sampled_tail = framebuffer->tail;
        if (framebuffer->head == sampled_tail) {
            return NULL;
        } else {
            new_head = sampled_tail;
        }
    // Video FIFO Mode.
    } else {
        if (framebuffer->head == framebuffer->tail) {
            return NULL;
        }
    }

    if (!(flags & FB_PEEK)) {
        framebuffer->head = new_head;
    }

    return framebuffer_get_buffer(new_head);
}

vbuffer_t *framebuffer_get_tail(framebuffer_flags_t flags)
{
    // Sample head on the first line of a new frame.
    if (framebuffer->check_head) {
        framebuffer->check_head = false;
        framebuffer->sampled_head = framebuffer->head;
    }

    int32_t new_tail = (framebuffer->tail + 1) % framebuffer->n_buffers;

    // Single Buffer Mode.
    if (framebuffer->n_buffers == 1) {
        if (!framebuffer_get_buffer(new_tail)->waiting_for_data) {
            // Setup to check head again.
            framebuffer->check_head = true;
            return NULL;
        }
    // Double Buffer Mode.
    } else if (framebuffer->n_buffers == 2) {
        if (new_tail == framebuffer->sampled_head) {
            // Setup to check head again.
            framebuffer->check_head = true;
            return NULL;
        }
    // Triple Buffer Mode.
    } else if (framebuffer->n_buffers == 3) {
        // For triple buffering we are never writing where tail or head
        // (which may instantly update to be equal to tail) is.
        if (new_tail == framebuffer->sampled_head) {
            new_tail = (new_tail + 1) % framebuffer->n_buffers;
        }
    // Video FIFO Mode.
    } else {
        if (new_tail == framebuffer->sampled_head) {
            // Setup to check head again.
            framebuffer->check_head = true;
            return NULL;
        }
    }

    vbuffer_t *buffer = framebuffer_get_buffer(new_tail);

    // Reset on start versus the end so offset and jpeg_buffer_overflow are valid after FB_COMMIT.
    if (buffer->reset_state) {
        buffer->reset_state = false;
        buffer->offset = 0;
        buffer->jpeg_buffer_overflow = false;
    }

    if (!(flags & FB_PEEK)) {
        // Trigger reset on the frame buffer the next time it is used.
        buffer->reset_state = true;

        // Mark the frame buffer ready in single buffer mode.
        if (framebuffer->n_buffers == 1) {
            buffer->waiting_for_data = false;
        }

        framebuffer->tail = new_tail;

        // Setup to check head again.
        framebuffer->check_head = true;
    }

    return buffer;
}
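An illustrative, self-contained model (not from the commit) of the double-buffer case above: the driver commits frames by advancing tail while snapshot consumes them by advancing head, and the producer stalls when the next tail slot would collide with the head. Names prefixed with sketch_ are hypothetical.

#include <stdio.h>

#define SKETCH_N_BUFFERS 2

static int sketch_head = 0;  // last buffer handed to the consumer
static int sketch_tail = 0;  // last buffer filled by the producer

// Producer side: returns the index to fill next, or -1 if it would
// overwrite the frame the consumer is still using (same test as
// framebuffer_get_tail() in double buffer mode).
static int sketch_get_tail(void)
{
    int new_tail = (sketch_tail + 1) % SKETCH_N_BUFFERS;
    if (new_tail == sketch_head) {
        return -1;  // drop this frame
    }
    sketch_tail = new_tail;
    return new_tail;
}

// Consumer side: returns the next ready index, or -1 if no new frame has
// been committed yet (same test as framebuffer_get_head()).
static int sketch_get_head(void)
{
    if (sketch_head == sketch_tail) {
        return -1;
    }
    sketch_head = (sketch_head + 1) % SKETCH_N_BUFFERS;
    return sketch_head;
}

int main(void)
{
    printf("producer fills %d\n", sketch_get_tail());  // 1
    printf("producer fills %d\n", sketch_get_tail());  // -1, consumer hasn't caught up
    printf("consumer reads %d\n", sketch_get_head());  // 1
    printf("producer fills %d\n", sketch_get_tail());  // 0
    return 0;
}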
char *framebuffer_get_buffers_end()
{
    return (char *) (framebuffer->data + framebuffer_total_buffer_size());
}
@@ -15,26 +15,53 @@
#include "mutex.h"
#include "common.h"

// DMA Buffers need to be aligned by cache lines or 16 bytes.
#ifndef __DCACHE_PRESENT
#define FRAMEBUFFER_ALIGNMENT 16
#else
#define FRAMEBUFFER_ALIGNMENT __SCB_DCACHE_LINE_SIZE
#endif

typedef struct framebuffer {
    int32_t x,y;
    int32_t w,h;
    int32_t u,v;
    int32_t bpp;
    int32_t streaming_enabled;
    // NOTE: This buffer must be aligned on a 32 byte boundary
    OMV_ATTR_ALIGNED(uint8_t pixels[], 32);
    uint32_t raw_buffer_size;
    int32_t n_buffers;
    int32_t head;
    volatile int32_t tail;
    bool check_head;
    int32_t sampled_head;
    OMV_ATTR_ALIGNED(uint8_t data[], FRAMEBUFFER_ALIGNMENT);
} framebuffer_t;

extern framebuffer_t *framebuffer;

typedef enum {
    FB_NO_FLAGS = (0 << 0),
    FB_PEEK = (1 << 0), // If set, will not move the head/tail.
} framebuffer_flags_t;

typedef struct vbuffer {
    // Used by snapshot code to figure out the jpeg size (bpp).
    int32_t offset;
    bool jpeg_buffer_overflow;
    // Used internally by frame buffer code.
    volatile bool waiting_for_data;
    bool reset_state;
    // Image data array.
    OMV_ATTR_ALIGNED(uint8_t data[], FRAMEBUFFER_ALIGNMENT);
} vbuffer_t;

typedef struct jpegbuffer {
    int32_t w,h;
    int32_t size;
    int32_t enabled;
    int32_t quality;
    mutex_t lock;
    // NOTE: This buffer must be aligned on a 32 byte boundary
    OMV_ATTR_ALIGNED(uint8_t pixels[], 32);
    OMV_ATTR_ALIGNED(uint8_t pixels[], FRAMEBUFFER_ALIGNMENT);
} jpegbuffer_t;

extern jpegbuffer_t *jpeg_framebuffer;
@@ -58,16 +85,11 @@ int32_t framebuffer_get_width();
int32_t framebuffer_get_height();
int32_t framebuffer_get_depth();

// Return the size of the current frame (w * h * bpp) if the framebuffer is initialized,
// otherwise return 0 if the framebuffer is uninitialized or invalid (e.g. first frame).
uint32_t framebuffer_get_frame_size();

// Return the max frame size that fits the framebuffer
// (i.e. OMV_RAW_BUF_SIZE - sizeof(framebuffer_t))
// Return the number of bytes in the current buffer.
uint32_t framebuffer_get_buffer_size();

// Return the current buffer address.
uint8_t *framebuffer_get_buffer();
// Return the state of a buffer.
vbuffer_t *framebuffer_get_buffer(int32_t index);

// Initializes an image_t struct with the frame buffer.
void framebuffer_initialize_image(image_t *img);
@@ -79,6 +101,33 @@ void framebuffer_update_jpeg_buffer();
// Set the framebuffer w, h and bpp.
void framebuffer_set(int32_t w, int32_t h, int32_t bpp);

// Clears out all old captured frames in the framebuffer.
void framebuffer_flush_buffers();

// Resets all buffers (for use after aborting)
void framebuffer_reset_buffers();

// Controls the number of virtual buffers in the frame buffer.
int framebuffer_set_buffers(int32_t n_buffers);

// Automatically finds the best buffering size given RAM.
void framebuffer_auto_adjust_buffers();

// Call when done with the current vbuffer to mark it as free.
void framebuffer_free_current_buffer();

// Sets the current frame buffer to the latest virtual frame buffer.
// Returns the buffer if it is ready or NULL if not...
// Pass FB_PEEK to get the next buffer but not take it.
vbuffer_t *framebuffer_get_head(framebuffer_flags_t flags);

// Return the next vbuffer to store image data to or NULL if none.
// Pass FB_PEEK to get the next buffer but not commit it.
vbuffer_t *framebuffer_get_tail(framebuffer_flags_t flags);

// Returns a pointer to the end of the framebuffer(s).
char *framebuffer_get_buffers_end();

// Use these macros to get a pointer to main or JPEG framebuffer.
#define MAIN_FB() (framebuffer)
#define JPEG_FB() (jpeg_framebuffer)
@@ -11,6 +11,7 @@
#include "py/obj.h"
#include "py/runtime.h"
#include "framebuffer.h"
#include "sensor.h"
#include "py_helper.h"
#include "py_assert.h"

@@ -483,7 +484,7 @@ const uint8_t *py_helper_keyword_alpha_palette(uint n_args, const mp_obj_t *args

bool py_helper_is_equal_to_framebuffer(image_t *img)
{
    return framebuffer_get_buffer() == img->data;
    return framebuffer_get_buffer(framebuffer->head)->data == img->data;
}

void py_helper_update_framebuffer(image_t *img)
@@ -495,8 +496,14 @@ void py_helper_update_framebuffer(image_t *img)

void py_helper_set_to_framebuffer(image_t *img)
{
    #if MICROPY_PY_SENSOR
    sensor_set_framebuffers(1);
    #else
    framebuffer_set_buffers(1);
    #endif

    PY_ASSERT_TRUE_MSG((image_size(img) <= framebuffer_get_buffer_size()),
        "The image doesn't fit in the frame buffer!");
    framebuffer_set(img->w, img->h, img->bpp);
    img->data = framebuffer_get_buffer();
    img->data = framebuffer_get_buffer(framebuffer->head)->data;
}
@@ -179,6 +179,11 @@ static mp_obj_t py_sensor_get_id()
    return mp_obj_new_int(sensor_get_id());
}

static mp_obj_t py_sensor_get_frame_available()
{
    return mp_obj_new_bool(framebuffer->tail != framebuffer->head);
}

static mp_obj_t py_sensor_alloc_extra_fb(mp_obj_t w_obj, mp_obj_t h_obj, mp_obj_t type_obj)
{
    int w = mp_obj_get_int(w_obj);
@@ -518,6 +523,26 @@ static mp_obj_t py_sensor_get_auto_rotation()
    return mp_obj_new_bool(sensor_get_auto_rotation());
}

static mp_obj_t py_sensor_set_framebuffers(mp_obj_t count)
{
    mp_int_t c = mp_obj_get_int(count);

    if (framebuffer->n_buffers == c) {
        return mp_const_none;
    }

    if ((c < 1) || (sensor_set_framebuffers(c) != 0)) {
        mp_raise_msg(&mp_type_ValueError, MP_ERROR_TEXT("Invalid framebuffer count!"));
    }

    return mp_const_none;
}

static mp_obj_t py_sensor_get_framebuffers()
{
    return mp_obj_new_int(framebuffer->n_buffers);
}

static mp_obj_t py_sensor_set_special_effect(mp_obj_t sde)
{
    if (sensor_set_special_effect(mp_obj_get_int(sde)) != 0) {
@@ -881,6 +906,7 @@ STATIC MP_DEFINE_CONST_FUN_OBJ_0(py_sensor_width_obj, py_sensor_wi
STATIC MP_DEFINE_CONST_FUN_OBJ_0(py_sensor_height_obj, py_sensor_height);
STATIC MP_DEFINE_CONST_FUN_OBJ_0(py_sensor_get_fb_obj, py_sensor_get_fb);
STATIC MP_DEFINE_CONST_FUN_OBJ_0(py_sensor_get_id_obj, py_sensor_get_id);
STATIC MP_DEFINE_CONST_FUN_OBJ_0(py_sensor_get_frame_available_obj, py_sensor_get_frame_available);
STATIC MP_DEFINE_CONST_FUN_OBJ_3(py_sensor_alloc_extra_fb_obj, py_sensor_alloc_extra_fb);
STATIC MP_DEFINE_CONST_FUN_OBJ_0(py_sensor_dealloc_extra_fb_obj, py_sensor_dealloc_extra_fb);
STATIC MP_DEFINE_CONST_FUN_OBJ_1(py_sensor_set_pixformat_obj, py_sensor_set_pixformat);
@@ -911,6 +937,8 @@ STATIC MP_DEFINE_CONST_FUN_OBJ_1(py_sensor_set_transpose_obj, py_sensor_se
STATIC MP_DEFINE_CONST_FUN_OBJ_0(py_sensor_get_transpose_obj, py_sensor_get_transpose);
STATIC MP_DEFINE_CONST_FUN_OBJ_1(py_sensor_set_auto_rotation_obj, py_sensor_set_auto_rotation);
STATIC MP_DEFINE_CONST_FUN_OBJ_0(py_sensor_get_auto_rotation_obj, py_sensor_get_auto_rotation);
STATIC MP_DEFINE_CONST_FUN_OBJ_1(py_sensor_set_framebuffers_obj, py_sensor_set_framebuffers);
STATIC MP_DEFINE_CONST_FUN_OBJ_0(py_sensor_get_framebuffers_obj, py_sensor_get_framebuffers);
STATIC MP_DEFINE_CONST_FUN_OBJ_1(py_sensor_set_special_effect_obj, py_sensor_set_special_effect);
STATIC MP_DEFINE_CONST_FUN_OBJ_3(py_sensor_set_lens_correction_obj, py_sensor_set_lens_correction);
STATIC MP_DEFINE_CONST_FUN_OBJ_1(py_sensor_set_vsync_callback_obj, py_sensor_set_vsync_callback);
@@ -1017,6 +1045,13 @@ STATIC const mp_map_elem_t globals_dict_table[] = {
    { MP_OBJ_NEW_QSTR(MP_QSTR_IOCTL_HIMAX_MD_CLEAR), MP_OBJ_NEW_SMALL_INT(IOCTL_HIMAX_MD_CLEAR)},
    { MP_OBJ_NEW_QSTR(MP_QSTR_IOCTL_HIMAX_OSC_ENABLE), MP_OBJ_NEW_SMALL_INT(IOCTL_HIMAX_OSC_ENABLE)},
    #endif

    // Framebuffer Sizes
    { MP_OBJ_NEW_QSTR(MP_QSTR_SINGLE_BUFFER), MP_OBJ_NEW_SMALL_INT(1)},
    { MP_OBJ_NEW_QSTR(MP_QSTR_DOUBLE_BUFFER), MP_OBJ_NEW_SMALL_INT(2)},
    { MP_OBJ_NEW_QSTR(MP_QSTR_TRIPPLE_BUFFER), MP_OBJ_NEW_SMALL_INT(3)},
    { MP_OBJ_NEW_QSTR(MP_QSTR_VIDEO_FIFO), MP_OBJ_NEW_SMALL_INT(4)},
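As an aside (not part of the diff): these QSTR entries expose the buffer counts to scripts, so a MicroPython user can presumably pick a mode by passing one of the constants to sensor.set_framebuffers(), with any count above 3 behaving as a video FIFO per the framebuffer code earlier. A short hedged C sketch of that mapping, with sketch_ names being hypothetical:

#include <stdio.h>

// Sketch only: how a buffer count maps onto the capture modes implemented
// in framebuffer_get_head()/framebuffer_get_tail() above.
static const char *sketch_mode_name(int n_buffers)
{
    if (n_buffers == 1) return "single buffer";
    if (n_buffers == 2) return "double buffer";
    if (n_buffers == 3) return "triple buffer";
    return "video fifo";  // any count above 3
}

int main(void)
{
    for (int n = 1; n <= 4; n++) {
        printf("set_framebuffers(%d) -> %s\n", n, sketch_mode_name(n));
    }
    return 0;
}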
    // Sensor functions
    { MP_OBJ_NEW_QSTR(MP_QSTR___init__), (mp_obj_t)&py_sensor__init__obj },
    { MP_OBJ_NEW_QSTR(MP_QSTR_reset), (mp_obj_t)&py_sensor_reset_obj },
@@ -1029,6 +1064,7 @@ STATIC const mp_map_elem_t globals_dict_table[] = {
    { MP_OBJ_NEW_QSTR(MP_QSTR_height), (mp_obj_t)&py_sensor_height_obj },
    { MP_OBJ_NEW_QSTR(MP_QSTR_get_fb), (mp_obj_t)&py_sensor_get_fb_obj },
    { MP_OBJ_NEW_QSTR(MP_QSTR_get_id), (mp_obj_t)&py_sensor_get_id_obj },
    { MP_OBJ_NEW_QSTR(MP_QSTR_get_frame_available), (mp_obj_t)&py_sensor_get_frame_available_obj },
    { MP_OBJ_NEW_QSTR(MP_QSTR_alloc_extra_fb), (mp_obj_t)&py_sensor_alloc_extra_fb_obj },
    { MP_OBJ_NEW_QSTR(MP_QSTR_dealloc_extra_fb), (mp_obj_t)&py_sensor_dealloc_extra_fb_obj },
    { MP_OBJ_NEW_QSTR(MP_QSTR_set_pixformat), (mp_obj_t)&py_sensor_set_pixformat_obj },
@@ -1059,6 +1095,8 @@ STATIC const mp_map_elem_t globals_dict_table[] = {
    { MP_OBJ_NEW_QSTR(MP_QSTR_get_transpose), (mp_obj_t)&py_sensor_get_transpose_obj },
    { MP_OBJ_NEW_QSTR(MP_QSTR_set_auto_rotation), (mp_obj_t)&py_sensor_set_auto_rotation_obj },
    { MP_OBJ_NEW_QSTR(MP_QSTR_get_auto_rotation), (mp_obj_t)&py_sensor_get_auto_rotation_obj },
    { MP_OBJ_NEW_QSTR(MP_QSTR_set_framebuffers), (mp_obj_t)&py_sensor_set_framebuffers_obj },
    { MP_OBJ_NEW_QSTR(MP_QSTR_get_framebuffers), (mp_obj_t)&py_sensor_get_framebuffers_obj },
    { MP_OBJ_NEW_QSTR(MP_QSTR_set_special_effect), (mp_obj_t)&py_sensor_set_special_effect_obj },
    { MP_OBJ_NEW_QSTR(MP_QSTR_set_lens_correction), (mp_obj_t)&py_sensor_set_lens_correction_obj },
    { MP_OBJ_NEW_QSTR(MP_QSTR_set_vsync_callback), (mp_obj_t)&py_sensor_set_vsync_callback_obj },
@@ -1075,4 +1113,5 @@ const mp_obj_module_t sensor_module = {
    .base = { &mp_type_module },
    .globals = (mp_obj_t)&globals_dict,
};
#endif //MICROPY_PY_SENSOR

#endif // MICROPY_PY_SENSOR
@@ -153,8 +153,8 @@ soft_reset:
    uart_init0();
    #endif

    framebuffer_init0();
    fb_alloc_init0();
    framebuffer_init0();

    #if MICROPY_PY_SENSOR
    sensor_init();
@@ -166,7 +166,7 @@ soft_reset:
            MP_OBJ_NEW_SMALL_INT(0),
            MP_OBJ_NEW_SMALL_INT(115200),
        };
        MP_STATE_PORT(board_stdio_uart) =
        MP_STATE_PORT(board_stdio_uart) =
            machine_hard_uart_type.make_new((mp_obj_t)&machine_hard_uart_type, MP_ARRAY_SIZE(args), 0, args);
    }
    #endif
@@ -392,6 +392,8 @@ int sensor_init()

int sensor_reset()
{
    framebuffer_reset_buffers();

    // Reset the sensor state
    sensor.sde = 0;
    sensor.pixformat = 0;
@@ -812,6 +814,11 @@ bool sensor_get_auto_rotation()
    return sensor.auto_rotation;
}

int sensor_set_framebuffers(int count)
{
    return framebuffer_set_buffers(count);
}

int sensor_set_special_effect(sde_t sde)
{
    if (sensor.sde == sde) {
@@ -890,6 +897,10 @@ void VsyncExtiCallback()
// within the RAM we have onboard the system.
void sensor_check_buffsize()
{
    if (MAIN_FB()->n_buffers != 1) {
        framebuffer_set_buffers(1);
    }

    uint32_t size = framebuffer_get_buffer_size();
    uint32_t bpp;

@@ -986,7 +997,19 @@ void sensor_check_buffsize()
// This is the default snapshot function, which can be replaced in sensor_init functions.
int sensor_snapshot(sensor_t *sensor, image_t *image, uint32_t flags)
{
    uint8_t *b = MAIN_FB()->pixels;
    // Compress the framebuffer for the IDE preview, only if it's not the first frame,
    // the framebuffer is enabled and the image sensor does not support JPEG encoding.
    // Note: This doesn't run unless the IDE is connected and the framebuffer is enabled.
    framebuffer_update_jpeg_buffer();

    framebuffer_free_current_buffer();
    vbuffer_t *buffer = framebuffer_get_tail(FB_NO_FLAGS);

    if (!buffer) {
        return -1;
    }

    uint8_t *b = buffer->data;
    uint32_t _width = MAIN_FB()->w;
    uint32_t _height = MAIN_FB()->h;
    int bytesPerRow = _width * 2; // Always read 2 BPP
@@ -995,11 +1018,6 @@ int sensor_snapshot(sensor_t *sensor, image_t *image, uint32_t flags)
    uint32_t ulPin = 32; // P1.xx set of GPIO is in 'pin' 32 and above
    NRF_GPIO_Type *port = nrf_gpio_pin_port_decode(&ulPin);

    // Compress the framebuffer for the IDE preview, only if it's not the first frame,
    // the framebuffer is enabled and the image sensor does not support JPEG encoding.
    // Note: This doesn't run unless the IDE is connected and the framebuffer is enabled.
    framebuffer_update_jpeg_buffer();

    noInterrupts();

    // Falling edge indicates start of frame
@@ -1009,7 +1027,7 @@ int sensor_snapshot(sensor_t *sensor, image_t *image, uint32_t flags)
    for (int i = 0; i < _height; i++) {
        // rising edge indicates start of line
        while ((*_hrefPort & _hrefMask) == 0); // wait for HIGH

        for (int j = 0; j < bytesPerRow; j++) {
            // rising edges clock each data byte
            while ((*_pclkPort & _pclkMask) != 0); // wait for LOW
@@ -1039,7 +1057,7 @@ int sensor_snapshot(sensor_t *sensor, image_t *image, uint32_t flags)
        case PIXFORMAT_RGB565: {
            MAIN_FB()->bpp = 2;
            if (SENSOR_HW_FLAGS_GET(sensor, SWNSOR_HW_FLAGS_RGB565_REV)) {
                unaligned_memcpy_rev16(MAIN_FB()->pixels, MAIN_FB()->pixels, _width*_height);
                unaligned_memcpy_rev16(buffer->data, buffer->data, _width*_height);
            }
            break;
        }
@@ -1056,7 +1074,8 @@ int sensor_snapshot(sensor_t *sensor, image_t *image, uint32_t flags)
        image->w = MAIN_FB()->w;
        image->h = MAIN_FB()->h;
        image->bpp = MAIN_FB()->bpp;
        image->pixels = MAIN_FB()->pixels;
        image->pixels = buffer->data;
    }

    return 0;
}
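A schematic outline (not from the commit) of the snapshot shape the ports above now share: release the buffer the previous frame lived in, ask the frame buffer code for the next writable vbuffer, and bail out if none is ready. All sketch_ names are hypothetical stand-ins for the real calls.

#include <stddef.h>
#include <stdio.h>

// Hypothetical stand-ins for the real frame buffer calls used above.
typedef struct { unsigned char data[16]; } sketch_vbuffer_t;
static sketch_vbuffer_t sketch_slot;
static int sketch_slot_free = 1;

static void sketch_free_current(void) { sketch_slot_free = 1; }  // framebuffer_free_current_buffer()

static sketch_vbuffer_t *sketch_get_tail(void)                   // framebuffer_get_tail(FB_NO_FLAGS)
{
    if (!sketch_slot_free) return NULL;
    sketch_slot_free = 0;
    return &sketch_slot;
}

// Shared shape of the ports' snapshot functions after this commit.
static int sketch_snapshot(void)
{
    sketch_free_current();
    sketch_vbuffer_t *buffer = sketch_get_tail();
    if (buffer == NULL) {
        return -1;  // no free vbuffer; caller retries later
    }
    buffer->data[0] = 0;  // port-specific capture would fill buffer->data here
    return 0;
}

int main(void)
{
    printf("snapshot returned %d\n", sketch_snapshot());
    return 0;
}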
@@ -517,8 +517,8 @@ soft_reset:
    i2c_init0();
    spi_init0();
    uart_init0();
    framebuffer_init0();
    fb_alloc_init0();
    framebuffer_init0();
    sensor_init0();
    dma_alloc_init0();
    #ifdef IMLIB_ENABLE_IMAGE_FILE_IO
@@ -32,18 +32,12 @@

#define MAX_XFER_SIZE (0xFFFF*4)

extern void __fatal_error(const char *msg);

sensor_t sensor = {0};
static TIM_HandleTypeDef TIMHandle = {0};
static DMA_HandleTypeDef DMAHandle = {0};
static DCMI_HandleTypeDef DCMIHandle = {0};

extern uint8_t _line_buf;
static uint8_t *dest_fb = NULL;
static volatile int offset = 0;
static volatile bool jpeg_buffer_overflow = false;
static volatile bool waiting_for_data = false;

const int resolution[][2] = {
    {0, 0 },
@@ -221,6 +215,8 @@ static void dcmi_abort()
        DCMI->CR &= ~DCMI_CR_ENABLE;
        HAL_DMA_Abort(&DMAHandle);
    }

    framebuffer_reset_buffers();
}

// Returns true if a crop is being applied to the frame buffer.
@@ -692,6 +688,9 @@ int sensor_set_framesize(framesize_t framesize)
    MAIN_FB()->w = MAIN_FB()->u = resolution[framesize][0];
    MAIN_FB()->h = MAIN_FB()->v = resolution[framesize][1];

    // Pick out a good buffer count for the user.
    framebuffer_auto_adjust_buffers();

    return 0;
}

@@ -720,6 +719,8 @@ int sensor_set_windowing(int x, int y, int w, int h)
        return -1;
    }

    dcmi_abort();

    // We force everything to be a multiple of 2 so that when you switch between
    // grayscale/rgb565/bayer/jpeg the frame doesn't need to move around for bayer to work.
    MAIN_FB()->x = (x / 2) * 2;
@@ -727,6 +728,9 @@ int sensor_set_windowing(int x, int y, int w, int h)
    MAIN_FB()->w = MAIN_FB()->u = (w / 2) * 2;
    MAIN_FB()->h = MAIN_FB()->v = (h / 2) * 2;

    // Pick out a good buffer count for the user.
    framebuffer_auto_adjust_buffers();

    return 0;
}

@@ -936,6 +940,13 @@ bool sensor_get_auto_rotation()
    return sensor.auto_rotation;
}

int sensor_set_framebuffers(int count)
{
    dcmi_abort();

    return framebuffer_set_buffers(count);
}

int sensor_set_special_effect(sde_t sde)
{
    if (sensor.sde == sde) {
@@ -1105,15 +1116,18 @@ static void sensor_check_buffsize()
        MAIN_FB()->y += (window_h - MAIN_FB()->v) / 2;
        if (MAIN_FB()->x % 2) MAIN_FB()->x -= 1;
        if (MAIN_FB()->y % 2) MAIN_FB()->y -= 1;

        // Pick out a good buffer count for the user.
        framebuffer_auto_adjust_buffers();
    }

// Stop allowing new data in on the end of the frame and let snapshot know that the frame has been
// received. Note that DCMI_DMAConvCpltUser() is called before DCMI_IT_FRAME is enabled by
// DCMI_DMAXferCplt() so this means that the last line of data is *always* transferred before
// waiting_for_data is set to false.
// moving the tail to the next buffer.
void HAL_DCMI_FrameEventCallback(DCMI_HandleTypeDef *hdcmi)
{
    waiting_for_data = false;
    framebuffer_get_tail(FB_NO_FLAGS);
}
// This function is called back after each line transfer is complete,
@@ -1121,11 +1135,15 @@ void HAL_DCMI_FrameEventCallback(DCMI_HandleTypeDef *hdcmi)
// DMA transfers the next line to the other half of the line buffer.
void DCMI_DMAConvCpltUser(uint32_t addr)
{
    vbuffer_t *buffer = framebuffer_get_tail(FB_PEEK);

    // If snapshot was not already waiting to receive data then we have missed this frame and have
    // to drop it. So, abort this and future transfers. Snapshot will restart the process.
    if (!waiting_for_data) {
    if (!buffer) {
        DCMI->CR &= ~DCMI_CR_ENABLE;
        HAL_DMA_Abort_IT(&DMAHandle); // Note: Use HAL_DMA_Abort_IT and not HAL_DMA_Abort inside an interrupt.
        // Reset the queue of frames when we start dropping frames.
        framebuffer_flush_buffers();
        return;
    }

@@ -1138,10 +1156,10 @@ void DCMI_DMAConvCpltUser(uint32_t addr)
    // depth on the DCMI hardware and DMA hardware is not enough to prevent data loss.

    uint8_t *src = (uint8_t*) addr;
    uint8_t *dst = (uint8_t*) dest_fb;
    uint8_t *dst = (uint8_t*) buffer->data;

    uint16_t *src16 = (uint16_t*) addr;
    uint16_t *dst16 = (uint16_t*) dest_fb;
    uint16_t *dst16 = (uint16_t*) buffer->data;

    if (sensor.pixformat == PIXFORMAT_JPEG) {
        if (sensor.chip_id == OV5640_ID) {
@@ -1159,12 +1177,12 @@ void DCMI_DMAConvCpltUser(uint32_t addr)
            //
            uint16_t size = __REV16(*src16);
            // Prevent a buffer overflow when writing the jpeg data.
            if (offset + size > framebuffer_get_buffer_size()) {
                jpeg_buffer_overflow = true;
            if (buffer->offset + size > framebuffer_get_buffer_size()) {
                buffer->jpeg_buffer_overflow = true;
                return;
            }
            unaligned_memcpy(dst + offset, src16 + 1, size);
            offset += size;
            unaligned_memcpy(dst + buffer->offset, src16 + 1, size);
            buffer->offset += size;
        } else {
            // JPEG MODE 3:
            //
@@ -1180,41 +1198,41 @@ void DCMI_DMAConvCpltUser(uint32_t addr)
            // is not optimal. However, it works okay for the OV2640 since the PCLK is much lower
            // than the OV5640 PCLK. The OV5640 drops data in this mode. Hence using mode 4 above.
            //
            offset += 1;
            buffer->offset += 1;
        }
        return;
    }

    // Implement per line, per pixel cropping, and image transposing (for image rotation)
    // in software using the CPU to transfer the image from the line buffers to the frame buffer.
    if (offset >= MAIN_FB()->y && offset <= (MAIN_FB()->y + MAIN_FB()->h)) {
    if (buffer->offset >= MAIN_FB()->y && buffer->offset <= (MAIN_FB()->y + MAIN_FB()->v)) {
        if (!sensor.transpose) {
            switch (sensor.pixformat) {
                case PIXFORMAT_BAYER:
                    dst += (offset - MAIN_FB()->y) * MAIN_FB()->w;
                    dst += (buffer->offset - MAIN_FB()->y) * MAIN_FB()->u;
                    src += MAIN_FB()->x;
                    unaligned_memcpy(dst, src, MAIN_FB()->w);
                    unaligned_memcpy(dst, src, MAIN_FB()->u);
                    break;
                case PIXFORMAT_GRAYSCALE:
                    dst += (offset - MAIN_FB()->y) * MAIN_FB()->w;
                    dst += (buffer->offset - MAIN_FB()->y) * MAIN_FB()->u;
                    if (sensor.gs_bpp == 1) {
                        // 1BPP GRAYSCALE.
                        src += MAIN_FB()->x;
                        unaligned_memcpy(dst, src, MAIN_FB()->w);
                        unaligned_memcpy(dst, src, MAIN_FB()->u);
                    } else {
                        // Extract Y channel from YUV.
                        src16 += MAIN_FB()->x;
                        unaligned_2_to_1_memcpy(dst, src16, MAIN_FB()->w);
                        unaligned_2_to_1_memcpy(dst, src16, MAIN_FB()->u);
                    }
                    break;
                case PIXFORMAT_YUV422:
                case PIXFORMAT_RGB565:
                    dst16 += (offset - MAIN_FB()->y) * MAIN_FB()->w;
                    dst16 += (buffer->offset - MAIN_FB()->y) * MAIN_FB()->u;
                    src16 += MAIN_FB()->x;
                    if (SENSOR_HW_FLAGS_GET(&sensor, SWNSOR_HW_FLAGS_RGB565_REV)) {
                        unaligned_memcpy_rev16(dst16, src16, MAIN_FB()->w);
                        unaligned_memcpy_rev16(dst16, src16, MAIN_FB()->u);
                    } else {
                        unaligned_memcpy(dst16, src16, MAIN_FB()->w * sizeof(uint16_t));
                        unaligned_memcpy(dst16, src16, MAIN_FB()->u * sizeof(uint16_t));
                    }
                    break;
                default:
@@ -1223,26 +1241,26 @@ void DCMI_DMAConvCpltUser(uint32_t addr)
        } else {
            switch (sensor.pixformat) {
                case PIXFORMAT_BAYER:
                    dst += offset - MAIN_FB()->y;
                    dst += buffer->offset - MAIN_FB()->y;
                    src += MAIN_FB()->x;
                    for (int i = MAIN_FB()->w, h = MAIN_FB()->h; i; i--) {
                    for (int i = MAIN_FB()->u, h = MAIN_FB()->v; i; i--) {
                        *dst = *src++;
                        dst += h;
                    }
                    break;
                case PIXFORMAT_GRAYSCALE:
                    dst += offset - MAIN_FB()->y;
                    dst += buffer->offset - MAIN_FB()->y;
                    if (sensor.gs_bpp == 1) {
                        src += MAIN_FB()->x;
                        // 1BPP GRAYSCALE.
                        for (int i = MAIN_FB()->w, h = MAIN_FB()->h; i; i--) {
                        for (int i = MAIN_FB()->u, h = MAIN_FB()->v; i; i--) {
                            *dst = *src++;
                            dst += h;
                        }
                    } else {
                        src16 += MAIN_FB()->x;
                        // Extract Y channel from YUV.
                        for (int i = MAIN_FB()->w, h = MAIN_FB()->h; i; i--) {
                        for (int i = MAIN_FB()->u, h = MAIN_FB()->v; i; i--) {
                            *dst = *src16++;
                            dst += h;
                        }
@@ -1250,15 +1268,15 @@ void DCMI_DMAConvCpltUser(uint32_t addr)
                    break;
                case PIXFORMAT_YUV422:
                case PIXFORMAT_RGB565:
                    dst16 += offset - MAIN_FB()->y;
                    dst16 += buffer->offset - MAIN_FB()->y;
                    src16 += MAIN_FB()->x;
                    if (SENSOR_HW_FLAGS_GET(&sensor, SWNSOR_HW_FLAGS_RGB565_REV)) {
                        for (int i = MAIN_FB()->w, h = MAIN_FB()->h; i; i--) {
                        for (int i = MAIN_FB()->u, h = MAIN_FB()->v; i; i--) {
                            *dst16 = __REV16(*src16++);
                            dst16 += h;
                        }
                    } else {
                        for (int i = MAIN_FB()->w, h = MAIN_FB()->h; i; i--) {
                        for (int i = MAIN_FB()->u, h = MAIN_FB()->v; i; i--) {
                            *dst16 = *src16++;
                            dst16 += h;
                        }
@@ -1270,7 +1288,7 @@ void DCMI_DMAConvCpltUser(uint32_t addr)
            }
        }

        offset++;
        buffer->offset++;
    }
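Purely illustrative (not in the commit): the cropping math in the line callback above boils down to copying u pixels starting at column x from the line buffer into row (offset - y) of the destination vbuffer. A simplified standalone sketch with made-up dimensions:

#include <stdint.h>
#include <string.h>
#include <stdio.h>

// Made-up geometry standing in for MAIN_FB()->x/y/u/v and the line index.
#define CROP_X 4
#define CROP_Y 2
#define CROP_U 8   // cropped width in pixels
#define CROP_V 6   // cropped height in pixels
#define LINE_W 16  // full sensor line width

// Copy one captured line into the cropped destination frame, mirroring the
// non-transposed grayscale path of DCMI_DMAConvCpltUser() above.
static void sketch_copy_line(uint8_t *dst_frame, const uint8_t *line, int line_index)
{
    if (line_index >= CROP_Y && line_index < CROP_Y + CROP_V) {
        uint8_t *dst = dst_frame + (line_index - CROP_Y) * CROP_U;
        memcpy(dst, line + CROP_X, CROP_U);
    }
}

int main(void)
{
    uint8_t frame[CROP_U * CROP_V] = {0};
    uint8_t line[LINE_W];
    for (int i = 0; i < LINE_W; i++) {
        line[i] = (uint8_t) i;
    }
    for (int y = 0; y < 10; y++) {
        sketch_copy_line(frame, line, y);
    }
    printf("first cropped pixel = %d\n", frame[0]);  // prints 4 (== CROP_X)
    return 0;
}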
// This is the default snapshot function, which can be replaced in sensor_init functions. This function
@@ -1288,9 +1306,6 @@ int sensor_snapshot(sensor_t *sensor, image_t *image, uint32_t flags)
    // first to save space before being cropped until it fits.
    sensor_check_buffsize();

    // Set the current frame buffer target used in the DMA line callback function.
    dest_fb = MAIN_FB()->pixels;

    // The user may have changed the MAIN_FB width or height on the last image so we need
    // to restore that here. We don't have to restore bpp because that's taken care of
    // already in the code below. Note that we do the JPEG compression above first to save
@@ -1298,10 +1313,6 @@ int sensor_snapshot(sensor_t *sensor, image_t *image, uint32_t flags)
    MAIN_FB()->w = MAIN_FB()->u;
    MAIN_FB()->h = MAIN_FB()->v;

    // If an error occurs we should have a valid w/h and invalid bpp so that we leave the frame
    // buffer like how sensor_set_pixformat()/sensor_set_framesize() leave it.
    MAIN_FB()->bpp = -1;

    // We use the stored frame size to read the whole frame. Note that cropping is
    // done in the line function using the dimensions stored in MAIN_FB()->x,y,w,h.
    uint32_t w = resolution[sensor->framesize][0];
@@ -1333,9 +1344,9 @@ int sensor_snapshot(sensor_t *sensor, image_t *image, uint32_t flags)
                addr = (uint32_t) &_line_buf;
            } else {
                // The JPEG image will be directly transferred to the frame buffer.
                // The DCMI hardware can transfer up to 524,280 bytes.
                // The DCMI hardware can transfer up to 524,280 bytes.
                length = MAX_XFER_SIZE * 2;
                addr = (uint32_t) (MAIN_FB()->pixels);
                addr = 0;
            }
            break;
        default:
@@ -1352,17 +1363,10 @@ int sensor_snapshot(sensor_t *sensor, image_t *image, uint32_t flags)
    HAL_DCMI_ConfigCrop(&DCMIHandle, 0, 0, w-1, h-1);
    #endif

    // Clear the offset counter variable before we allow more data to be received.
    offset = 0;

    // Clear jpeg error flag before we allow more data to be received.
    jpeg_buffer_overflow = false;

    // If DCMI_DMAConvCpltUser() happens before waiting_for_data = true; below then the
    // transfer is stopped and it will be re-enabled again right afterwards. We know the
    // transfer was stopped by checking DCMI_CR_ENABLE.

    waiting_for_data = true;
    // If DCMI_DMAConvCpltUser() happens before framebuffer_free_current_buffer(); below then the
    // transfer is stopped and it will be re-enabled again right afterwards in the single vbuffer
    // case. We know the transfer was stopped by checking DCMI_CR_ENABLE.
    framebuffer_free_current_buffer();

    // We will be in one of the following states now:
    // 1. No transfer is currently running right now and DCMI_CR_ENABLE is not set.
@@ -1384,23 +1388,37 @@ int sensor_snapshot(sensor_t *sensor, image_t *image, uint32_t flags)
    // methods set the addresses right after each other. So, effectively DMA is just writing
    // data to a circular buffer with an interrupt every time 1/2 of it is written.
    if ((sensor->pixformat == PIXFORMAT_JPEG) && (sensor->chip_id != OV5640_ID)) {
        // Get the destination buffer address. Given we only capture one frame in this mode and
        // have to abort once the transfer from DMA stalls it's okay to call producer functions.
        vbuffer_t *buffer = framebuffer_get_tail(FB_PEEK);

        if (!buffer) {
            return -6;
        }

        uint32_t size = framebuffer_get_buffer_size();
        length = IM_MIN(length, size);
        // Start a transfer where the whole frame buffer is located where the DMA is writing
        // data to. We only use this for JPEG mode for the OV2640. Since we don't know the
        // line size of data being transferred we just examine how much data was transferred
        // once DMA hardware stalls waiting for data. Note that because we are writing
        // directly to the frame buffer we do not have the option of aborting the transfer
        // if we are not ready to move data from a line buffer to the frame buffer.
        HAL_DCMI_Start_DMA(&DCMIHandle,
            DCMI_MODE_SNAPSHOT, addr, length/4);
        HAL_DCMI_Start_DMA(&DCMIHandle, DCMI_MODE_SNAPSHOT, (uint32_t) buffer->data, length/4);
        // In this mode the DMA hardware is just treating the frame buffer as two large
        // DMA buffers. At the end of the frame less data may be transferred than requested.

        // If length is greater than MAX_XFER_SIZE then HAL_DCMI_Start_DMA splits length
        // into two transfers less than MAX_XFER_SIZE.
        if (length > MAX_XFER_SIZE) {
            length /= 2;
        }
    } else {
        // Start a multibuffer transfer (line by line). The DMA hardware will ping-pong
        // transferring data between the uncached line buffers. Since data is continuously
        // being captured the ping-ponging will stop at the end of the frame and then
        // continue when the next frame starts.
        HAL_DCMI_Start_DMA_MB(&DCMIHandle,
            DCMI_MODE_CONTINUOUS, addr, length/4, h);
        HAL_DCMI_Start_DMA_MB(&DCMIHandle, DCMI_MODE_CONTINUOUS, addr, length/4, h);
    }
}

@@ -1424,14 +1442,15 @@ int sensor_snapshot(sensor_t *sensor, image_t *image, uint32_t flags)
        __HAL_DCMI_ENABLE_IT(&DCMIHandle, DCMI_IT_FRAME);
    }

    vbuffer_t *buffer = NULL;

    // Wait for the frame data. __WFI() below will exit right on time because of DCMI_IT_FRAME.
    // While waiting SysTick will trigger allowing us to timeout.
    for (tick_start = HAL_GetTick(); waiting_for_data; ) {
    for (tick_start = HAL_GetTick(); !(buffer = framebuffer_get_head(FB_NO_FLAGS)); ) {
        __WFI();

        // If we haven't exited this loop before the timeout then we need to abort the transfer.
        if ((HAL_GetTick() - tick_start) >= 3000) {
            waiting_for_data = false;
            dcmi_abort();

            #if defined(DCMI_FSYNC_PIN)
@@ -1459,7 +1478,7 @@ int sensor_snapshot(sensor_t *sensor, image_t *image, uint32_t flags)
    #endif

    // The JPEG in the frame buffer is actually invalid.
    if (jpeg_buffer_overflow) {
    if (buffer->jpeg_buffer_overflow) {
        return -5;
    }

@@ -1500,27 +1519,25 @@ int sensor_snapshot(sensor_t *sensor, image_t *image, uint32_t flags)
            if (sensor->chip_id == OV5640_ID) {
                // Offset contains the sum of all the bytes transferred from the offset buffers
                // while in DCMI_DMAConvCpltUser().
                MAIN_FB()->bpp = offset;
                MAIN_FB()->bpp = buffer->offset;
            } else {
                // Offset contains the number of MAX_XFER_SIZE transfers completed. To get the number of bytes transferred
                // Offset contains the number of length transfers completed. To get the number of bytes transferred
                // within a transfer we have to look at the DMA counter and see how much data was moved.
                MAIN_FB()->bpp = (offset * MAX_XFER_SIZE) + ((MAX_XFER_SIZE/4) - __HAL_DMA_GET_COUNTER(&DMAHandle))*4;
                MAIN_FB()->bpp = buffer->offset * length;

                uint32_t size = framebuffer_get_buffer_size();
                // DMA has most likely corrupted FB alloc state and or more.
                if (MAIN_FB()->bpp > size) {
                    __fatal_error("JPEG Overflow!");
                if (__HAL_DMA_GET_COUNTER(&DMAHandle)) { // Add in the uncompleted transfer length.
                    MAIN_FB()->bpp += ((length / 4) - __HAL_DMA_GET_COUNTER(&DMAHandle)) * 4;
                }

                #if defined(MCU_SERIES_F7) || defined(MCU_SERIES_H7)
                // In JPEG mode, the DMA uses the frame buffer memory directly instead of the line buffer, which is
                // located in a cacheable region and therefore must be invalidated before the CPU can access it again.
                // Note: The frame buffer address is 32-byte aligned, and the size is a multiple of 32-bytes for all boards.
                SCB_InvalidateDCache_by_Addr((uint32_t*)MAIN_FB()->pixels, size);
                SCB_InvalidateDCache_by_Addr(buffer->data, MAIN_FB()->bpp);
                #endif
            }
            // Clean trailing data after 0xFFD9 at the end of the jpeg byte stream.
            MAIN_FB()->bpp = jpeg_clean_trailing_bytes(MAIN_FB()->bpp, MAIN_FB()->pixels);
            MAIN_FB()->bpp = jpeg_clean_trailing_bytes(MAIN_FB()->bpp, buffer->data);
            break;
        default:
            break;
@@ -1535,7 +1552,8 @@ int sensor_snapshot(sensor_t *sensor, image_t *image, uint32_t flags)
        image->w = MAIN_FB()->w;
        image->h = MAIN_FB()->h;
        image->bpp = MAIN_FB()->bpp;
        image->pixels = MAIN_FB()->pixels;
        image->data = buffer->data;
    }

    return 0;
}
@@ -461,6 +461,10 @@ void HAL_SPI_RxCpltCallback(SPI_HandleTypeDef *hspi)

static int sensor_check_buffsize(sensor_t *sensor)
{
    if (MAIN_FB()->n_buffers != 1) {
        framebuffer_set_buffers(1);
    }

    int bpp=0;
    switch (sensor->pixformat) {
        case PIXFORMAT_BAYER:
@@ -475,7 +479,7 @@ static int sensor_check_buffsize(sensor_t *sensor)
            break;
    }

    if ((MAIN_FB()->w * MAIN_FB()->h * bpp) > framebuffer_get_buffer_size()) {
    if ((MAIN_FB()->u * MAIN_FB()->v * bpp) > framebuffer_get_buffer_size()) {
        return -1;
    }

@@ -494,6 +498,13 @@ static int snapshot(sensor_t *sensor, image_t *image, uint32_t flags)
        return -1;
    }

    framebuffer_free_current_buffer();
    vbuffer_t *buffer = framebuffer_get_tail(FB_NO_FLAGS);

    if (!buffer) {
        return -1;
    }

    // The SPI DMA device is always clocking the FLIR Lepton in the background.
    // The code below resets the vospi control values to let data be pulled in.
    // If we need to re-sync we do it. Otherwise, after we finish pulling data
@@ -562,7 +573,7 @@ static int snapshot(sensor_t *sensor, image_t *image, uint32_t flags)
    image->w = MAIN_FB()->u;
    image->h = MAIN_FB()->v;
    image->bpp = MAIN_FB()->bpp; // invalid
    image->data = MAIN_FB()->pixels; // valid
    image->data = buffer->data; // valid

    uint16_t *src = (uint16_t*) vospi_buffer;
@@ -179,15 +179,15 @@ int main()
    }
    #endif

    sensor_init0();
    framebuffer_init0();
    fb_alloc_init0();
    framebuffer_init0();
    sensor_init0();

    // Initialize the sensor
    if (sensor_init() != 0) {
        __fatal_error();
    }

    sensor_reset();

    /* Init Device Library */