Mirror of https://github.com/openmv/openmv.git, synced 2025-11-04 14:49:50 +08:00.
FLIR: Faster UVC streaming.

parent 36cd44ad23
commit d140a8ef1b

src/omv/lepton.c | 148
--- a/src/omv/lepton.c
+++ b/src/omv/lepton.c
@@ -346,7 +346,7 @@ void HAL_SPI_RxCpltCallback(SPI_HandleTypeDef *hspi)
     }
 }
 
-static int snapshot(sensor_t *sensor, image_t *image, streaming_cb_t cb)
+static int snapshot(sensor_t *sensor, image_t *image, streaming_cb_t streaming_cb)
 {
     fb_update_jpeg_buffer();
 
@@ -354,92 +354,104 @@ static int snapshot(sensor_t *sensor, image_t *image, streaming_cb_t cb)
         return -1;
     }
 
-    // The SPI DMA device is always clocking the FLIR Lepton in the background.
-    // The code below resets the vospi control values to let data be pulled in.
-    // If we need to re-sync we do it. Otherwise, after we finish pulling data
-    // in we exit and let the SPI bus keep running. Then on the next call to
-    // snapshot we read in more data and pull in the next frame.
-    HAL_NVIC_DisableIRQ(LEPTON_SPI_DMA_IRQn);
-    vospi_pid = VOSPI_FIRST_PACKET;
-    vospi_seg = VOSPI_FIRST_SEGMENT;
-    HAL_NVIC_EnableIRQ(LEPTON_SPI_DMA_IRQn);
+    bool frame_ready = false;
+    bool streaming = (streaming_cb != NULL); // Streaming mode.
 
     do {
-        if (vospi_resync == true) {
-            lepton_sync();
+        // The SPI DMA device is always clocking the FLIR Lepton in the background.
+        // The code below resets the vospi control values to let data be pulled in.
+        // If we need to re-sync we do it. Otherwise, after we finish pulling data
+        // in we exit and let the SPI bus keep running. Then on the next call to
+        // snapshot we read in more data and pull in the next frame.
+        HAL_NVIC_DisableIRQ(LEPTON_SPI_DMA_IRQn);
+        vospi_pid = VOSPI_FIRST_PACKET;
+        vospi_seg = VOSPI_FIRST_SEGMENT;
+        HAL_NVIC_EnableIRQ(LEPTON_SPI_DMA_IRQn);
+
+        do {
+            if (vospi_resync == true) {
+                lepton_sync();
+            }
+            if (frame_ready == true && streaming_cb != NULL) {
+                // Start streaming the frame while a new one is captured.
+                streaming = streaming_cb(image);
+                frame_ready = false;
+            } else {
+                __WFI();
+            }
+        } while (vospi_pid < vospi_packets); // only checking one volatile var so atomic.
+
+        MAIN_FB()->w = MAIN_FB()->u;
+        MAIN_FB()->h = MAIN_FB()->v;
+
+        switch (sensor->pixformat) {
+            case PIXFORMAT_RGB565: {
+                MAIN_FB()->bpp = sizeof(uint16_t);
+                break;
+            }
+            case PIXFORMAT_GRAYSCALE: {
+                MAIN_FB()->bpp = sizeof(uint8_t);
+                break;
+            }
+            default: {
+                break;
+            }
         }
-        __WFI();
-    } while (vospi_pid < vospi_packets); // only checking one volatile var so atomic.
 
-    MAIN_FB()->w = MAIN_FB()->u;
-    MAIN_FB()->h = MAIN_FB()->v;
+        image->w = MAIN_FB()->u;
+        image->h = MAIN_FB()->v;
+        image->bpp = MAIN_FB()->bpp; // invalid
+        image->data = MAIN_FB()->pixels; // valid
 
-    switch (sensor->pixformat) {
-        case PIXFORMAT_RGB565: {
-            MAIN_FB()->bpp = sizeof(uint16_t);
-            break;
-        }
-        case PIXFORMAT_GRAYSCALE: {
-            MAIN_FB()->bpp = sizeof(uint8_t);
-            break;
-        }
-        default: {
-            break;
-        }
-    }
+        uint16_t *src = (uint16_t*) vospi_buffer;
 
-    image->w = MAIN_FB()->u;
-    image->h = MAIN_FB()->v;
-    image->bpp = MAIN_FB()->bpp; // invalid
-    image->data = MAIN_FB()->pixels; // valid
+        float x_scale = resolution[sensor->framesize][0] / ((float) h_res);
+        float y_scale = resolution[sensor->framesize][1] / ((float) v_res);
+        // MAX == KeepAspectRationByExpanding - MIN == KeepAspectRatio
+        float scale = IM_MAX(x_scale, y_scale), scale_inv = 1.0f / scale;
+        int x_offset = (resolution[sensor->framesize][0] - (h_res * scale)) / 2;
+        int y_offset = (resolution[sensor->framesize][1] - (v_res * scale)) / 2;
+        // The code below upscales the source image to the requested frame size
+        // and then crops it to the window set by the user.
 
-    uint16_t *src = (uint16_t*) vospi_buffer;
+        for (int y = y_offset, yy = fast_ceilf(v_res * scale) + y_offset; y < yy; y++) {
+            if ((MAIN_FB()->y <= y) && (y < (MAIN_FB()->y + MAIN_FB()->v))) { // user window cropping
 
-    float x_scale = resolution[sensor->framesize][0] / ((float) h_res);
-    float y_scale = resolution[sensor->framesize][1] / ((float) v_res);
-    // MAX == KeepAspectRationByExpanding - MIN == KeepAspectRatio
-    float scale = IM_MAX(x_scale, y_scale), scale_inv = 1.0f / scale;
-    int x_offset = (resolution[sensor->framesize][0] - (h_res * scale)) / 2;
-    int y_offset = (resolution[sensor->framesize][1] - (v_res * scale)) / 2;
-    // The code below upscales the source image to the requested frame size
-    // and then crops it to the window set by the user.
+                uint16_t *row_ptr = src + (fast_floorf(y * scale_inv) * h_res);
 
-    for (int y = y_offset, yy = fast_ceilf(v_res * scale) + y_offset; y < yy; y++) {
-        if ((MAIN_FB()->y <= y) && (y < (MAIN_FB()->y + MAIN_FB()->v))) { // user window cropping
+                for (int x = x_offset, xx = fast_ceilf(h_res * scale) + x_offset; x < xx; x++) {
+                    if ((MAIN_FB()->x <= x) && (x < (MAIN_FB()->x + MAIN_FB()->u))) { // user window cropping
 
-            uint16_t *row_ptr = src + (fast_floorf(y * scale_inv) * h_res);
+                        // Value is the 14-bit value from the FLIR IR camera.
+                        // However, with AGC enabled only the bottom 8-bits are non-zero.
+                        int value = __REV16(row_ptr[fast_floorf(x * scale_inv)]) & 0x3FFF;
 
-            for (int x = x_offset, xx = fast_ceilf(h_res * scale) + x_offset; x < xx; x++) {
-                if ((MAIN_FB()->x <= x) && (x < (MAIN_FB()->x + MAIN_FB()->u))) { // user window cropping
+                        int t_x = x - MAIN_FB()->x;
+                        int t_y = y - MAIN_FB()->y;
 
-                    // Value is the 14-bit value from the FLIR IR camera.
-                    // However, with AGC enabled only the bottom 8-bits are non-zero.
-                    int value = __REV16(row_ptr[fast_floorf(x * scale_inv)]) & 0x3FFF;
+                        if (h_mirror) t_x = MAIN_FB()->u - t_x - 1;
+                        if (v_flip) t_y = MAIN_FB()->v - t_y - 1;
 
-                    int t_x = x - MAIN_FB()->x;
-                    int t_y = y - MAIN_FB()->y;
-
-                    if (h_mirror) t_x = MAIN_FB()->u - t_x - 1;
-                    if (v_flip) t_y = MAIN_FB()->v - t_y - 1;
-
-                    switch (sensor->pixformat) {
-                        case PIXFORMAT_RGB565: {
-                            IMAGE_PUT_RGB565_PIXEL(image, t_x, t_y, rainbow_table[value & 0xFF]);
-                            break;
-                        }
-                        case PIXFORMAT_GRAYSCALE: {
-                            IMAGE_PUT_GRAYSCALE_PIXEL(image, t_x, t_y, value & 0xFF);
-                            break;
-                        }
-                        default: {
-                            break;
+                        switch (sensor->pixformat) {
+                            case PIXFORMAT_RGB565: {
+                                IMAGE_PUT_RGB565_PIXEL(image, t_x, t_y, rainbow_table[value & 0xFF]);
+                                break;
+                            }
+                            case PIXFORMAT_GRAYSCALE: {
+                                IMAGE_PUT_GRAYSCALE_PIXEL(image, t_x, t_y, value & 0xFF);
+                                break;
+                            }
+                            default: {
+                                break;
+                            }
                         }
                     }
                 }
             }
         }
-    }
-
+
+        frame_ready = true;
+    } while (streaming && streaming_cb != NULL);
 
     return 0;
 }
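Note: the point of passing a streaming_cb into snapshot() is that the previously captured frame can be handed to the consumer (the UVC code) while the SPI DMA is already clocking in the next frame, instead of returning to the caller between frames. The sketch below is a minimal, self-contained illustration of that frame_ready/streaming handshake; fake_capture() and send_frame_over_uvc() are hypothetical stand-ins rather than OpenMV APIs, and the real driver overlaps the callback with the DMA transfer instead of running the two steps back to back.

// Hypothetical, self-contained sketch of the callback-driven capture loop above.
// fake_capture() and send_frame_over_uvc() are stand-ins, not OpenMV APIs.
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef struct { int w, h; uint8_t data[64]; } image_t;
typedef bool (*streaming_cb_t)(image_t *image);

// Stand-in for the SPI/DMA capture: fills the image buffer with a synthetic frame.
static void fake_capture(image_t *image, int frame_no) {
    image->w = 8;
    image->h = 8;
    for (int i = 0; i < 64; i++) {
        image->data[i] = (uint8_t) (frame_no + i);
    }
}

// Mirrors the new snapshot() flow: keep capturing while the callback asks for
// more frames, handing the previous frame to the consumer between captures.
static int snapshot_like(image_t *image, streaming_cb_t streaming_cb) {
    bool frame_ready = false;
    bool streaming = (streaming_cb != NULL); // Streaming mode.
    int frame_no = 0;

    do {
        fake_capture(image, frame_no++); // the real driver waits on the DMA here

        if (frame_ready && (streaming_cb != NULL)) {
            // Hand the previously captured frame to the consumer.
            streaming = streaming_cb(image);
            frame_ready = false;
        }

        frame_ready = true;
    } while (streaming && (streaming_cb != NULL));

    return 0;
}

// Hypothetical consumer: stop streaming after three frames.
static bool send_frame_over_uvc(image_t *image) {
    static int sent = 0;
    printf("streamed frame %d (%dx%d)\n", ++sent, image->w, image->h);
    return sent < 3;
}

int main(void) {
    image_t image;
    return snapshot_like(&image, send_frame_over_uvc);
}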
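Note: the scaling block in snapshot() is plain nearest-neighbor resampling. It computes one scale factor per axis from the Lepton's native resolution to the requested frame size, takes the larger of the two (IM_MAX, the keep-aspect-ratio-by-expanding case, so the scaled source always covers the frame), and maps each destination pixel back to a source pixel with floorf(coord * scale_inv); the window checks then crop the result. The stand-alone sketch below reproduces only that arithmetic with illustrative numbers (an 80x60 source and a 128x128 requested frame), not values read from the sensor.

// Stand-alone sketch of the nearest-neighbor scale-and-center math used in snapshot().
// The resolutions below are illustrative; the driver derives them from the sensor and framesize.
#include <math.h>
#include <stdio.h>

int main(void) {
    const int h_res = 80, v_res = 60;       // source (Lepton) resolution, illustrative
    const int frame_w = 128, frame_h = 128; // requested frame size, illustrative

    float x_scale = frame_w / (float) h_res;
    float y_scale = frame_h / (float) v_res;
    // max() == keep aspect ratio by expanding: the scaled source covers the whole
    // frame, and anything outside the frame/window is dropped by the cropping checks.
    float scale = (x_scale > y_scale) ? x_scale : y_scale;
    float scale_inv = 1.0f / scale;
    int x_offset = (int) ((frame_w - (h_res * scale)) / 2); // negative when the width overflows
    int y_offset = (int) ((frame_h - (v_res * scale)) / 2);

    printf("scale=%.3f x_offset=%d y_offset=%d\n", scale, x_offset, y_offset);

    // Each destination pixel maps straight back to one source pixel (nearest neighbor).
    for (int y = 0; y < frame_h; y += 64) {
        for (int x = 0; x < frame_w; x += 64) {
            int src_x = (int) floorf(x * scale_inv);
            int src_y = (int) floorf(y * scale_inv);
            printf("dst(%3d,%3d) -> src(%2d,%2d)\n", x, y, src_x, src_y);
        }
    }
    return 0;
}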