modules/py_image: Replace unpack with ndarray creation.

Kwabena W. Agyeman 2024-07-17 16:16:19 -07:00
parent 8b38f38378
commit 9848eed128
4 changed files with 109 additions and 115 deletions
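In user code, the change replaces the one-shot unpack() call with to_ndarray() plus ulab ndarray math. A minimal sketch (not taken from this commit), assuming img is an RGB565 image.Image and buf is a preallocated, writable tensor buffer; the character-code dtype follows the diff's ord('f') convention:

# Before: unpack() converted and normalized into buf in one call.
# img.unpack(buf, ord('f'), scale=(0.0, 1.0), mean=(0.0, 0.0, 0.0), stdev=(1.0, 1.0, 1.0))

# After: to_ndarray() only converts the pixels; normalization is plain ndarray math.
arr = img.to_ndarray(ord('f'), buffer=buf)   # float32 ndarray, shape (H, W, 3) for RGB565
arr = arr * (1.0 / 255.0)                    # e.g. rescale 0..255 pixel values into 0..1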


@@ -6,6 +6,7 @@
# This work is licensed under the MIT license, see the file LICENSE for details.
import image
from ulab import numpy as np
class Normalization:
@@ -34,6 +35,7 @@ class Normalization:
return n
buffer, shape, dtype = args
# Create an image using the input tensor as buffer.
if len(shape) != 4:
raise ValueError("Expected input tensor with shape: (1, H, W, C)")
@@ -42,12 +44,32 @@ class Normalization:
raise ValueError("Expected batches to be 1")
if c != 1 and c != 3:
raise ValueError("Expected channels to be 1 or 3")
# Place the image buffer at the end of the input buffer so we can convert it in-place.
pixfmt = image.GRAYSCALE if c == 1 else image.RGB565
img = image.Image(w, h, pixfmt, buffer=buffer)
offset = len(buffer) - (w * h * (1 if c == 1 else 2))
img = image.Image(w, h, pixfmt, buffer=memoryview(buffer)[offset:])
# Copy and scale (if needed) the input image to the input buffer.
hints = image.BILINEAR | image.CENTER | image.SCALE_ASPECT_EXPAND | image.BLACK_BACKGROUND
img.draw_image(self._image, 0, 0, roi=self.roi, hint=hints)
# Scale and convert the image to input tensor data.
img.unpack(buffer, dtype, scale=self.scale, mean=self.mean, stdev=self.stdev)
# Convert the image in-place into an ndarray input tensor.
array = img.to_ndarray(dtype, buffer=buffer)
# Normalize the input tensor.
if dtype == ord('f'):
fscale = (self.scale[1] - self.scale[0]) / 255.0
fadd = self.scale[0]
def grayscale(x):
return (x[0] * 0.299) + (x[1] * 0.587) + (x[2] * 0.114)
if c == 1:
fadd = (fadd - grayscale(self.mean)) / grayscale(self.stdev)
fscale = fscale / grayscale(self.stdev)
else:
fadd = (fadd - np.array(self.mean)) / np.array(self.stdev)
fscale = fscale / np.array(self.stdev)
array = (array * fscale) + fadd
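The branch above folds the usual ((x * (hi - lo) / 255 + lo) - mean) / stdev normalization into a single fused multiply-add per channel. A small standalone check of that identity, with made-up scale/mean/stdev values purely to illustrate the algebra:

lo, hi = -1.0, 1.0           # example scale range (assumed values)
mean, stdev = 0.5, 0.25      # example channel statistics (assumed values)

fscale = ((hi - lo) / 255.0) / stdev
fadd = (lo - mean) / stdev

x = 200  # any pixel value in 0..255
direct = (((x * (hi - lo) / 255.0) + lo) - mean) / stdev
fused = (x * fscale) + fadd
assert abs(direct - fused) < 1e-9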


@@ -423,95 +423,6 @@ void imlib_fill_image_from_float(image_t *img, int w, int h, float *data, float
    }
}
// Unpacks src into dst. dst must be an array of src->w*src->h*dtype*channels bytes, where channels is
// 1 for grayscale and 3 for RGB.
void imlib_unpack(void *dst, image_t *src, const char dtype, float *scale, float *mean, float *stdev) {
    // src will be unpacked into dst in reverse order so that we can handle in-place unpacking.
    int size = (src->w * src->h) - 1; // must be int per countdown loop
    float fscale = 1.0f, fadd = 0.0f;
    if (scale[0] == 0.0f && scale[1] == 1.0f) {
        fscale = 1.0f / 255.0f;
    } else if (scale[0] == -1.0f && scale[1] == 1.0f) {
        fscale = 2.0f / 255.0f;
        fadd = -1.0f;
    } else if (scale[0] == -128.0f && scale[1] == 127.0f) {
        fadd = -128.0f;
    }
    float fscale_r = fscale, fadd_r = fadd;
    float fscale_g = fscale, fadd_g = fadd;
    float fscale_b = fscale, fadd_b = fadd;
    // To normalize the input image we need to subtract the mean and divide by the standard deviation.
    // We can do this by applying the normalization to fscale and fadd outside the loop.
    // Red
    fadd_r = (fadd_r - mean[0]) / stdev[0];
    fscale_r /= stdev[0];
    // Green
    fadd_g = (fadd_g - mean[1]) / stdev[1];
    fscale_g /= stdev[1];
    // Blue
    fadd_b = (fadd_b - mean[2]) / stdev[2];
    fscale_b /= stdev[2];
    // Grayscale -> Y = 0.299R + 0.587G + 0.114B
    float m = (mean[0] * 0.299f) + (mean[1] * 0.587f) + (mean[2] * 0.114f);
    float s = (stdev[0] * 0.299f) + (stdev[1] * 0.587f) + (stdev[2] * 0.114f);
    fadd = (fadd - m) / s;
    fscale /= s;
    if (src->pixfmt == PIXFORMAT_GRAYSCALE) {
        uint8_t *input_u8 = (uint8_t *) src->data;
        if (dtype == 'f') {
            // convert u8 -> f32
            float *output_f32 = (float *) dst;
            for (; size >= 0; size -= 1) {
                output_f32[size] = (input_u8[size] * fscale) + fadd;
            }
        } else {
            // convert u8 -> s8
            #if (__ARM_ARCH > 6)
            uint32_t *input_u32 = (uint32_t *) src->data;
            uint32_t *output_u32 = (uint32_t *) dst;
            for (; size >= 3; size -= 4) {
                output_u32[size / 4] = input_u32[size / 4] ^ 0x80808080;
            }
            #endif
            uint8_t *input_u8 = (uint8_t *) src->data;
            uint8_t *output_u8 = (uint8_t *) dst;
            for (; size >= 0; size -= 1) {
                output_u8[size] = input_u8[size] ^ 128;
            }
        }
    } else if (src->pixfmt == PIXFORMAT_RGB565) {
        int rgb_size = size * 3; // must be int per countdown loop
        if (dtype == 'f') {
            uint16_t *input_u16 = (uint16_t *) src->data;
            float *output_f32 = (float *) dst;
            for (; size >= 0; size -= 1, rgb_size -= 3) {
                int pixel = input_u16[size];
                output_f32[rgb_size + 0] = (COLOR_RGB565_TO_R8(pixel) * fscale_r) + fadd_r;
                output_f32[rgb_size + 1] = (COLOR_RGB565_TO_G8(pixel) * fscale_g) + fadd_g;
                output_f32[rgb_size + 2] = (COLOR_RGB565_TO_B8(pixel) * fscale_b) + fadd_b;
            }
        } else {
            uint16_t *input_u16 = (uint16_t *) src->data;
            uint8_t *output_u8 = (uint8_t *) dst;
            for (; size >= 0; size -= 1, rgb_size -= 3) {
                int pixel = input_u16[size];
                output_u8[rgb_size + 0] = COLOR_RGB565_TO_R8(pixel) ^ 128;
                output_u8[rgb_size + 1] = COLOR_RGB565_TO_G8(pixel) ^ 128;
                output_u8[rgb_size + 2] = COLOR_RGB565_TO_B8(pixel) ^ 128;
            }
        }
    } else {
        mp_raise_msg(&mp_type_ValueError, MP_ERROR_TEXT("Expected input channels to be 1 or 3"));
    }
}
int8_t imlib_rgb565_to_l(uint16_t pixel) {
    float r_lin = xyz_table[COLOR_RGB565_TO_R8(pixel)];
    float g_lin = xyz_table[COLOR_RGB565_TO_G8(pixel)];
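The removed function walked the image backwards so the destination could share storage with the source: each pixel expands to more output bytes than it occupies, so a forward pass starting at index 0 would overwrite pixels that have not been read yet. (The new Python path sidesteps the same hazard by parking the image at the end of the shared buffer instead.) A toy illustration of the reverse walk, with a plain Python list standing in for the shared buffer:

n = 4
buf = [10, 20, 30, 40] + [0] * (3 * n - n)   # first n slots hold the "pixels"
for i in range(n - 1, -1, -1):               # reverse order: highest index first
    p = buf[i]                               # read the pixel before it can be overwritten
    buf[3 * i + 0] = p                       # stand-ins for the R, G, B outputs
    buf[3 * i + 1] = p
    buf[3 * i + 2] = p
# A forward loop would clobber buf[1] and buf[2] while expanding buf[0].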


@@ -1157,7 +1157,6 @@ void imlib_deinit_all();
// Generic Helper Functions
void imlib_fill_image_from_float(image_t *img, int w, int h, float *data, float min, float max,
                                 bool mirror, bool flip, bool dst_transpose, bool src_transpose);
void imlib_unpack(void *dst, image_t *src, const char dtype, float *scale, float *mean, float *stdev);
// Bayer Image Processing
pixformat_t imlib_bayer_shift(pixformat_t pixfmt, int x, int y, bool transpose);


@@ -725,22 +725,19 @@ static mp_obj_t py_image_bytearray(mp_obj_t img_obj) {
}
static MP_DEFINE_CONST_FUN_OBJ_1(py_image_bytearray_obj, py_image_bytearray);
static mp_obj_t py_image_unpack(uint n_args, const mp_obj_t *pos_args, mp_map_t *kw_args) {
    enum { ARG_buffer, ARG_dtype, ARG_scale, ARG_mean, ARG_stdev };
#if defined(MODULE_ULAB_ENABLED) && (ULAB_MAX_DIMS == 4)
static mp_obj_t py_image_to_ndarray(uint n_args, const mp_obj_t *pos_args, mp_map_t *kw_args) {
    enum { ARG_dtype, ARG_buffer };
    static const mp_arg_t allowed_args[] = {
        { MP_QSTR_buffer, MP_ARG_OBJ | MP_ARG_REQUIRED, {.u_rom_obj = MP_ROM_NONE} },
        { MP_QSTR_dtype, MP_ARG_OBJ | MP_ARG_REQUIRED, {.u_rom_obj = MP_ROM_NONE } },
        { MP_QSTR_scale, MP_ARG_OBJ | MP_ARG_KW_ONLY, {.u_rom_obj = MP_ROM_NONE } },
        { MP_QSTR_mean, MP_ARG_OBJ | MP_ARG_KW_ONLY, {.u_rom_obj = MP_ROM_NONE } },
        { MP_QSTR_stdev, MP_ARG_OBJ | MP_ARG_KW_ONLY, {.u_rom_obj = MP_ROM_NONE } },
        { MP_QSTR_buffer, MP_ARG_OBJ | MP_ARG_KW_ONLY, {.u_rom_obj = MP_ROM_NONE} },
    };
    image_t *image = py_helper_arg_to_image(pos_args[0], ARG_IMAGE_ANY);
    mp_arg_val_t args[MP_ARRAY_SIZE(allowed_args)];
    mp_arg_parse_all(n_args - 1, pos_args + 1, kw_args, MP_ARRAY_SIZE(allowed_args), allowed_args, args);
    mp_buffer_info_t bufinfo = {0};
    mp_get_buffer_raise(args[ARG_buffer].u_obj, &bufinfo, MP_BUFFER_WRITE);
    int len = image->w * image->h;
    int dtype_code;
    int dtype_size;
@@ -753,7 +750,6 @@ static mp_obj_t py_image_unpack(uint n_args, const mp_obj_t *pos_args, mp_map_t
    }
    switch (dtype_code) {
        case 'c':
        case 'b':
        case 'B': {
            dtype_size = 1;
@@ -769,14 +765,24 @@ static mp_obj_t py_image_unpack(uint n_args, const mp_obj_t *pos_args, mp_map_t
        }
    }
    size_t shape[ULAB_MAX_DIMS];
    size_t strides[ULAB_MAX_DIMS];
    int channels;
    int ndim;
    switch (image->pixfmt) {
        case PIXFORMAT_GRAYSCALE: {
            memcpy(shape, (size_t []) {0, 0, image->h, image->w}, sizeof(shape));
            memcpy(strides, (size_t []) {0, 0, image->w * dtype_size, dtype_size}, sizeof(strides));
            channels = 1;
            ndim = 2;
            break;
        }
        case PIXFORMAT_RGB565: {
            memcpy(shape, (size_t []) {0, image->h, image->w, 3}, sizeof(shape));
            memcpy(strides, (size_t []) {0, image->w * dtype_size * 3, dtype_size * 3, dtype_size}, sizeof(strides));
            channels = 3;
            ndim = 3;
            break;
        }
        default: {
@@ -785,24 +791,78 @@ static mp_obj_t py_image_unpack(uint n_args, const mp_obj_t *pos_args, mp_map_t
        }
    }
    if ((image->w * image->h * dtype_size * channels) > bufinfo.len) {
        mp_raise_ValueError(MP_ERROR_TEXT("Buffer size is too small"));
    ndarray_obj_t *ndarray;
    if (args[ARG_buffer].u_obj != mp_const_none) {
        mp_buffer_info_t bufinfo = {0};
        mp_get_buffer_raise(args[ARG_buffer].u_obj, &bufinfo, MP_BUFFER_WRITE);
        if ((len * dtype_size * channels) > bufinfo.len) {
            mp_raise_ValueError(MP_ERROR_TEXT("Buffer is too small"));
        }
        ndarray = m_new_obj(ndarray_obj_t);
        ndarray->base.type = &ulab_ndarray_type;
        ndarray->dtype = dtype_code;
        ndarray->boolean = NDARRAY_NUMERIC;
        ndarray->ndim = ndim;
        ndarray->len = len * channels;
        ndarray->itemsize = dtype_size;
        memcpy(ndarray->shape, shape, sizeof(shape));
        memcpy(ndarray->strides, strides, sizeof(strides));
        ndarray->array = bufinfo.buf;
        ndarray->origin = bufinfo.buf;
    } else {
        ndarray = ndarray_new_dense_ndarray(ndim, shape, dtype_code);
    }
    // scale, offset
    float scale[2] = {0.0f, 1.0f};
    py_helper_arg_to_float_array(args[ARG_scale].u_obj, scale, 2);
    int shift = (dtype_code == 'b') ? 0x80808080 : 0x00000000;
    float mean[3] = {0.0f, 0.0f, 0.0f};
    py_helper_arg_to_float_array(args[ARG_mean].u_obj, mean, 3);
    if (image->pixfmt == PIXFORMAT_GRAYSCALE) {
        uint8_t *input_u8 = (uint8_t *) image->data;
        if (dtype_code == 'f') {
            float *output_f32 = (float *) ndarray->array;
            for (int i = 0; i < len; i++) {
                output_f32[i] = input_u8[i];
            }
        } else {
            uint8_t *output_u8 = (uint8_t *) ndarray->array;
    float stdev[3] = {1.0f, 1.0f, 1.0f};
    py_helper_arg_to_float_array(args[ARG_stdev].u_obj, stdev, 3);
            int i = 0;
    imlib_unpack(bufinfo.buf, image, dtype_code, scale, mean, stdev);
    return pos_args[0];
            for (; i < len; i += 4) {
                *((uint32_t *) (output_u8 + i)) = *((uint32_t *) (input_u8 + i)) ^ shift;
            }
            for (; i < len; i++) {
                output_u8[i] = input_u8[i] ^ shift;
            }
        }
    } else {
        uint16_t *input_u16 = (uint16_t *) image->data;
        if (dtype_code == 'f') {
            float *output_f32 = (float *) ndarray->array;
            for (int i = 0, j = 0; i < len; i++, j += 3) {
                int pixel = input_u16[i];
                output_f32[j + 0] = COLOR_RGB565_TO_R8(pixel);
                output_f32[j + 1] = COLOR_RGB565_TO_G8(pixel);
                output_f32[j + 2] = COLOR_RGB565_TO_B8(pixel);
            }
        } else {
            uint8_t *output_u8 = (uint8_t *) ndarray->array;
            for (int i = 0, j = 0; i < len; i++, j += 3) {
                int pixel = input_u16[i];
                output_u8[j + 0] = COLOR_RGB565_TO_R8(pixel) ^ shift;
                output_u8[j + 1] = COLOR_RGB565_TO_G8(pixel) ^ shift;
                output_u8[j + 2] = COLOR_RGB565_TO_B8(pixel) ^ shift;
            }
        }
    }
    return MP_OBJ_FROM_PTR(ndarray);
}
static MP_DEFINE_CONST_FUN_OBJ_KW(py_image_unpack_obj, 1, py_image_unpack);
static MP_DEFINE_CONST_FUN_OBJ_KW(py_image_to_ndarray_obj, 1, py_image_to_ndarray);
#endif
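Going by the shape and stride tables set up above, the new binding can be exercised roughly like this (a sketch, not part of the commit; img is assumed to be a grayscale or RGB565 image.Image, and sizes are examples only):

a = img.to_ndarray(ord('f'))   # RGB565 -> float32 ndarray of shape (H, W, 3); grayscale -> (H, W)
b = img.to_ndarray(ord('b'))   # int8 ndarray; each 8-bit channel value is XORed with 0x80
# An existing buffer can be wrapped instead of allocating a dense ndarray:
buf = bytearray(img.width() * img.height() * 3 * 4)  # room for float32 RGB data
c = img.to_ndarray(ord('f'), buffer=buf)              # backed by buf, no extra allocation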
static mp_obj_t py_image_get_pixel(uint n_args, const mp_obj_t *args, mp_map_t *kw_args) {
    image_t *arg_img = py_helper_arg_to_image(args[0], ARG_IMAGE_UNCOMPRESSED);
@@ -6469,7 +6529,9 @@ static const mp_rom_map_elem_t locals_dict_table[] = {
    {MP_ROM_QSTR(MP_QSTR_format), MP_ROM_PTR(&py_image_format_obj)},
    {MP_ROM_QSTR(MP_QSTR_size), MP_ROM_PTR(&py_image_size_obj)},
    {MP_ROM_QSTR(MP_QSTR_bytearray), MP_ROM_PTR(&py_image_bytearray_obj)},
    {MP_ROM_QSTR(MP_QSTR_unpack), MP_ROM_PTR(&py_image_unpack_obj)},
    #if defined(MODULE_ULAB_ENABLED) && (ULAB_MAX_DIMS == 4)
    {MP_ROM_QSTR(MP_QSTR_to_ndarray), MP_ROM_PTR(&py_image_to_ndarray_obj)},
    #endif
    {MP_ROM_QSTR(MP_QSTR_get_pixel), MP_ROM_PTR(&py_image_get_pixel_obj)},
    {MP_ROM_QSTR(MP_QSTR_set_pixel), MP_ROM_PTR(&py_image_set_pixel_obj)},
    {MP_ROM_QSTR(MP_QSTR_to_bitmap), MP_ROM_PTR(&py_image_to_bitmap_obj)},